git.lizzy.rs Git - rust.git/commitdiff
Rollup merge of #87260 - antoyo:libgccjit-codegen, r=Mark-Simulacrum
author    Guillaume Gomez <guillaume1.gomez@gmail.com>
          Tue, 28 Sep 2021 18:00:12 +0000 (20:00 +0200)
committer GitHub <noreply@github.com>
          Tue, 28 Sep 2021 18:00:12 +0000 (20:00 +0200)
Libgccjit codegen

This PR introduces a subtree for a GCC-based codegen backend to the repository, per the decision in https://github.com/rust-lang/compiler-team/issues/442. We do not yet expect to ship this backend on nightly or run its tests in CI, but we do verify that the backend passes `cargo check`.

Work is expected to progress primarily in https://github.com/rust-lang/rustc_codegen_gcc, with semi-regular upstreaming, as with other subtrees.

80 files changed:
Cargo.toml
compiler/rustc_codegen_gcc/.github/workflows/main.yml [new file with mode: 0644]
compiler/rustc_codegen_gcc/.gitignore [new file with mode: 0644]
compiler/rustc_codegen_gcc/Cargo.lock [new file with mode: 0644]
compiler/rustc_codegen_gcc/Cargo.toml [new file with mode: 0644]
compiler/rustc_codegen_gcc/LICENSE-APACHE [new file with mode: 0644]
compiler/rustc_codegen_gcc/LICENSE-MIT [new file with mode: 0644]
compiler/rustc_codegen_gcc/Readme.md [new file with mode: 0644]
compiler/rustc_codegen_gcc/build.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml [new file with mode: 0644]
compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/cargo.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/clean_all.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/config.sh [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/alloc_example.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/alloc_system.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/dst-field-align.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/example.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/mini_core.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/mod_bench.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/std_example.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/example/track-caller-attribute.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch [new file with mode: 0644]
compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch [new file with mode: 0644]
compiler/rustc_codegen_gcc/prepare.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/prepare_build.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/rust-toolchain [new file with mode: 0644]
compiler/rustc_codegen_gcc/rustup.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/src/abi.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/allocator.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/archive.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/asm.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/back/mod.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/back/write.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/base.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/builder.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/callee.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/common.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/consts.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/context.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/coverageinfo.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/debuginfo.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/declare.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/intrinsic/mod.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/intrinsic/simd.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/lib.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/mono_item.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/type_.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/src/type_of.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/test.sh [new file with mode: 0755]
compiler/rustc_codegen_gcc/tests/lib.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/abort1.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/abort2.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/array.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/asm.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/assign.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/closure.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/condition.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/empty_main.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/exit.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/exit_code.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/int_overflow.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/mut_ref.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/operations.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/return-tuple.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/slice.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/static.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/structs.rs [new file with mode: 0644]
compiler/rustc_codegen_gcc/tests/run/tuple.rs [new file with mode: 0644]
rustfmt.toml
src/bootstrap/check.rs
src/tools/tidy/src/lib.rs

index 3822da2ccd5e4694756480493eb2ff55afcce26e..ce7073886c20e95658065773511c7a05174d5274 100644 (file)
@@ -41,6 +41,7 @@ members = [
 exclude = [
   "build",
   "compiler/rustc_codegen_cranelift",
+  "compiler/rustc_codegen_gcc",
   "src/test/rustdoc-gui",
   # HACK(eddyb) This hardcodes the fact that our CI uses `/checkout/obj`.
   "obj",
diff --git a/compiler/rustc_codegen_gcc/.github/workflows/main.yml b/compiler/rustc_codegen_gcc/.github/workflows/main.yml
new file mode 100644 (file)
index 0000000..98bed8e
--- /dev/null
@@ -0,0 +1,96 @@
+name: CI
+
+on:
+  - push
+  - pull_request
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Install packages
+      run: sudo apt-get install ninja-build ripgrep
+
+    - name: Download artifact
+      uses: dawidd6/action-download-artifact@v2
+      with:
+          workflow: main.yml
+          name: libgccjit.so
+          path: gcc-build
+          repo: antoyo/gcc
+
+    - name: Setup path to libgccjit
+      run: |
+          echo $(readlink -f gcc-build) > gcc_path
+          ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
+
+    - name: Set LIBRARY_PATH
+      run: |
+        echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+        echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+
+    # https://github.com/actions/cache/issues/133
+    - name: Fixup owner of ~/.cargo/
+      # Don't remove the trailing /. It is necessary to follow the symlink.
+      run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v1.1.2
+      with:
+        path: ~/.cargo/bin
+        key: cargo-installed-crates2-ubuntu-latest
+
+    - name: Cache cargo registry
+      uses: actions/cache@v1
+      with:
+        path: ~/.cargo/registry
+        key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo index
+      uses: actions/cache@v1
+      with:
+        path: ~/.cargo/git
+        key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v1.1.2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
+
+    - name: Build
+      run: |
+        ./prepare_build.sh
+        ./build.sh
+        cargo test
+        ./clean_all.sh
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "user@example.com"
+        git config --global user.name "User"
+        ./prepare.sh
+
+    # Compiling is a separate step, as the actions-rs/cargo action supports error annotations
+    - name: Compile
+      uses: actions-rs/cargo@v1.0.3
+      with:
+        command: build
+        args: --release
+
+    - name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        # Reduce the number of benchmark runs, as they are slow
+        export COMPILE_RUNS=2
+        export RUN_RUNS=2
+
+        ./test.sh --release
diff --git a/compiler/rustc_codegen_gcc/.gitignore b/compiler/rustc_codegen_gcc/.gitignore
new file mode 100644 (file)
index 0000000..1e2f9e3
--- /dev/null
@@ -0,0 +1,20 @@
+target
+**/*.rs.bk
+*.rlib
+*.o
+perf.data
+perf.data.old
+*.events
+*.string*
+/build_sysroot/sysroot
+/build_sysroot/sysroot_src
+/build_sysroot/Cargo.lock
+/build_sysroot/test_target/Cargo.lock
+/rust
+/simple-raytracer
+/regex
+gimple*
+*asm
+res
+test-backend
+gcc_path
diff --git a/compiler/rustc_codegen_gcc/Cargo.lock b/compiler/rustc_codegen_gcc/Cargo.lock
new file mode 100644 (file)
index 0000000..60a2101
--- /dev/null
@@ -0,0 +1,373 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "crc32fast"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "fm"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca"
+dependencies = [
+ "regex",
+]
+
+[[package]]
+name = "gccjit"
+version = "1.0.0"
+source = "git+https://github.com/antoyo/gccjit.rs#2d4fea7319f80531b2e5d264fca9f1c498a3a62e"
+dependencies = [
+ "gccjit_sys",
+]
+
+[[package]]
+name = "gccjit_sys"
+version = "0.0.1"
+source = "git+https://github.com/antoyo/gccjit.rs#2d4fea7319f80531b2e5d264fca9f1c498a3a62e"
+dependencies = [
+ "libc 0.1.12",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.102",
+ "wasi",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc 0.2.102",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "lang_tester"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090"
+dependencies = [
+ "fm",
+ "getopts",
+ "libc 0.2.102",
+ "num_cpus",
+ "termcolor",
+ "threadpool",
+ "wait-timeout",
+ "walkdir",
+]
+
+[[package]]
+name = "libc"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
+
+[[package]]
+name = "libc"
+version = "0.2.102"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2a5ac8f984bfcf3a823267e5fde638acc3325f6496633a5da6bb6eb2171e103"
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+dependencies = [
+ "hermit-abi",
+ "libc 0.2.102",
+]
+
+[[package]]
+name = "object"
+version = "0.25.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+ "memchr",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc 0.2.102",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "gccjit",
+ "lang_tester",
+ "object",
+ "target-lexicon",
+ "tempfile",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d"
+
+[[package]]
+name = "tempfile"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.102",
+ "rand",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+
+[[package]]
+name = "wait-timeout"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
+dependencies = [
+ "libc 0.2.102",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_gcc/Cargo.toml b/compiler/rustc_codegen_gcc/Cargo.toml
new file mode 100644 (file)
index 0000000..9e8c195
--- /dev/null
@@ -0,0 +1,51 @@
+[package]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+authors = ["Antoni Boucher <bouanto@zoho.com>"]
+edition = "2018"
+license = "MIT OR Apache-2.0"
+
+[lib]
+crate-type = ["dylib"]
+
+[[test]]
+name = "lang_tests"
+path = "tests/lib.rs"
+harness = false
+
+[dependencies]
+gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
+
+# Local copy.
+#gccjit = { path = "../gccjit.rs" }
+
+target-lexicon = "0.10.0"
+
+ar = "0.8.0"
+
+[dependencies.object]
+version = "0.25.0"
+default-features = false
+features = ["read", "std", "write"] # We don't need WASM support.
+
+[dev-dependencies]
+lang_tester = "0.3.9"
+tempfile = "3.1.0"
+
+[profile.dev]
+# Compiling dependencies with optimizations makes running the tests much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_gcc]
+# Disabling optimizations for cg_gccjit itself makes compilation after a change faster.
+opt-level = 0
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
diff --git a/compiler/rustc_codegen_gcc/LICENSE-APACHE b/compiler/rustc_codegen_gcc/LICENSE-APACHE
new file mode 100644 (file)
index 0000000..1b5ec8b
--- /dev/null
@@ -0,0 +1,176 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/compiler/rustc_codegen_gcc/LICENSE-MIT b/compiler/rustc_codegen_gcc/LICENSE-MIT
new file mode 100644 (file)
index 0000000..31aa793
--- /dev/null
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_gcc/Readme.md b/compiler/rustc_codegen_gcc/Readme.md
new file mode 100644 (file)
index 0000000..709d93c
--- /dev/null
@@ -0,0 +1,135 @@
+# WIP libgccjit codegen backend for rust
+
+This is a GCC codegen backend for rustc: it can be loaded by the existing rustc frontend, while benefiting from GCC's strengths, namely support for more architectures and GCC's optimizations.
+
+**Despite its name, libgccjit can be used for ahead-of-time compilation, which is how it is used here.**
+
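+To make that point concrete, here is a minimal, entirely hypothetical sketch using the [gccjit](https://github.com/antoyo/gccjit.rs) crate this backend depends on (standalone code, not code from the backend itself; it assumes a libgccjit visible to the linker and the crate's typed API as described in its repository). It builds a trivial `main` through the libgccjit API and compiles it straight to an executable on disk, with no JIT execution involved:
+
+```rust
+use gccjit::{Context, FunctionType, OutputKind};
+
+fn main() {
+    let context = Context::default();
+    let int_type = context.new_type::<i32>();
+
+    // Equivalent of `int main(void) { return 0; }`, built through the API.
+    let function = context.new_function(None, FunctionType::Exported, int_type, &[], "main", false);
+    let block = function.new_block("entry");
+    block.end_with_return(None, context.new_rvalue_from_int(int_type, 0));
+
+    // Ahead-of-time compilation: emit a native executable instead of JITing.
+    context.compile_to_file(OutputKind::Executable, "./hello");
+}
+```
+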
+## Motivation
+
+The primary goal of this project is to be able to compile Rust code on platforms unsupported by LLVM.
+A secondary goal is to check whether using the GCC backend provides any run-time speed improvement for programs compiled with rustc.
+
+## Building
+
+**This requires a patched libgccjit in order to work.
+The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
+(Those patches should apply cleanly on master; if they don't, they are known to work on commit 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
+You can also use my [fork of gcc](https://github.com/antoyo/gcc), which already includes these patches.**
+
+**Put the path to your custom build of libgccjit in the file `gcc_path`.**
+
+```bash
+$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
+$ cd rustc_codegen_gcc
+$ ./prepare_build.sh # download and patch sysroot src
+$ ./build.sh --release
+```
+
+To run the tests:
+
+```bash
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./test.sh --release
+```
+
+## Usage
+
+In the following instructions, `$cg_gccjit_dir` is the directory you cloned this repo into.
+
+### Cargo
+
+```bash
+$ CHANNEL="release" $cg_gccjit_dir/cargo.sh run
+```
+
+If you compiled cg_gccjit in debug mode (i.e., you didn't pass `--release` to `./test.sh`), you should use `CHANNEL="debug"` instead, or omit `CHANNEL="release"` completely.
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ rustc +$(cat $cg_gccjit_dir/rust-toolchain) -Cpanic=abort -Zcodegen-backend=$cg_gccjit_dir/target/release/librustc_codegen_gcc.so --sysroot $cg_gccjit_dir/build_sysroot/sysroot my_crate.rs
+```
+
+## Env vars
+
+<dl>
+    <dt>CG_GCCJIT_INCR_CACHE_DISABLED</dt>
+    <dd>Don't cache object files in the incremental cache. Useful while developing cg_gccjit:
+    it makes it possible to use incremental mode for all the analyses performed by rustc,
+    without caching object files whose content would have changed because of a change to cg_gccjit.</dd>
+    <dt>CG_GCCJIT_DISPLAY_CG_TIME</dt>
+    <dd>Display the time it took to perform codegen for a crate</dd>
+</dl>
+
+## Debugging
+
+Sometimes, libgccjit will crash and output an error like this:
+
+```
+during RTL pass: expand
+libgccjit.so: error: in expmed_mode_index, at expmed.h:249
+0x7f0da2e61a35 expmed_mode_index
+       ../../../gcc/gcc/expmed.h:249
+0x7f0da2e61aa4 expmed_op_cost_ptr
+       ../../../gcc/gcc/expmed.h:271
+0x7f0da2e620dc sdiv_cost_ptr
+       ../../../gcc/gcc/expmed.h:540
+0x7f0da2e62129 sdiv_cost
+       ../../../gcc/gcc/expmed.h:558
+0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
+       ../../../gcc/gcc/expmed.c:4335
+0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+       ../../../gcc/gcc/expr.c:9240
+0x7f0da2cd1a1e expand_gimple_stmt_1
+       ../../../gcc/gcc/cfgexpand.c:3796
+0x7f0da2cd1c30 expand_gimple_stmt
+       ../../../gcc/gcc/cfgexpand.c:3857
+0x7f0da2cd90a9 expand_gimple_basic_block
+       ../../../gcc/gcc/cfgexpand.c:5898
+0x7f0da2cdade8 execute
+       ../../../gcc/gcc/cfgexpand.c:6582
+```
+
+To see the code which causes this error, call the following function:
+
+```c
+gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
+```
+
+This will create a C-like file and add locations to the IR that point into this C file.
+Then rerun the program, and the error will now contain the location:
+
+```
+libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
+```
+
+Alternatively, set a breakpoint on `add_error` in gdb and print the line number using:
+
+```
+p loc->m_line
+```
+
+### How to use a custom-built rustc
+
+ * Build the stage2 compiler and register it as a toolchain (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
+ * Clean and rebuild the codegen with `debug-current` set in the `rust-toolchain` file.
+
+### How to build a cross-compiling libgccjit
+
+#### Building libgccjit
+
+ * Follow these instructions: https://preshing.com/20141119/how-to-build-a-gcc-cross-compiler/ with the following changes:
+ * Configure gcc with `../gcc/configure --enable-host-shared --disable-multilib --enable-languages=c,jit,c++ --disable-bootstrap --enable-checking=release --prefix=/opt/m68k-gcc/ --target=m68k-linux --without-headers`.
+ * Some shells, like fish, don't define the environment variable `$MACHTYPE`.
+ * Add `CFLAGS="-Wno-error=attributes -g -O2"` at the end of the configure command for building glibc (`CFLAGS="-Wno-error=attributes -Wno-error=array-parameter -Wno-error=stringop-overflow -Wno-error=array-bounds -g -O2"` for glibc 2.31, which is useful for Debian).
+
+#### Configuring rustc_codegen_gcc
+
+ * Set `TARGET_TRIPLE="m68k-unknown-linux-gnu"` in config.sh.
+ * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target having the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the rust compiler).
+ * Set `linker='-Clinker=m68k-linux-gcc'`.
+ * Set the path to the cross-compiling libgccjit in `gcc_path`.
+ * Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::<i64>();` in `context.rs` (same for `u128_type`); see the sketch after this list.
+ * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
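+
+As a minimal, hypothetical illustration of that 128-bit fallback, written as standalone code against the `gccjit` crate rather than this backend's actual `context.rs` (the registration step is only described in a comment):
+
+```rust
+use gccjit::Context;
+
+fn main() {
+    let context = Context::default();
+
+    // Workaround from the bullet above: when the target's libgccjit cannot
+    // represent native 128-bit integers, hand out 64-bit types for
+    // i128/u128 so codegen never emits unsupported operations.
+    let i128_type = context.new_type::<i64>();
+    let u128_type = context.new_type::<u64>();
+
+    // The real context.rs would register these in its type cache; here we
+    // only show that the fallback types can be created.
+    let _ = (i128_type, u128_type);
+}
+```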
diff --git a/compiler/rustc_codegen_gcc/build.sh b/compiler/rustc_codegen_gcc/build.sh
new file mode 100755 (executable)
index 0000000..17a0d2a
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+#set -x
+set -e
+
+if [ -f ./gcc_path ]; then 
+    export GCC_PATH=$(cat gcc_path)
+else
+    echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+    exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$1" == "--release" ]]; then
+    export CHANNEL='release'
+    CARGO_INCREMENTAL=1 cargo rustc --release
+else
+    echo $LD_LIBRARY_PATH
+    export CHANNEL='debug'
+    cargo rustc
+fi
+
+source config.sh
+
+rm -r target/out || true
+mkdir -p target/out/gccjit
+
+echo "[BUILD] sysroot"
+time ./build_sysroot/build_sysroot.sh $CHANNEL
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml b/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml
new file mode 100644 (file)
index 0000000..cfadf47
--- /dev/null
@@ -0,0 +1,19 @@
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+compiler_builtins = "0.1"
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.release]
+debug = true
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh
new file mode 100755 (executable)
index 0000000..d1dcf49
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
+
+set -e
+cd $(dirname "$0")
+
+pushd ../ >/dev/null
+source ./config.sh
+popd >/dev/null
+
+# Cleanup for previous run
+#     v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
+rm -r sysroot/ 2>/dev/null || true
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort"
+if [[ "$1" == "--release" ]]; then
+    sysroot_channel='release'
+    RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
+else
+    sysroot_channel='debug'
+    cargo build --target $TARGET_TRIPLE
+fi
+
+# Copy files to sysroot
+mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh b/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh
new file mode 100755 (executable)
index 0000000..071e7ed
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -e
+cd $(dirname "$0")
+
+SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e $SRC_DIR ]; then
+    echo "Please install rust-src component"
+    exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -r $SRC_DIR/library $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+
+# This is needed on systems where nothing is configured.
+# git really needs something here, or it will fail.
+# Even using --author is not enough.
+git config user.email || git config user.email "none@example.com"
+git config user.name || git config user.name "None"
+
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs b/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs
new file mode 100644 (file)
index 0000000..0c9ac1a
--- /dev/null
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_gcc/cargo.sh b/compiler/rustc_codegen_gcc/cargo.sh
new file mode 100755 (executable)
index 0000000..1001c52
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+if [ -z $CHANNEL ]; then
+export CHANNEL='debug'
+fi
+
+pushd $(dirname "$0") >/dev/null
+source config.sh
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat rust-toolchain)
+
+popd >/dev/null
+
+if [[ $(rustc -V) != $(rustc +${TOOLCHAIN} -V) ]]; then
+    echo "rustc_codegen_gcc is build for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
+    echo "Using $(rustc +${TOOLCHAIN} -V)."
+fi
+
+cmd=$1
+shift
+
+RUSTDOCFLAGS="$RUSTFLAGS" cargo +${TOOLCHAIN} $cmd --target $TARGET_TRIPLE $@
diff --git a/compiler/rustc_codegen_gcc/clean_all.sh b/compiler/rustc_codegen_gcc/clean_all.sh
new file mode 100755 (executable)
index 0000000..a77d148
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash --verbose
+set -e
+
+rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
+rm -rf regex/ simple-raytracer/
diff --git a/compiler/rustc_codegen_gcc/config.sh b/compiler/rustc_codegen_gcc/config.sh
new file mode 100644 (file)
index 0000000..87df2f2
--- /dev/null
@@ -0,0 +1,52 @@
+set -e
+
+export CARGO_INCREMENTAL=0
+
+if [ -f ./gcc_path ]; then 
+    export GCC_PATH=$(cat gcc_path)
+else
+    echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+    exit 1
+fi
+
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+   dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+   dylib_ext='dylib'
+else
+   echo "Unsupported os"
+   exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="m68k-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+   if [[ "$TARGET_TRIPLE" == "m68k-unknown-linux-gnu" ]]; then
+       TARGET_TRIPLE="mips-unknown-linux-gnu"
+       linker='-Clinker=m68k-linux-gcc'
+   elif [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+      # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+      linker='-Clinker=aarch64-linux-gnu-gcc'
+      RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+   else
+      echo "Unknown non-native platform"
+   fi
+fi
+
+export RUSTFLAGS="$linker -Cpanic=abort -Zsymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot"
+
+# FIXME(antoyo): remove once the atomic shim is gone
+if [[ `uname` == 'Darwin' ]]; then
+   export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
+export RUSTC_LOG=warn # display metadata load errors
+
+export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
diff --git a/compiler/rustc_codegen_gcc/example/alloc_example.rs b/compiler/rustc_codegen_gcc/example/alloc_example.rs
new file mode 100644 (file)
index 0000000..bc6dd00
--- /dev/null
@@ -0,0 +1,41 @@
+#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::prelude::v1::*;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[link(name = "c")]
+extern "C" {
+    fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    unsafe {
+        core::intrinsics::abort();
+    }
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+    unsafe {
+        core::intrinsics::abort();
+    }
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    let world: Box<&str> = box "Hello World!\0";
+    unsafe {
+        puts(*world as *const str as *const u8);
+    }
+
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
new file mode 100644 (file)
index 0000000..5f66ca6
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![feature(allocator_api, rustc_private)]
+#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+              target_arch = "arm",
+              target_arch = "mips",
+              target_arch = "powerpc",
+              target_arch = "powerpc64")))]
+const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+              target_arch = "aarch64",
+              target_arch = "mips64",
+              target_arch = "s390x",
+              target_arch = "sparc64")))]
+const MIN_ALIGN: usize = 16;
+
+pub struct System;
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+    use core::alloc::{GlobalAlloc, Layout};
+    use core::cmp;
+    use core::ptr;
+    impl super::System {
+        pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
+                                              new_size: usize) -> *mut u8 {
+            // Docs for GlobalAlloc::realloc require this to be valid:
+            let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+            let new_ptr = GlobalAlloc::alloc(self, new_layout);
+            if !new_ptr.is_null() {
+                let size = cmp::min(old_layout.size(), new_size);
+                ptr::copy_nonoverlapping(ptr, new_ptr, size);
+                GlobalAlloc::dealloc(self, ptr, old_layout);
+            }
+            new_ptr
+        }
+    }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+    extern crate libc;
+    use core::ptr;
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::malloc(layout.size()) as *mut u8
+            } else {
+                #[cfg(target_os = "macos")]
+                {
+                    if layout.align() > (1 << 31) {
+                        return ptr::null_mut()
+                    }
+                }
+                aligned_malloc(&layout)
+            }
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::calloc(layout.size(), 1) as *mut u8
+            } else {
+                let ptr = self.alloc(layout.clone());
+                if !ptr.is_null() {
+                    ptr::write_bytes(ptr, 0, layout.size());
+                }
+                ptr
+            }
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+            libc::free(ptr as *mut libc::c_void)
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+                libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+    #[cfg(any(target_os = "android",
+              target_os = "hermit",
+              target_os = "redox",
+              target_os = "solaris"))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        // On android we currently target API level 9 which unfortunately
+        // doesn't have the `posix_memalign` API used below. Instead we use
+        // `memalign`, but this unfortunately has the property on some systems
+        // where the memory returned cannot be deallocated by `free`!
+        //
+        // Upon closer inspection, however, this appears to work just fine with
+        // Android, so for this platform we should be fine to call `memalign`
+        // (which is present in API level 9). Some helpful references could
+        // possibly be chromium using memalign [1], attempts at documenting that
+        // memalign + free is ok [2] [3], or the current source of chromium
+        // which still uses memalign on android [4].
+        //
+        // [1]: https://codereview.chromium.org/10796020/
+        // [2]: https://code.google.com/p/android/issues/detail?id=35391
+        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+        //                                       /memory/aligned_memory.cc
+        libc::memalign(layout.align(), layout.size()) as *mut u8
+    }
+    #[cfg(not(any(target_os = "android",
+                  target_os = "hermit",
+                  target_os = "redox",
+                  target_os = "solaris")))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        let mut out = ptr::null_mut();
+        let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+        if ret != 0 {
+            ptr::null_mut()
+        } else {
+            out as *mut u8
+        }
+    }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    type LPVOID = *mut u8;
+    type HANDLE = LPVOID;
+    type SIZE_T = usize;
+    type DWORD = u32;
+    type BOOL = i32;
+    extern "system" {
+        fn GetProcessHeap() -> HANDLE;
+        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+        fn GetLastError() -> DWORD;
+    }
+    #[repr(C)]
+    struct Header(*mut u8);
+    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+        &mut *(ptr as *mut Header).offset(-1)
+    }
+    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+        let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+        *get_header(aligned) = Header(ptr);
+        aligned
+    }
+    #[inline]
+    unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+        let ptr = if layout.align() <= MIN_ALIGN {
+            HeapAlloc(GetProcessHeap(), flags, layout.size())
+        } else {
+            let size = layout.size() + layout.align();
+            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+            if ptr.is_null() {
+                ptr
+            } else {
+                align_ptr(ptr, layout.align())
+            }
+        };
+        ptr as *mut u8
+    }
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, 0)
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+            if layout.align() <= MIN_ALIGN {
+                let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            } else {
+                let header = get_header(ptr);
+                let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            }
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN {
+                HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644 (file)
index 0000000..ddeb752
--- /dev/null
@@ -0,0 +1,69 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
+use std::{
+    ops::{Deref, CoerceUnsized, DispatchFromDyn},
+    marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.0
+    }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+trait Trait {
+    // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+    // without unsized_locals), but wrappers around `Self` currently are not.
+    // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+    // fn wrapper(self: Wrapper<Self>) -> i32;
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+        ***self
+    }
+}
+
+fn main() {
+    let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+    assert_eq!(pw.ptr_wrapper(), 5);
+
+    let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+    assert_eq!(wp.wrapper_ptr(), 6);
+
+    let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+    assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_gcc/example/dst-field-align.rs b/compiler/rustc_codegen_gcc/example/dst-field-align.rs
new file mode 100644 (file)
index 0000000..6c338e9
--- /dev/null
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+    a: u16,
+    b: T
+}
+
+trait Bar {
+    fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+    fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+    a: T
+}
+
+struct HasDrop<T: ?Sized> {
+    ptr: Box<usize>,
+    data: T
+}
+
+fn main() {
+    // Test that zero-offset works properly
+    let b : Baz<usize> = Baz { a: 7 };
+    assert_eq!(b.a.get(), 7);
+    let b : &Baz<dyn Bar> = &b;
+    assert_eq!(b.a.get(), 7);
+
+    // Test that the field is aligned properly
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    assert_eq!(f.b.get(), 11);
+    let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+    let f : &Foo<dyn Bar> = &f;
+    let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+    assert_eq!(f.b.get(), 11);
+
+    // The pointers should be the same
+    assert_eq!(ptr1, ptr2);
+
+    // Test that nested DSTs work properly
+    let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+    assert_eq!(f.b.b.get(), 17);
+    let f : &Foo<Foo<dyn Bar>> = &f;
+    assert_eq!(f.b.b.get(), 17);
+
+    // Test that getting the pointer via destructuring works
+
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    let f : &Foo<dyn Bar> = &f;
+    let &Foo { a: _, b: ref bar } = f;
+    assert_eq!(bar.get(), 11);
+
+    // Make sure that drop flags don't screw things up
+
+    let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+        ptr: Box::new(0),
+        data: Baz { a: [1,2,3,4] }
+    };
+    assert_eq!([1,2,3,4], d.data.a);
+
+    let d : &HasDrop<Baz<[i32]>> = &d;
+    assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_gcc/example/example.rs b/compiler/rustc_codegen_gcc/example/example.rs
new file mode 100644 (file)
index 0000000..5878e85
--- /dev/null
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+fn abc(a: u8) -> u8 {
+    a * 2
+}
+
+fn bcd(b: bool, a: u8) -> u8 {
+    if b {
+        a * 2
+    } else {
+        a * 3
+    }
+}
+
+fn call() {
+    abc(42);
+}
+
+fn indirect_call() {
+    let f: fn() = call;
+    f();
+}
+
+enum BoolOption {
+    Some(bool),
+    None,
+}
+
+fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+    match o {
+        BoolOption::Some(b) => b,
+        BoolOption::None => d,
+    }
+}
+
+fn ret_42() -> u8 {
+    42
+}
+
+fn return_str() -> &'static str {
+    "hello world"
+}
+
+fn promoted_val() -> &'static u8 {
+    &(1 * 2)
+}
+
+fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+    abc as *const u8
+}
+
+fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+    a == b
+}
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+    (
+        a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+        b as u32,
+    )
+}
+
+fn char_cast(c: char) -> u8 {
+    c as u8
+}
+
+pub struct DebugTuple(());
+
+fn debug_tuple() -> DebugTuple {
+    DebugTuple(())
+}
+
+fn size_of<T>() -> usize {
+    intrinsics::size_of::<T>()
+}
+
+fn use_size_of() -> usize {
+    size_of::<u64>()
+}
+
+unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+    intrinsics::copy::<u8>(src, dst, 1);
+}
+
+unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+    let copy2 = &intrinsics::copy::<u8>;
+    copy2(src, dst, 1);
+}
+
+const ABC: u8 = 6 * 7;
+
+fn use_const() -> u8 {
+    ABC
+}
+
+pub fn call_closure_3arg() {
+    (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+    (|_, _| {})(0u8, 42u16)
+}
+
+struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+    type Output = (u8, u8);
+
+    #[inline]
+    extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+        self.call_mut(arg)
+    }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+    #[inline]
+    extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+        (0, 42)
+    }
+}
+
+pub fn call_is_not_empty() {
+    IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+fn eq_char(a: char, b: char) -> bool {
+    a == b
+}
+
+unsafe fn transmute(c: char) -> u32 {
+    intrinsics::transmute(c)
+}
+
+unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+    &*s
+}
+
+fn use_array(arr: [u8; 3]) -> u8 {
+    arr[1]
+}
+
+fn repeat_array() -> [u8; 3] {
+    [0; 3]
+}
+
+fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+    arr
+}
+
+unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+    intrinsics::ctlz_nonzero(a)
+}
+
+fn ptr_as_usize(ptr: *const u8) -> usize {
+    ptr as usize
+}
+
+fn float_cast(a: f32, b: f64) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+fn make_array() -> [u8; 3] {
+    [42, 0, 5]
+}
+
+fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+    &("abc", "some")
+}
+
+fn index_slice(s: &[u8]) -> u8 {
+    s[2]
+}
+
+pub struct StrWrapper {
+    s: str,
+}
+
+fn str_wrapper_get(w: &StrWrapper) -> &str {
+    &w.s
+}
+
+fn i16_as_i8(a: i16) -> i8 {
+    a as i8
+}
+
+struct Unsized(u8, str);
+
+fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+    &u.0
+}
+
+fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+    &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+    a.0
+}
diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs
new file mode 100644 (file)
index 0000000..1067cee
--- /dev/null
@@ -0,0 +1,585 @@
+#![feature(
+    no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+    untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+    thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+    intrinsics::unreachable();
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+    type Output;
+
+    fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+    type Output = bool;
+
+    fn not(self) -> bool {
+        !self
+    }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+impl Mul for usize {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+    type Output;
+
+    fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+    type Output = Self;
+
+    fn rem(self, rhs: Self) -> Self {
+        self % rhs
+    }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        self | rhs
+    }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        *self | rhs
+    }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+    fn eq(&self, other: &Rhs) -> bool;
+    fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+    fn eq(&self, other: &u8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u16 {
+    fn eq(&self, other: &u16) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u16) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u32 {
+    fn eq(&self, other: &u32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u64 {
+    fn eq(&self, other: &u64) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u64) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for usize {
+    fn eq(&self, other: &usize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &usize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i8 {
+    fn eq(&self, other: &i8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i32 {
+    fn eq(&self, other: &i32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for isize {
+    fn eq(&self, other: &isize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &isize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for char {
+    fn eq(&self, other: &char) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &char) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+    fn eq(&self, other: &*const T) -> bool {
+        *self == *other
+    }
+    fn ne(&self, other: &*const T) -> bool {
+        *self != *other
+    }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+    type Output;
+
+    fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+    type Output = i8;
+
+    fn neg(self) -> i8 {
+        -self
+    }
+}
+
+impl Neg for i16 {
+    type Output = i16;
+
+    fn neg(self) -> i16 {
+        -self
+    }
+}
+
+impl Neg for isize {
+    type Output = isize;
+
+    fn neg(self) -> isize {
+        -self
+    }
+}
+
+impl Neg for f32 {
+    type Output = f32;
+
+    fn neg(self) -> f32 {
+        -self
+    }
+}
+
+pub enum Option<T> {
+    Some(T),
+    None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+    #[lang = "fn_once_output"]
+    type Output;
+
+    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\n\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+    loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+    type Target: ?Sized;
+
+    fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(*mut T);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+    fn drop(&mut self) {
+        // drop is currently performed by compiler.
+    }
+}
+
+impl<T> Deref for Box<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &**self
+    }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+    libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+    libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+    fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+    pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+    pub uninit: (),
+    pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+        pub fn size_of<T>() -> usize;
+        pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn min_align_of<T>() -> usize;
+        pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn transmute<T, U>(e: T) -> U;
+        pub fn ctlz_nonzero<T>(x: T) -> T;
+        pub fn needs_drop<T>() -> bool;
+        pub fn bitreverse<T>(x: T) -> T;
+        pub fn bswap<T>(x: T) -> T;
+        pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+        pub fn unreachable() -> !;
+    }
+}
+
+pub mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn puts(s: *const u8) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn malloc(size: usize) -> *mut u8;
+        pub fn free(ptr: *mut u8);
+        pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+        pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+        pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+extern {
+    type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[no_mangle]
+pub fn get_tls() -> u8 {
+    #[thread_local]
+    static A: u8 = 42;
+
+    A
+}
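
Every `#[lang = "..."]` trait above has to be spelled out because `#![no_core]` removes everything the compiler normally provides: operator syntax is sugar for these trait methods. A standalone illustration in ordinary std Rust (not part of mini_core) of the desugaring that makes, for example, the `add` lang item necessary:

    use std::ops::Add;

    fn main() {
        let sugar = 1u8 + 2u8;              // `+` desugars to...
        let desugared = Add::add(1u8, 2u8); // ...the `add` lang item's method
        assert_eq!(sugar, desugared);
    }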
diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
new file mode 100644 (file)
index 0000000..69d5915
--- /dev/null
@@ -0,0 +1,424 @@
+// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
+
+#![feature(
+    no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage,
+    extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const u8) {
+    puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+    fn report(self) -> i32;
+}
+
+impl Termination for () {
+    fn report(self) -> i32 {
+        unsafe {
+            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+            *NUM_REF as i32
+        }
+    }
+}
+
+trait SomeTrait {
+    fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+    fn object_safe(&self) {
+        unsafe {
+            puts(*self as *const str as *const u8);
+        }
+    }
+}
+
+struct NoisyDrop {
+    text: &'static str,
+    inner: NoisyDropInner,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+    fn drop(&mut self) {
+        unsafe {
+            puts(self.text as *const str as *const u8);
+        }
+    }
+}
+
+impl Drop for NoisyDropInner {
+    fn drop(&mut self) {
+        unsafe {
+            puts("Inner got dropped!\0" as *const str as *const u8);
+        }
+    }
+}
+
+impl SomeTrait for NoisyDrop {
+    fn object_safe(&self) {}
+}
+
+enum Ordering {
+    Less = -1,
+    Equal = 0,
+    Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+    main: fn() -> T,
+    argc: isize,
+    argv: *const *const u8,
+) -> isize {
+    if argc == 3 {
+        unsafe { puts(*argv); }
+        unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+        unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+    }
+
+    main().report();
+    0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+macro_rules! assert {
+    ($e:expr) => {
+        if !$e {
+            panic(stringify!(! $e));
+        }
+    };
+}
+
+macro_rules! assert_eq {
+    ($l:expr, $r: expr) => {
+        if $l != $r {
+            panic(stringify!($l != $r));
+        }
+    }
+}
+
+struct Unique<T: ?Sized> {
+    pointer: *const T,
+    _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+unsafe fn zeroed<T>() -> T {
+    let mut uninit = MaybeUninit { uninit: () };
+    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+    uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+    (0, 0)
+}
+
+fn call_return_u128_pair() {
+    return_u128_pair();
+}
+
+fn main() {
+    take_unique(Unique {
+        pointer: 0 as *const (),
+        _marker: PhantomData,
+    });
+    take_f32(0.1);
+
+    //call_return_u128_pair();
+
+    let slice = &[0, 1] as &[i32];
+    let slice_ptr = slice as *const [i32] as *const i32;
+
+    assert_eq!(slice_ptr as usize % 4, 0);
+
+    //return;
+
+    unsafe {
+        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+        let hello: &[u8] = b"Hello\0" as &[u8; 6];
+        let ptr: *const u8 = hello as *const [u8] as *const u8;
+        puts(ptr);
+
+        let world: Box<&str> = box "World!\0";
+        puts(*world as *const str as *const u8);
+        world as Box<dyn SomeTrait>;
+
+        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+        let chars = &['C', 'h', 'a', 'r', 's'];
+        let chars = chars as &[char];
+        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+        let a: &dyn SomeTrait = &"abc\0";
+        a.object_safe();
+
+        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+        assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+        assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+        assert!(!intrinsics::needs_drop::<u8>());
+        assert!(intrinsics::needs_drop::<NoisyDrop>());
+
+        Unique {
+            pointer: 0 as *const &str,
+            _marker: PhantomData,
+        } as Unique<dyn SomeTrait>;
+
+        struct MyDst<T: ?Sized>(T);
+
+        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+        struct Foo {
+            x: u8,
+            y: !,
+        }
+
+        unsafe fn uninitialized<T>() -> T {
+            MaybeUninit { uninit: () }.value.value
+        }
+
+        zeroed::<(u8, u8)>();
+        #[allow(unreachable_code)]
+        {
+            if false {
+                zeroed::<!>();
+                zeroed::<Foo>();
+                uninitialized::<Foo>();
+            }
+        }
+    }
+
+    let _ = box NoisyDrop {
+        text: "Boxed outer got dropped!\0",
+        inner: NoisyDropInner,
+    } as Box<dyn SomeTrait>;
+
+    const FUNC_REF: Option<fn()> = Some(main);
+    match FUNC_REF {
+        Some(_) => {},
+        None => assert!(false),
+    }
+
+    match Ordering::Less {
+        Ordering::Less => {},
+        _ => assert!(false),
+    }
+
+    [NoisyDropInner, NoisyDropInner];
+
+    let x = &[0u32, 42u32] as &[u32];
+    match x {
+        [] => assert_eq!(0u32, 1),
+        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+    }
+
+    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+    extern {
+        #[linkage = "weak"]
+        static ABC: *const u8;
+    }
+
+    {
+        extern {
+            #[linkage = "weak"]
+            static ABC: *const u8;
+        }
+    }
+
+    // TODO(antoyo): to make this work, support weak linkage.
+    //unsafe { assert_eq!(ABC as usize, 0); }
+
+    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+    let f = 1000.0;
+    assert_eq!(f as u8, 255);
+    let f2 = -1000.0;
+    assert_eq!(f2 as i8, -128);
+    assert_eq!(f2 as u8, 0);
+
+    static ANOTHER_STATIC: &u8 = &A_STATIC;
+    assert_eq!(*ANOTHER_STATIC, 42);
+
+    check_niche_behavior();
+
+    extern "C" {
+        type ExternType;
+    }
+
+    struct ExternTypeWrapper {
+        _a: ExternType,
+    }
+
+    let nullptr = 0 as *const ();
+    let extern_nullptr = nullptr as *const ExternTypeWrapper;
+    extern_nullptr as *const ();
+    let slice_ptr = &[] as *const [u8];
+    slice_ptr as *const u8;
+
+    #[cfg(not(jit))]
+    test_tls();
+}
+
+#[repr(C)]
+enum c_void {
+    _1,
+    _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+    __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+    fn pthread_create(
+        native: *mut pthread_t,
+        attr: *const pthread_attr_t,
+        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+        value: *mut c_void
+    ) -> c_int;
+
+    fn pthread_join(
+        native: pthread_t,
+        value: *mut *mut c_void
+    ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+    unsafe { TLS = 0; }
+    0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+    unsafe {
+        let mut attr: pthread_attr_t = zeroed();
+        let mut thread: pthread_t = 0;
+
+        assert_eq!(TLS, 42);
+
+        if pthread_attr_init(&mut attr) != 0 {
+            assert!(false);
+        }
+
+        if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+            assert!(false);
+        }
+
+        let mut res = 0 as *mut c_void;
+        pthread_join(thread, &mut res);
+
+        // TLS of main thread must not have been changed by the other thread.
+        assert_eq!(TLS, 42);
+
+        puts("TLS works!\n\0" as *const str as *const u8);
+    }
+}
+
+// Copied from ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+    V1 { f: bool },
+    V2 { f: Infallible },
+    V3,
+    V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+    V1 { f: bool },
+
+    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+    V3,
+    V4,
+}
+
+fn check_niche_behavior() {
+    if let E1::V2 { .. } = (E1::V1 { f: true }) {
+        intrinsics::abort();
+    }
+
+    if let E2::V1 { .. } = E2::V3::<Infallible> {
+        intrinsics::abort();
+    }
+}
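
Background for `check_niche_behavior`: a niche is the set of bit patterns that are invalid for a field's type (for `bool`, everything except 0 and 1), and the layout code reuses those patterns to encode dataless variants without a separate tag. A standalone illustration in std Rust; the sizes below reflect how current rustc lays these types out, not a documented guarantee:

    use std::mem::size_of;

    #[allow(dead_code)]
    enum WithNiche {
        V1 { f: bool }, // valid payloads use byte values 0 and 1
        V2,             // encoded in the niche, e.g. as f == 2
        V3,             // e.g. as f == 3
    }

    fn main() {
        // All three variants fit in the bool's single byte; no extra tag.
        assert_eq!(size_of::<WithNiche>(), 1);
        // Option<bool> relies on the same trick: None lives in the niche.
        assert_eq!(size_of::<Option<bool>>(), 1);
    }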
diff --git a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs
new file mode 100644 (file)
index 0000000..2e2b005
--- /dev/null
@@ -0,0 +1,37 @@
+#![feature(start, box_syntax, core_intrinsics, lang_items)]
+#![no_std]
+
+#[link(name = "c")]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    unsafe {
+        core::intrinsics::abort();
+    }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() {}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+    core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    for i in 2..100_000_000 {
+        black_box((i + 1) % i);
+    }
+
+    0
+}
+
+#[inline(never)]
+fn black_box(i: u32) {
+    if i != 1 {
+        unsafe { core::intrinsics::abort(); }
+    }
+}
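
The hand-rolled `black_box` above is an optimization barrier: the `#[inline(never)]` call keeps `(i + 1) % i` from being optimized away, so the loop actually measures integer division. In std code the same effect is available from the compiler itself; a sketch using `std::hint::black_box`, which was stabilized in Rust 1.66, long after this file was written:

    use std::hint::black_box;

    fn main() {
        for i in 2u32..100_000_000 {
            // The hint forces the value to be computed even though it is unused.
            black_box((i + 1) % i);
        }
    }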
diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs
new file mode 100644 (file)
index 0000000..eba0eb8
--- /dev/null
@@ -0,0 +1,278 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+extern {
+    pub fn printf(format: *const i8, ...) -> i32;
+}
+
+fn main() {
+    let mutex = std::sync::Mutex::new(());
+    let _guard = mutex.lock().unwrap();
+
+    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+    let stderr = ::std::io::stderr();
+    let mut stderr = stderr.lock();
+
+    std::thread::spawn(move || {
+        println!("Hello from another thread!");
+    });
+
+    writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+    let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+    println!("cargo:rustc-link-lib=z");
+
+    static ONCE: std::sync::Once = std::sync::Once::new();
+    ONCE.call_once(|| {});
+
+    let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+    // Make sure ByValPair values with differently sized components are correctly passed
+    map(None::<(u8, Box<Instruction>)>);
+
+    println!("{}", 2.3f32.exp());
+    println!("{}", 2.3f32.exp2());
+    println!("{}", 2.3f32.abs());
+    println!("{}", 2.3f32.sqrt());
+    println!("{}", 2.3f32.floor());
+    println!("{}", 2.3f32.ceil());
+    println!("{}", 2.3f32.min(1.0));
+    println!("{}", 2.3f32.max(1.0));
+    println!("{}", 2.3f32.powi(2));
+    println!("{}", 2.3f32.log2());
+    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+    println!("{}", 2.3f32.powf(2.0));
+
+    assert_eq!(-128i8, (-128i8).saturating_sub(1));
+    assert_eq!(127i8, 127i8.saturating_sub(-128));
+    assert_eq!(-128i8, (-128i8).saturating_add(-128));
+    assert_eq!(127i8, 127i8.saturating_add(1));
+
+    assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
+    assert_eq!(32767i16, 32767i16.saturating_add(1));
+
+    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+
+    let _d = 0i128.checked_div(2i128);
+    let _d = 0u128.checked_div(2u128);
+    assert_eq!(1u128 + 2, 3);
+
+    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+    let tmp = 353985398u128;
+    assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+    let tmp = -0x1234_5678_9ABC_DEF0i64;
+    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+    // Check that all u/i128 <-> float casts work correctly.
+    let hundred_u128 = 100u128;
+    let hundred_i128 = 100i128;
+    let hundred_f32 = 100.0f32;
+    let hundred_f64 = 100.0f64;
+    assert_eq!(hundred_u128 as f32, 100.0);
+    assert_eq!(hundred_u128 as f64, 100.0);
+    assert_eq!(hundred_f32 as u128, 100);
+    assert_eq!(hundred_f64 as u128, 100);
+    assert_eq!(hundred_i128 as f32, 100.0);
+    assert_eq!(hundred_i128 as f64, 100.0);
+    assert_eq!(hundred_f32 as i128, 100);
+    assert_eq!(hundred_f64 as i128, 100);
+
+    let _a = 1u32 << 2u8;
+
+    let empty: [i32; 0] = [];
+    assert!(empty.is_sorted());
+
+    println!("{:?}", std::intrinsics::caller_location());
+
+    /*unsafe {
+        test_simd();
+    }*/
+
+    Box::pin(move |mut _task_context| {
+        yield ();
+    }).as_mut().resume(0);
+
+    println!("End");
+}
+
+/*#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+    let x = _mm_setzero_si128();
+    let y = _mm_set1_epi16(7);
+    let or = _mm_or_si128(x, y);
+    let cmp_eq = _mm_cmpeq_epi8(y, y);
+    let cmp_lt = _mm_cmplt_epi8(y, y);
+
+    /*assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+    test_mm_slli_si128();
+    test_mm_movemask_epi8();
+    test_mm256_movemask_epi8();
+    test_mm_add_epi8();
+    test_mm_add_pd();
+    test_mm_cvtepi8_epi16();
+    test_mm_cvtsi128_si64();
+
+    // FIXME(#666) implement `#[rustc_arg_required_const(..)]` support
+    //test_mm_extract_epi8();
+
+    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+    assert_eq!(mask1, 1);*/
+}*/
+
+/*#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 1);
+    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 15);
+    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 16);
+    assert_eq_m128i(r, _mm_set1_epi8(0));
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, -1);
+    assert_eq_m128i(_mm_set1_epi8(0), r);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, -0x80000000);
+    assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+        0b0101, 0b1111_0000u8 as i8, 0, 0,
+        0, 0, 0b1111_0000u8 as i8, 0b0101,
+        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+    );
+    let r = _mm_movemask_epi8(a);
+    assert_eq!(r, 0b10100100_00100101);
+}
+
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+    let a = _mm256_set1_epi8(-1);
+    let r = _mm256_movemask_epi8(a);
+    let e = -1;
+    assert_eq!(r, e);
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    #[rustfmt::skip]
+    let b = _mm_setr_epi8(
+        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+    );
+    let r = _mm_add_epi8(a, b);
+    #[rustfmt::skip]
+    let e = _mm_setr_epi8(
+        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+    );
+    assert_eq_m128i(r, e);
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+    let a = _mm_setr_pd(1.0, 2.0);
+    let b = _mm_setr_pd(5.0, 10.0);
+    let r = _mm_add_pd(a, b);
+    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+    unsafe {
+        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+    }
+}
+
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+    assert_eq!(r, 5);
+}
+
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+    let a = _mm_set1_epi8(10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(10);
+    assert_eq_m128i(r, e);
+    let a = _mm_set1_epi8(-10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(-10);
+    assert_eq_m128i(r, e);
+}
+
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        -1, 1, 2, 3, 4, 5, 6, 7,
+        8, 9, 10, 11, 12, 13, 14, 15
+    );
+    let r1 = _mm_extract_epi8(a, 0);
+    let r2 = _mm_extract_epi8(a, 19);
+    assert_eq!(r1, 0xFF);
+    assert_eq!(r2, 3);
+}*/
+
+#[derive(PartialEq)]
+enum LoopState {
+    Continue(()),
+    Break(())
+}
+
+pub enum Instruction {
+    Increment,
+    Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+    match a {
+        None => None,
+        Some((_, instr)) => Some(instr),
+    }
+}
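
The `Box::pin(...).as_mut().resume(0)` near the end of `main` drives a generator by hand: a generator must be pinned before it can be resumed, and each `resume` call runs it to the next `yield` or to completion. A minimal sketch of that protocol (nightly-only `generators`/`generator_trait` features, matching this file's toolchain):

    #![feature(generators, generator_trait)]

    use std::ops::{Generator, GeneratorState};

    fn main() {
        let mut gen = Box::pin(|| {
            yield 1; // the first resume stops here
            "done"   // the second resume runs to completion
        });
        assert!(matches!(gen.as_mut().resume(()), GeneratorState::Yielded(1)));
        assert!(matches!(gen.as_mut().resume(()), GeneratorState::Complete("done")));
    }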
diff --git a/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs
new file mode 100644 (file)
index 0000000..2cb8478
--- /dev/null
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+    ($($e:expr),* $(,)?) => {
+        [$(N($e)),*]
+    }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+    ($e:expr) => { Z }
+}
+
+macro_rules! z {
+    ($($e:expr),* $(,)?) => {
+        [$(zed!($e)),*]
+    }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+    ($e:expr, $t:ty $(,)?) => {{
+        const CONST_EVAL: $t = $e;
+        const fn const_eval() -> $t { $e }
+        static CONST_EVAL2: $t = const_eval();
+        let runtime_eval = $e;
+        assert_eq!(CONST_EVAL, runtime_eval);
+        assert_eq!(CONST_EVAL2, runtime_eval);
+    }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+//     ($name); X; Y:
+//     struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+    (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+        macro_rules! single {
+            ($($dollar $placeholder:ident),*) => { $($test)* }
+        }
+        $(single!($($values),+);)*
+    }
+}
+
+fn main() {
+    repeat! {
+        ($arr $Ty); n, N; z, Z:
+        compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+        compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+        compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+        compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+        compare_evaluation!(
+            { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+        compare_evaluation!(
+            { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+
+        compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+        compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+        compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+    }
+
+    compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+    compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
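
`compare_evaluation!` pins down that a subslice pattern produces the same value whether it is evaluated by CTFE (as a `const` initializer and through a `const fn`) or at runtime. A minimal non-macro instance of the same check:

    fn main() {
        // One binding expression, evaluated once at compile time, once at runtime.
        const AT_COMPILE_TIME: [u8; 2] = { let [_, x @ .., _] = [1u8, 2, 3, 4]; x };
        let at_runtime = { let [_, x @ .., _] = [1u8, 2, 3, 4]; x };
        assert_eq!(AT_COMPILE_TIME, at_runtime);
        assert_eq!(AT_COMPILE_TIME, [2, 3]);
    }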
diff --git a/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs
new file mode 100644 (file)
index 0000000..93bab17
--- /dev/null
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+    tracked()
+}
+
+fn main() {
+    let location = Location::caller();
+    assert_eq!(location.file(), file!());
+    assert_eq!(location.line(), 21);
+    assert_eq!(location.column(), 20);
+
+    let tracked = tracked();
+    assert_eq!(tracked.file(), file!());
+    assert_eq!(tracked.line(), 26);
+    assert_eq!(tracked.column(), 19);
+
+    let nested = nested_intrinsic();
+    assert_eq!(nested.file(), file!());
+    assert_eq!(nested.line(), 13);
+    assert_eq!(nested.column(), 5);
+
+    let contained = nested_tracked();
+    assert_eq!(contained.file(), file!());
+    assert_eq!(contained.line(), 17);
+    assert_eq!(contained.column(), 5);
+}
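
The hard-coded numbers above index into this very file: `#[track_caller]` makes `Location::caller()` inside `tracked` report the call site (lines 26 and 17), while the plain `nested_intrinsic` reports its own body (line 13). A minimal sketch of the attribute in isolation:

    use std::panic::Location;

    #[track_caller]
    fn here() -> &'static Location<'static> {
        // With the attribute, this reports where `here()` was called from,
        // not this line.
        Location::caller()
    }

    fn main() {
        let loc = here();
        println!("called from {}:{}:{}", loc.file(), loc.line(), loc.column());
    }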
diff --git a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch
new file mode 100644 (file)
index 0000000..aae62a9
--- /dev/null
@@ -0,0 +1,63 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml         | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs  | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs             | 2 ++
+ library/core/tests/slice.rs           | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2018"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+     mod dragon;
+     mod grisu;
+ }
+-mod random;
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+     match decode(v).1 {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+     }
+ }
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+     v.select_nth_unstable(0);
+     assert!(v == [0xDEADBEEF]);
+ }
++*/
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch b/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch
new file mode 100644 (file)
index 0000000..ee5ba44
--- /dev/null
@@ -0,0 +1,49 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs       |  4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs    |  5 +++--
+ library/core/tests/time.rs       |  1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+     assert_eq!(i.get(), 5);
+ }
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+     assert_eq!(COUNTER.load(Relaxed), 0);
+     panic!("test succeeded")
+ }
++*/
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+     assert_eq!(b, [1, 2, 3]);
+ }
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+     assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+     panic!("test succeeded")
+ }
++*/
+ #[test]
+ fn cell_allows_array_cycle() {
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/prepare.sh b/compiler/rustc_codegen_gcc/prepare.sh
new file mode 100755 (executable)
index 0000000..503fa29
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash --verbose
+set -e
+
+source prepare_build.sh
+
+cargo install hyperfine || echo "Skipping hyperfine install"
+
+git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
+pushd regex
+git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1
+popd
+
+git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
+pushd simple-raytracer
+git checkout -- .
+git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
+
+# build with cg_llvm for perf comparison
+cargo build
+mv target/debug/main raytracer_cg_llvm
+popd
diff --git a/compiler/rustc_codegen_gcc/prepare_build.sh b/compiler/rustc_codegen_gcc/prepare_build.sh
new file mode 100755 (executable)
index 0000000..ccf5350
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash --verbose
+set -e
+
+rustup component add rust-src rustc-dev llvm-tools-preview
+./build_sysroot/prepare_sysroot_src.sh
diff --git a/compiler/rustc_codegen_gcc/rust-toolchain b/compiler/rustc_codegen_gcc/rust-toolchain
new file mode 100644 (file)
index 0000000..d311a33
--- /dev/null
@@ -0,0 +1 @@
+nightly-2021-09-28
diff --git a/compiler/rustc_codegen_gcc/rustup.sh b/compiler/rustc_codegen_gcc/rustup.sh
new file mode 100755 (executable)
index 0000000..01ce5bb
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -e
+
+case $1 in
+    "prepare")
+        TOOLCHAIN=$(date +%Y-%m-%d)
+
+        echo "=> Installing new nightly"
+        rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
+        echo nightly-${TOOLCHAIN} > rust-toolchain
+
+        echo "=> Uninstalling all old nighlies"
+        for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
+            rustup toolchain uninstall $nightly
+        done
+
+        ./clean_all.sh
+        ./prepare.sh
+        ;;
+    "commit")
+        git add rust-toolchain
+        git commit -m "Rustup to $(rustc -V)"
+        ;;
+    *)
+        echo "Unknown command '$1'"
+        echo "Usage: ./rustup.sh prepare|commit"
+        ;;
+esac
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
new file mode 100644 (file)
index 0000000..ce428c5
--- /dev/null
@@ -0,0 +1,160 @@
+use gccjit::{ToRValue, Type};
+use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::intrinsic::ArgAbiExt;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
+        // TODO(antoyo)
+    }
+
+    fn get_param(&self, index: usize) -> Self::Value {
+        self.cx.current_func.borrow().expect("current func")
+            .get_param(index as i32)
+            .to_rvalue()
+    }
+}
+
+impl GccType for CastTarget {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+        let rest_gcc_unit = self.rest.unit.gcc_type(cx);
+        let (rest_count, rem_bytes) =
+            if self.rest.unit.size.bytes() == 0 {
+                (0, 0)
+            }
+            else {
+                (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
+            };
+
+        if self.prefix.iter().all(|x| x.is_none()) {
+            // Simplify to a single unit when there is no prefix and size <= unit size
+            if self.rest.total <= self.rest.unit.size {
+                return rest_gcc_unit;
+            }
+
+            // Simplify to array when all chunks are the same size and type
+            if rem_bytes == 0 {
+                return cx.type_array(rest_gcc_unit, rest_count);
+            }
+        }
+
+        // Create list of fields in the main structure
+        let mut args: Vec<_> = self
+            .prefix
+            .iter()
+            .flat_map(|option_kind| {
+                option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.gcc_type(cx))
+            })
+            .chain((0..rest_count).map(|_| rest_gcc_unit))
+            .collect();
+
+        // Append final integer
+        if rem_bytes != 0 {
+            // Only integers can be really split further.
+            assert_eq!(self.rest.unit.kind, RegKind::Integer);
+            args.push(cx.type_ix(rem_bytes * 8));
+        }
+
+        cx.type_struct(&args, false)
+    }
+}
+
+pub trait GccType {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
+}
+
+impl GccType for Reg {
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+        match self.kind {
+            RegKind::Integer => cx.type_ix(self.size.bits()),
+            RegKind::Float => {
+                match self.size.bits() {
+                    32 => cx.type_f32(),
+                    64 => cx.type_f64(),
+                    _ => bug!("unsupported float: {:?}", self),
+                }
+            },
+            RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
+        }
+    }
+}
+
+pub trait FnAbiGccExt<'gcc, 'tcx> {
+    // TODO(antoyo): return a function pointer type instead?
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
+    fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+    fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) {
+        let args_capacity: usize = self.args.iter().map(|arg|
+            if arg.pad.is_some() {
+                1
+            }
+            else {
+                0
+            } +
+            if let PassMode::Pair(_, _) = arg.mode {
+                2
+            } else {
+                1
+            }
+        ).sum();
+        let mut argument_tys = Vec::with_capacity(
+            if let PassMode::Indirect { .. } = self.ret.mode {
+                1
+            }
+            else {
+                0
+            } + args_capacity,
+        );
+
+        let return_ty =
+            match self.ret.mode {
+                PassMode::Ignore => cx.type_void(),
+                PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+                PassMode::Cast(cast) => cast.gcc_type(cx),
+                PassMode::Indirect { .. } => {
+                    argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                    cx.type_void()
+                }
+            };
+
+        for arg in &self.args {
+            // add padding
+            if let Some(ty) = arg.pad {
+                argument_tys.push(ty.gcc_type(cx));
+            }
+
+            let arg_ty = match arg.mode {
+                PassMode::Ignore => continue,
+                PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
+                PassMode::Pair(..) => {
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
+                    argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+                    continue;
+                }
+                PassMode::Indirect { extra_attrs: Some(_), .. } => {
+                    unimplemented!();
+                }
+                PassMode::Cast(cast) => cast.gcc_type(cx),
+                PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+            };
+            argument_tys.push(arg_ty);
+        }
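+
+        // As an illustrative example (not from the source): a Rust
+        // `fn(&[u8]) -> bool` passes the slice as `PassMode::Pair`, so the
+        // loop above pushes two parameters (data pointer and length), while
+        // the `bool` maps to an immediate return type.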
+
+        (return_ty, argument_tys, self.c_variadic)
+    }
+
+    fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        let (return_type, params, variadic) = self.gcc_type(cx);
+        let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+        pointer_type
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
new file mode 100644 (file)
index 0000000..6378a31
--- /dev/null
@@ -0,0 +1,116 @@
+use gccjit::{FunctionType, ToRValue};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+use crate::GccContext;
+
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
+    let context = &mods.context;
+    let usize =
+        match tcx.sess.target.pointer_width {
+            16 => context.new_type::<u16>(),
+            32 => context.new_type::<u32>(),
+            64 => context.new_type::<u64>(),
+            tws => bug!("Unsupported target word size for int: {}", tws),
+        };
+    let i8 = context.new_type::<i8>();
+    let i8p = i8.make_pointer();
+    let void = context.new_type::<()>();
+
+    for method in ALLOCATOR_METHODS {
+        let mut types = Vec::with_capacity(method.inputs.len());
+        for ty in method.inputs.iter() {
+            match *ty {
+                AllocatorTy::Layout => {
+                    types.push(usize);
+                    types.push(usize);
+                }
+                AllocatorTy::Ptr => types.push(i8p),
+                AllocatorTy::Usize => types.push(usize),
+
+                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+            }
+        }
+        let output = match method.output {
+            AllocatorTy::ResultPtr => Some(i8p),
+            AllocatorTy::Unit => None,
+
+            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                panic!("invalid allocator output")
+            }
+        };
+        let name = format!("__rust_{}", method.name);
+
+        let args: Vec<_> = types.iter().enumerate()
+            .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+            .collect();
+        let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+
+        if tcx.sess.target.options.default_hidden_visibility {
+            // TODO(antoyo): set visibility.
+        }
+        if tcx.sess.must_emit_unwind_tables() {
+            // TODO(antoyo): emit unwind tables.
+        }
+
+        let callee = kind.fn_name(method.name);
+        let args: Vec<_> = types.iter().enumerate()
+            .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+            .collect();
+        let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
+        // TODO(antoyo): set visibility.
+
+        let block = func.new_block("entry");
+
+        let args = args
+            .iter()
+            .enumerate()
+            .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+            .collect::<Vec<_>>();
+        let ret = context.new_call(None, callee, &args);
+        //llvm::LLVMSetTailCall(ret, True);
+        if output.is_some() {
+            block.end_with_return(None, ret);
+        }
+        else {
+            block.end_with_void_return(None);
+        }
+
+        // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
+        // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
+    }
+
+    let types = [usize, usize];
+    let name = "__rust_alloc_error_handler".to_string();
+    let args: Vec<_> = types.iter().enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+        .collect();
+    let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
+
+    let kind =
+        if has_alloc_error_handler {
+            AllocatorKind::Global
+        }
+        else {
+            AllocatorKind::Default
+        };
+    let callee = kind.fn_name(sym::oom);
+    let args: Vec<_> = types.iter().enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+        .collect();
+    let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
+    //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+    let block = func.new_block("entry");
+
+    let args = args
+        .iter()
+        .enumerate()
+        .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+        .collect::<Vec<_>>();
+    let _ret = context.new_call(None, callee, &args);
+    //llvm::LLVMSetTailCall(ret, True);
+    block.end_with_void_return(None);
+}
diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs
new file mode 100644 (file)
index 0000000..d749d76
--- /dev/null
@@ -0,0 +1,218 @@
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_session::Session;
+use rustc_codegen_ssa::back::archive::ArchiveBuilder;
+
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_middle::middle::cstore::DllImport;
+
+
+struct ArchiveConfig<'a> {
+    sess: &'a Session,
+    dst: PathBuf,
+    use_native_ar: bool,
+    use_gnu_style_archive: bool,
+}
+
+#[derive(Debug)]
+enum ArchiveEntry {
+    FromArchive {
+        archive_index: usize,
+        entry_index: usize,
+    },
+    File(PathBuf),
+}
+
+pub struct ArArchiveBuilder<'a> {
+    config: ArchiveConfig<'a>,
+    src_archives: Vec<(PathBuf, ar::Archive<File>)>,
+    // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+    // the end of an archive for linkers to not get confused.
+    entries: Vec<(String, ArchiveEntry)>,
+}
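+
+// Typical driver usage, as a sketch (the exact call sites live in
+// `rustc_codegen_ssa`): create the builder with `new(sess, &output, None)`,
+// add objects via `add_file`/`add_archive`, then `build()` writes the
+// archive and runs `ranlib` on it.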
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
+        let config = ArchiveConfig {
+            sess,
+            dst: output.to_path_buf(),
+            use_native_ar: false,
+            // FIXME test for linux and System V derivatives instead
+            use_gnu_style_archive: sess.target.options.archive_format == "gnu",
+        };
+
+        let (src_archives, entries) = if let Some(input) = input {
+            let mut archive = ar::Archive::new(File::open(input).unwrap());
+            let mut entries = Vec::new();
+
+            let mut i = 0;
+            while let Some(entry) = archive.next_entry() {
+                let entry = entry.unwrap();
+                entries.push((
+                    String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
+                    ArchiveEntry::FromArchive {
+                        archive_index: 0,
+                        entry_index: i,
+                    },
+                ));
+                i += 1;
+            }
+
+            (vec![(input.to_owned(), archive)], entries)
+        } else {
+            (vec![], Vec::new())
+        };
+
+        ArArchiveBuilder {
+            config,
+            src_archives,
+            entries,
+        }
+    }
+
+    fn src_files(&mut self) -> Vec<String> {
+        self.entries.iter().map(|(name, _)| name.clone()).collect()
+    }
+
+    fn remove_file(&mut self, name: &str) {
+        let index = self
+            .entries
+            .iter()
+            .position(|(entry_name, _)| entry_name == name)
+            .expect("Tried to remove file not existing in src archive");
+        self.entries.remove(index);
+    }
+
+    fn add_file(&mut self, file: &Path) {
+        self.entries.push((
+            file.file_name().unwrap().to_str().unwrap().to_string(),
+            ArchiveEntry::File(file.to_owned()),
+        ));
+    }
+
+    fn add_archive<F>(&mut self, archive_path: &Path, mut skip: F) -> std::io::Result<()>
+    where
+        F: FnMut(&str) -> bool + 'static,
+    {
+        let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
+        let archive_index = self.src_archives.len();
+
+        let mut i = 0;
+        while let Some(entry) = archive.next_entry() {
+            let entry = entry?;
+            let file_name = String::from_utf8(entry.header().identifier().to_vec())
+                .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
+            if !skip(&file_name) {
+                self.entries
+                    .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
+            }
+            i += 1;
+        }
+
+        self.src_archives.push((archive_path.to_owned(), archive));
+        Ok(())
+    }
+
+    fn update_symbols(&mut self) {
+    }
+
+    fn build(mut self) {
+        use std::process::Command;
+
+        fn add_file_using_ar(archive: &Path, file: &Path) {
+            Command::new("ar")
+                .arg("r") // add or replace file
+                .arg("-c") // silence created file message
+                .arg(archive)
+                .arg(&file)
+                .status()
+                .unwrap();
+        }
+
+        enum BuilderKind<'a> {
+            Bsd(ar::Builder<File>),
+            Gnu(ar::GnuBuilder<File>),
+            NativeAr(&'a Path),
+        }
+
+        let mut builder = if self.config.use_native_ar {
+            BuilderKind::NativeAr(&self.config.dst)
+        } else if self.config.use_gnu_style_archive {
+            BuilderKind::Gnu(ar::GnuBuilder::new(
+                File::create(&self.config.dst).unwrap(),
+                self.entries
+                    .iter()
+                    .map(|(name, _)| name.as_bytes().to_vec())
+                    .collect(),
+            ))
+        } else {
+            BuilderKind::Bsd(ar::Builder::new(File::create(&self.config.dst).unwrap()))
+        };
+
+        // Add all files
+        for (entry_name, entry) in self.entries.into_iter() {
+            match entry {
+                ArchiveEntry::FromArchive {
+                    archive_index,
+                    entry_index,
+                } => {
+                    let (ref src_archive_path, ref mut src_archive) =
+                        self.src_archives[archive_index];
+                    let entry = src_archive.jump_to_entry(entry_index).unwrap();
+                    let header = entry.header().clone();
+
+                    match builder {
+                        BuilderKind::Bsd(ref mut builder) => {
+                            builder.append(&header, entry).unwrap()
+                        }
+                        BuilderKind::Gnu(ref mut builder) => {
+                            builder.append(&header, entry).unwrap()
+                        }
+                        BuilderKind::NativeAr(archive_file) => {
+                            Command::new("ar")
+                                .arg("x")
+                                .arg(src_archive_path)
+                                .arg(&entry_name)
+                                .status()
+                                .unwrap();
+                            add_file_using_ar(archive_file, Path::new(&entry_name));
+                            std::fs::remove_file(entry_name).unwrap();
+                        }
+                    }
+                }
+                ArchiveEntry::File(file) =>
+                    match builder {
+                        BuilderKind::Bsd(ref mut builder) => {
+                            builder
+                                .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder"))
+                                .unwrap()
+                        },
+                        BuilderKind::Gnu(ref mut builder) => {
+                            builder
+                                .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file)))
+                                .unwrap()
+                        },
+                        BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file),
+                    },
+            }
+        }
+
+        // Finalize archive
+        std::mem::drop(builder);
+
+        // Run ranlib to be able to link the archive
+        let status = std::process::Command::new("ranlib")
+            .arg(self.config.dst)
+            .status()
+            .expect("Couldn't run ranlib");
+
+        if !status.success() {
+            self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+        }
+    }
+
+    fn inject_dll_import_lib(&mut self, _lib_name: &str, _dll_imports: &[DllImport], _tmpdir: &MaybeTempDir) {
+        unimplemented!();
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
new file mode 100644 (file)
index 0000000..3b77097
--- /dev/null
@@ -0,0 +1,785 @@
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+
+use rustc_hir::LlvmInlineAsmInner;
+use rustc_middle::{bug, ty::Instance};
+use rustc_span::{Span, Symbol};
+use rustc_target::asm::*;
+
+use std::borrow::Cow;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+
+// Rust asm! and GCC Extended Asm semantics differ substantially.
+//
+// 1. Rust asm operands go along as one list of operands. Operands themselves indicate 
+//    if they're "in" or "out". "In" and "out" operands can interleave. One operand can be 
+//    both "in" and "out" (`inout(reg)`).
+//
+//    GCC asm has two different lists for "in" and "out" operands. In terms of gccjit, 
+//    this means that all "out" operands must go before "in" operands. "In" and "out" operands 
+//    cannot interleave.
+//
+// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important 
+//    because the asm template refers to operands by index.
+//
+//    The mapping from Rust to GCC indexes would be 1-1 if it weren't for...
+//
+// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
+//    Rust, by contrast, expresses clobbers through "out" operands that aren't tied to
+//    a variable (`_`), and such "clobbers" do have an index.
+//
+// 4. Furthermore, GCC Extended Asm does not support explicit register constraints 
+//    (like `out("eax")`) directly, offering so-called "local register variables" 
+//    as a workaround. These variables need to be declared and initialized *before* 
+//    the Extended Asm block but *after* normal local variables 
+//    (see comment in `codegen_inline_asm` for explanation).
+//
+// With that in mind, let's see how we translate Rust syntax to GCC 
+// (from now on, `CC` stands for "constraint code"):
+//
+// * `out(reg_class) var`   -> translated to output operand: `"=CC"(var)`
+// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)`
+// * `in(reg_class) var`    -> translated to input operand: `"CC"(var)`
+//
+// * `out(reg_class) _` -> translated to one output operand `"=CC"(tmp)`, where "tmp" is a temporary unused variable
+//
+// * `out("explicit register") _` -> not translated to any operands, register is simply added to clobbers list
+//
+// * `inout(reg_class) in_var => out_var` -> translated to two operands:
+//                              output: `"=CC"(out_var)`
+//                              input:  `"num"(in_var)` where num is the GCC index
+//                                       of the corresponding output operand
+//
+// * `inout(reg_class) in_var => _` -> translated to a single readwrite (`"+CC"`) operand
+//                                      on a temporary variable (see `codegen_inline_asm`)
+//
+// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above 
+//                                              with `"r"(var)` constraint, 
+//                                              and one register variable assigned to the desired register.
+// 
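+//
+// An end-to-end sketch with hypothetical operands: for
+// `asm!("mov {0}, {1}", out(reg) x, in(reg) y)` on x86-64, `x` becomes GCC
+// output operand 0 with constraint `"=r"` and `y` becomes input operand 1
+// with constraint `"r"` (input indexes start after all outputs); the
+// placeholders `{0}`/`{1}` are rendered as `%0`/`%1` plus any size modifier
+// from `modifier_to_gcc`.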
+
+const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
+const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
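+
+// The backend passes `-masm=intel` (see base.rs), so Intel is the default
+// dialect: for AT&T templates we switch to AT&T syntax before the user's
+// code and restore Intel syntax afterwards with the directives above.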
+
+
+struct AsmOutOperand<'a, 'tcx, 'gcc> {
+    rust_idx: usize,
+    constraint: &'a str,
+    late: bool,
+    readwrite: bool,
+
+    tmp_var: LValue<'gcc>,
+    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+}
+
+struct AsmInOperand<'a, 'tcx> {
+    rust_idx: usize,
+    constraint: Cow<'a, str>,
+    val: RValue<'tcx>
+}
+
+impl AsmOutOperand<'_, '_, '_> {
+    fn to_constraint(&self) -> String {
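+        // Builds the GCC constraint string for this output; for example
+        // (illustrative): a plain late `reg` output yields "=r", an
+        // early-clobber (non-late) one yields "=&r", and a late readwrite
+        // operand yields "+r".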
+        let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1);
+
+        let sign = if self.readwrite { '+' } else { '=' };
+        res.push(sign);
+        if !self.late {
+            res.push('&');
+        }
+
+        res.push_str(&self.constraint);
+        res
+    }
+}
+
+enum ConstraintOrRegister {
+    Constraint(&'static str),
+    Register(&'static str)
+}
+
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, _inputs: Vec<RValue<'gcc>>, span: Span) -> bool {
+        self.sess().struct_span_err(span, "GCC backend does not support `llvm_asm!`")
+            .help("consider using the `asm!` macro instead")
+            .emit();
+
+        // We return `true` even if we've failed to generate the asm
+        // because we want to suppress the "malformed inline assembly" error
+        // generated by the frontend.
+        true
+    }
+
+    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+        let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+        let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
+        let intel_dialect = is_x86 && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // GCC index of an output operand equals its position in the array 
+        let mut outputs = vec![];
+
+        // GCC index of an input operand equals its position in the array
+        // added to `outputs.len()`
+        let mut inputs = vec![];
+
+        // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
+        let mut clobbers = vec![];
+
+        // We're trying to preallocate space for the template
+        let mut constants_len = 0;
+
+        // There are rules we must adhere to if we want GCC to do the right thing:
+        // 
+        // * Every local variable that the asm block uses as an output must be declared *before*
+        //   the asm block. 
+        // * There must be no instructions whatsoever between the register variables and the asm.
+        //
+        // Therefore, the backend must generate the instructions strictly in this order:
+        //
+        // 1. Output variables.
+        // 2. Register variables.
+        // 3. The asm block.
+        //
+        // We also must make sure that no input operands are emitted before output operands.
+        //
+        // This is why we work in passes, first emitting local vars, then local register vars.
+        // Also, we don't emit any asm operands immediately; we save them to
+        // one of the buffers to be emitted later.
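+        //
+        // In GNU C terms, the emitted shape is roughly this sketch (names
+        // hypothetical):
+        //
+        //     T out_tmp;                      // pass 1: output temporaries
+        //     register T reg_var asm("eax");  // pass 2: register variables
+        //     asm("..." : "=r"(out_tmp) : "r"(reg_var) : /* clobbers */);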
+
+        // 1. Normal variables (and saving operands to buffers).
+        for (rust_idx, op) in rust_operands.iter().enumerate() {
+            match *op {
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    use ConstraintOrRegister::*;
+
+                    let (constraint, ty) = match (reg_to_gcc(reg), place) {
+                        (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)),
+                        // When `reg` is a class and not an explicit register but the out place is not specified,
+                        // we need to create an unused output variable to assign the output to. This var
+                        // needs to be of a type that's "compatible" with the register class, but the
+                        // specific type doesn't matter.
+                        (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
+                        (Register(_), Some(_)) => {
+                            // left for the next pass
+                            continue
+                        },
+                        (Register(reg_name), None) => {
+                            // `clobber_abi` can add lots of clobbers that are not supported by the target,
+                            // such as AVX-512 registers, so we just ignore unsupported registers
+                            let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
+                                .any(|&(_, feature)| {
+                                    if let Some(feature) = feature {
+                                        self.tcx.sess.target_features.contains(&Symbol::intern(feature))
+                                    } else {
+                                        true // Register class is unconditionally supported
+                                    }
+                                });
+
+                            if is_target_supported && !clobbers.contains(&reg_name) {
+                                clobbers.push(reg_name);
+                            }
+                            continue
+                        }
+                    };
+
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                    outputs.push(AsmOutOperand {
+                        constraint, 
+                        rust_idx,
+                        late,
+                        readwrite: false,
+                        tmp_var,
+                        out_place: place
+                    });
+                }
+
+                InlineAsmOperandRef::In { reg, value } => {
+                    if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                        inputs.push(AsmInOperand { 
+                            constraint: Cow::Borrowed(constraint), 
+                            rust_idx, 
+                            val: value.immediate()
+                        });
+                    } 
+                    else {
+                        // left for the next pass
+                        continue
+                    }
+                }
+
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                        constraint
+                    } 
+                    else {
+                        // left for the next pass
+                        continue
+                    };
+
+                    // Rustc frontend guarantees that input and output types are "compatible",
+                    // so we can just use input var's type for the output variable.
+                    //
+                    // This decision is also backed by the fact that LLVM needs in and out 
+                    // values to be of *exactly the same type*, not just "compatible". 
+                    // I'm not sure whether GCC is as picky, but better safe than sorry.
+                    let ty = in_value.layout.gcc_type(self.cx, false);
+                    let tmp_var = self.current_func().new_local(None, ty, "output_register");
+
+                    // If the out_place is None (i.e. `inout(reg) _` syntax was used), we translate
+                    // it to one "readwrite (+) output variable", otherwise we translate it to two 
+                    // "out and tied in" vars as described above.
+                    let readwrite = out_place.is_none();
+                    outputs.push(AsmOutOperand {
+                        constraint, 
+                        rust_idx,
+                        late,
+                        readwrite,
+                        tmp_var, 
+                        out_place,
+                    });
+
+                    if !readwrite {
+                        let out_gcc_idx = outputs.len() - 1;
+                        let constraint = Cow::Owned(out_gcc_idx.to_string());
+
+                        inputs.push(AsmInOperand {
+                            constraint, 
+                            rust_idx, 
+                            val: in_value.immediate()
+                        });
+                    }
+                }
+
+                InlineAsmOperandRef::Const { ref string } => {
+                    constants_len += string.len() + att_dialect as usize;
+                }
+
+                InlineAsmOperandRef::SymFn { instance } => {
+                    constants_len += self.tcx.symbol_name(instance).name.len();
+                }
+                InlineAsmOperandRef::SymStatic { def_id } => {
+                    constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
+                }
+            }
+        }
+
+        // 2. Register variables.
+        for (rust_idx, op) in rust_operands.iter().enumerate() {
+            match *op {
+                // `out("explicit register") var`
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        let out_place = if let Some(place) = place {
+                            place
+                        } 
+                        else {
+                            // processed in the previous pass
+                            continue
+                        };
+
+                        let ty = out_place.layout.gcc_type(self.cx, false);
+                        let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                        tmp_var.set_register_name(reg_name);
+
+                        outputs.push(AsmOutOperand {
+                            constraint: "r".into(), 
+                            rust_idx,
+                            late,
+                            readwrite: false,
+                            tmp_var,
+                            out_place: Some(out_place)
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                // `in("explicit register") var`
+                InlineAsmOperandRef::In { reg, value } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        let ty = value.layout.gcc_type(self.cx, false);
+                        let reg_var = self.current_func().new_local(None, ty, "input_register");
+                        reg_var.set_register_name(reg_name);
+                        self.llbb().add_assignment(None, reg_var, value.immediate());
+
+                        inputs.push(AsmInOperand { 
+                            constraint: "r".into(), 
+                            rust_idx, 
+                            val: reg_var.to_rvalue()
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                // `inout("explicit register") in_var => out_var`
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+                        let out_place = if let Some(place) = out_place {
+                            place
+                        } 
+                        else {
+                            // processed in the previous pass
+                            continue
+                        };
+
+                        // See explanation in the first pass.
+                        let ty = in_value.layout.gcc_type(self.cx, false);
+                        let tmp_var = self.current_func().new_local(None, ty, "output_register");
+                        tmp_var.set_register_name(reg_name);
+
+                        outputs.push(AsmOutOperand {
+                            constraint: "r".into(), 
+                            rust_idx,
+                            late,
+                            readwrite: false,
+                            tmp_var,
+                            out_place: Some(out_place)
+                        });
+
+                        let constraint = Cow::Owned((outputs.len() - 1).to_string());
+                        inputs.push(AsmInOperand { 
+                            constraint, 
+                            rust_idx,
+                            val: in_value.immediate()
+                        });
+                    }
+
+                    // processed in the previous pass
+                }
+
+                InlineAsmOperandRef::Const { .. } 
+                | InlineAsmOperandRef::SymFn { .. } 
+                | InlineAsmOperandRef::SymStatic { .. } => {
+                    // processed in the previous pass
+                }
+            }
+        }
+
+        // 3. Build the template string
+
+        let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
+        if !intel_dialect {
+            template_str.push_str(ATT_SYNTAX_INS);
+        }
+
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => {
+                    // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable
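+                    // Each literal `%` must be doubled in GCC's template
+                    // language, e.g. `mov %eax, 1` becomes `mov %%eax, 1`.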
+                    let mut iter = string.split('%');
+                    if let Some(s) = iter.next() {
+                        template_str.push_str(s);
+                    }
+
+                    for s in iter {
+                        template_str.push_str("%%");
+                        template_str.push_str(s);
+                    }
+                }
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+                    let mut push_to_template = |modifier, gcc_idx| {
+                        use std::fmt::Write;
+
+                        template_str.push('%');
+                        if let Some(modifier) = modifier {
+                            template_str.push(modifier);
+                        }
+                        write!(template_str, "{}", gcc_idx).expect("pushing to string failed");
+                    };
+
+                    match rust_operands[operand_idx] {
+                        InlineAsmOperandRef::Out { reg, ..  } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+                            let gcc_index = outputs.iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::In { reg, .. } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+                            let in_gcc_index = inputs.iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            let gcc_index = in_gcc_index + outputs.len();
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::InOut { reg, .. } => {
+                            let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+
+                            // The input register is tied to the output, so we can just use the index of the output register
+                            let gcc_index = outputs.iter()
+                                .position(|op| operand_idx == op.rust_idx)
+                                .expect("wrong rust index");
+                            push_to_template(modifier, gcc_index);
+                        }
+
+                        InlineAsmOperandRef::SymFn { instance } => {
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+
+                        InlineAsmOperandRef::SymStatic { def_id } => {
+                            // TODO(@Commeownist): This may not be sufficient for all kinds of statics.
+                            // Some statics may need the `@plt` suffix, like thread-local vars.
+                            let instance = Instance::mono(self.tcx, def_id);
+                            let name = self.tcx.symbol_name(instance).name;
+                            template_str.push_str(name);
+                        }
+
+                        InlineAsmOperandRef::Const { ref string } => {
+                            // Const operands get injected directly into the template
+                            if att_dialect {
+                                template_str.push('$');
+                            }
+                            template_str.push_str(string);
+                        }
+                    }
+                }
+            }
+        }
+
+        if !intel_dialect {
+            template_str.push_str(INTEL_SYNTAX_INS);
+        }
+        
+        // 4. Generate Extended Asm block
+
+        let block = self.llbb();
+        let extended_asm = block.add_extended_asm(None, &template_str);
+
+        for op in &outputs {
+            extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var);
+        }
+
+        for op in &inputs {
+            extended_asm.add_input_operand(None, &op.constraint, op.val);
+        }
+
+        for clobber in clobbers.iter() {
+            extended_asm.add_clobber(clobber);
+        }
+
+        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+            // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient 
+            // on all architectures. For instance, what about FP stack?
+            extended_asm.add_clobber("cc");
+        }
+        if !options.contains(InlineAsmOptions::NOMEM) {
+            extended_asm.add_clobber("memory");
+        }
+        if !options.contains(InlineAsmOptions::PURE) {
+            extended_asm.set_volatile_flag(true);
+        }
+        if !options.contains(InlineAsmOptions::NOSTACK) {
+            // TODO(@Commeownist): figure out how to align stack
+        }
+        if options.contains(InlineAsmOptions::NORETURN) {
+            let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+            let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+            self.call(self.type_void(), builtin_unreachable, &[], None);
+        }
+
+        // Write results to outputs. 
+        //
+        // We need to do this because:
+        //  1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases 
+        //     (especially with current `rustc_backend_ssa` API).
+        //  2. Not every output operand has an `out_place`, and it's required by `add_output_operand`.
+        //
+        // Instead, we generate a temporary output variable for each output operand, and this loop
+        // then generates an `out_place = tmp_var;` assignment whenever `out_place` exists.
+        for op in &outputs {
+            if let Some(place) = op.out_place {
+                OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);                
+            }
+        }
+
+    }
+}
+
+fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
+    let len: usize = template.iter().map(|piece| {
+        match *piece {
+            InlineAsmTemplatePiece::String(ref string) => {
+                string.len()
+            }
+            InlineAsmTemplatePiece::Placeholder { .. } => {
+                // '%' + 1 char modifier + 1 char index
+                3
+            }
+        }
+    })
+    .sum();
+
+    // Increase it by 5% to account for '%' signs that will be doubled.
+    // The number is pulled out of thin air, but it should be fair enough
+    // as an upper bound.
+    let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+    if att_dialect {
+        res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
+    }
+    res
+}
+
+/// Converts a register class to a GCC constraint code.
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+    let constraint = match reg {
+        // For vector registers LLVM wants the register name to match the type size.
+        InlineAsmRegOrRegClass::Reg(reg) => {
+            match reg {
+                InlineAsmReg::X86(_) => {
+                    // TODO(antoyo): add support for vector register.
+                    //
+                    // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+                    return ConstraintOrRegister::Register(match reg.name() {
+                        // Some register names do not map 1-1 from Rust to GCC
+                        "st(0)" => "st",
+
+                        name => name,
+                    });
+                }
+
+                _ => unimplemented!(),
+            }
+        },
+        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(),
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(),
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(),
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(),
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(),
+            InlineAsmRegClass::Bpf(_) => unimplemented!(),
+            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(),
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(),
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(),
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(),
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(),
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(),
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+                unreachable!("clobber-only")
+            },
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
+            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+            InlineAsmRegClass::X86(
+                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
+            ) => unreachable!("clobber-only"),
+            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+                bug!("GCC backend does not support SPIR-V")
+            }
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+            InlineAsmRegClass::Err => unreachable!(),
+        }
+    };
+
+    ConstraintOrRegister::Constraint(constraint)
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            unimplemented!()
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            unimplemented!()
+        }
+        InlineAsmRegClass::Bpf(_) => unimplemented!(),
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        },
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        },
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
+
+impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> {
+    fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef], options: InlineAsmOptions, _line_spans: &[Span]) {
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+        // Default to Intel syntax on x86
+        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+            && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // Build the template string
+        let mut template_str = String::new();
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => {
+                    for line in string.lines() {
+                        // NOTE: GCC does not allow inline comments, so remove them.
+                        let line =
+                            if let Some(index) = line.rfind("//") {
+                                &line[..index]
+                            }
+                            else {
+                                line
+                            };
+                        template_str.push_str(line);
+                        template_str.push('\n');
+                    }
+                },
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+                    match operands[operand_idx] {
+                        GlobalAsmOperandRef::Const { ref string } => {
+                            // Const operands get injected directly into the
+                            // template. Note that we don't need to escape %
+                            // here unlike normal inline assembly.
+                            template_str.push_str(string);
+                        }
+                    }
+                }
+            }
+        }
+
+        let template_str =
+            if intel_syntax {
+                format!("{}\n\t.intel_syntax noprefix", template_str)
+            }
+            else {
+                format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+            };
+        // NOTE: it seems GCC puts the asm in the wrong section, so set it to .text manually.
+        let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
+        self.context.add_top_level_asm(None, &template_str);
+    }
+}
+
+fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            unimplemented!()
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            unimplemented!()
+        }
+        InlineAsmRegClass::Bpf(_) => unimplemented!(),
+        InlineAsmRegClass::Hexagon(_) => unimplemented!(),
+        InlineAsmRegClass::Mips(_) => unimplemented!(),
+        InlineAsmRegClass::Nvptx(_) => unimplemented!(),
+        InlineAsmRegClass::PowerPC(_) => unimplemented!(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+            None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
+            Some('l') => Some('b'),
+            Some('h') => Some('h'),
+            Some('x') => Some('w'),
+            Some('e') => Some('k'),
+            Some('r') => Some('q'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+            (_, Some('x')) => Some('x'),
+            (_, Some('y')) => Some('t'),
+            (_, Some('z')) => Some('g'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        },
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs
new file mode 100644 (file)
index 0000000..d692799
--- /dev/null
@@ -0,0 +1 @@
+pub mod write;
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
new file mode 100644 (file)
index 0000000..c3e3847
--- /dev/null
@@ -0,0 +1,78 @@
+use std::fs;
+
+use gccjit::OutputKind;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_errors::Handler;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::{GccCodegenBackend, GccContext};
+
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+    {
+        let context = &module.module_llvm.context;
+
+        let module_name = module.name.clone();
+        let module_name = Some(&module_name[..]);
+
+        let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
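+        // Note: gccjit compiles the whole context per output kind, so
+        // emitting assembly or an object file below amounts to a single
+        // `compile_to_file` call with the matching `OutputKind`.
+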
+        if config.bitcode_needed() {
+            // TODO(antoyo)
+        }
+
+        if config.emit_ir {
+            unimplemented!();
+        }
+
+        if config.emit_asm {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
+            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+            context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
+        }
+
+        match config.emit_obj {
+            EmitObj::ObjectCode(_) => {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
+                match &*module.name {
+                    "std_example.7rcbfp3g-cgu.15" => {
+                        println!("Dumping reproducer {}", module.name);
+                        let _ = fs::create_dir("/tmp/reproducers");
+                        // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+                        // transmuting an rvalue to an lvalue.
+                        // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
+                        context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+                        println!("Dumped reproducer {}", module.name);
+                    },
+                    _ => (),
+                }
+                context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+            }
+
+            EmitObj::Bitcode => {
+                // TODO(antoyo)
+            }
+
+            EmitObj::None => {}
+        }
+    }
+
+    Ok(module.into_compiled_module(
+        config.emit_obj != EmitObj::None,
+        cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+        config.emit_bc,
+        &cgcx.output_filenames,
+    ))
+}
+
+pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+    unimplemented!();
+}
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
new file mode 100644 (file)
index 0000000..9fd0436
--- /dev/null
@@ -0,0 +1,165 @@
+use std::env;
+use std::time::Instant;
+
+use gccjit::{
+    Context,
+    FunctionType,
+    GlobalKind,
+};
+use rustc_middle::dep_graph;
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::mono::Linkage;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+
+use crate::GccContext;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
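+// The two functions below map rustc's `Linkage` to the closest libgccjit equivalent
+// (`GlobalKind` for globals, `FunctionType` for functions); variants without an obvious
+// equivalent are left unimplemented!() for now.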
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+    match linkage {
+        Linkage::External => GlobalKind::Imported,
+        Linkage::AvailableExternally => GlobalKind::Imported,
+        Linkage::LinkOnceAny => unimplemented!(),
+        Linkage::LinkOnceODR => unimplemented!(),
+        Linkage::WeakAny => unimplemented!(),
+        Linkage::WeakODR => unimplemented!(),
+        Linkage::Appending => unimplemented!(),
+        Linkage::Internal => GlobalKind::Internal,
+        Linkage::Private => GlobalKind::Internal,
+        Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
+        Linkage::Common => unimplemented!(),
+    }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+    match linkage {
+        Linkage::External => FunctionType::Exported,
+        Linkage::AvailableExternally => FunctionType::Extern,
+        Linkage::LinkOnceAny => unimplemented!(),
+        Linkage::LinkOnceODR => unimplemented!(),
+        Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
+        Linkage::WeakODR => unimplemented!(),
+        Linkage::Appending => unimplemented!(),
+        Linkage::Internal => FunctionType::Internal,
+        Linkage::Private => FunctionType::Internal,
+        Linkage::ExternalWeak => unimplemented!(),
+        Linkage::Common => unimplemented!(),
+    }
+}
+
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
+    let prof_timer = tcx.prof.generic_activity("codegen_module");
+    let start_time = Instant::now();
+
+    let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+    let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+    let time_to_codegen = start_time.elapsed();
+    drop(prof_timer);
+
+    // We assume that the cost to run GCC on a CGU is proportional to
+    // the time we needed for codegenning it.
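+    // The computation below is simply that duration converted to nanoseconds.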
+    let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
+
+    fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
+        let cgu = tcx.codegen_unit(cgu_name);
+        // Instantiate monomorphizations without filling out definitions yet...
+        //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+        let context = Context::default();
+        // TODO(antoyo): only set on x86 platforms.
+        context.add_command_line_option("-masm=intel");
+        for arg in &tcx.sess.opts.cg.llvm_args {
+            context.add_command_line_option(arg);
+        }
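+        // NOTE: -fno-semantic-interposition lets GCC assume that exported symbols will
+        // not be interposed at runtime, allowing it to optimize and inline them more
+        // aggressively.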
+        context.add_command_line_option("-fno-semantic-interposition");
+        if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+            context.set_dump_code_on_compile(true);
+        }
+        if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+            context.set_dump_initial_gimple(true);
+        }
+        context.set_debug_info(true);
+        if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+            context.set_dump_everything(true);
+        }
+        if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+            context.set_keep_intermediates(true);
+        }
+
+        {
+            let cx = CodegenCx::new(&context, cgu, tcx);
+
+            let mono_items = cgu.items_in_deterministic_order(tcx);
+            for &(mono_item, (linkage, visibility)) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+            }
+
+            // ... and now that we have everything pre-defined, fill out those definitions.
+            for &(mono_item, _) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            }
+
+            // If this codegen unit contains the main function, also create the
+            // wrapper here.
+            maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+            // Finalize debuginfo
+            if cx.sess().opts.debuginfo != DebugInfo::None {
+                cx.debuginfo_finalize();
+            }
+        }
+
+        ModuleCodegen {
+            name: cgu_name.to_string(),
+            module_llvm: GccContext {
+                context
+            },
+            kind: ModuleKind::Regular,
+        }
+    }
+
+    (module, cost)
+}
+
+pub fn write_compressed_metadata<'tcx>(tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut GccContext) {
+    use snap::write::FrameEncoder;
+    use std::io::Write;
+
+    // Historical note:
+    //
+    // When using link.exe it was seen that the section name `.note.rustc`
+    // was getting shortened to `.note.ru`, and according to the PE and COFF
+    // specification:
+    //
+    // > Executable images do not use a string table and do not support
+    // > section names longer than 8 characters
+    //
+    // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+    //
+    // As a result, we choose a slightly shorter name! As to why
+    // `.note.rustc` works on MinGW, see
+    // https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
+    let section_name = if tcx.sess.target.is_like_osx { "__DATA,.rustc" } else { ".rustc" };
+
+    let context = &gcc_module.context;
+    let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+    FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
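+    // The section contents are therefore METADATA_HEADER followed by the
+    // snappy-compressed crate metadata.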
+
+    let name = exported_symbols::metadata_symbol_name(tcx);
+    let typ = context.new_array_type(None, context.new_type::<u8>(), compressed.len() as i32);
+    let global = context.new_global(None, GlobalKind::Exported, typ, name);
+    global.global_set_initializer(&compressed);
+    global.set_link_section(section_name);
+
+    // Also generate a .section directive to force no
+    // flags, at least for ELF outputs, so that the
+    // metadata doesn't get loaded into memory.
+    let directive = format!(".section {}", section_name);
+    context.add_top_level_asm(None, &directive);
+}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
new file mode 100644 (file)
index 0000000..ac90841
--- /dev/null
@@ -0,0 +1,1540 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::Deref;
+
+use gccjit::FunctionType;
+use gccjit::{
+    BinaryOp,
+    Block,
+    ComparisonOp,
+    Function,
+    LValue,
+    RValue,
+    ToRValue,
+    Type,
+    UnaryOp,
+};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+    BackendTypes,
+    BaseTypeMethods,
+    BuilderMethods,
+    ConstMethods,
+    DerivedTypeMethods,
+    LayoutTypeMethods,
+    HasCodegen,
+    OverflowOp,
+    StaticBuilderMethods,
+};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+    self,
+    call::FnAbi,
+    Align,
+    HasDataLayout,
+    Size,
+    TargetDataLayout,
+    WrappingRange,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+// TODO(antoyo)
+type Funclet = ();
+
+// TODO(antoyo): remove this variable.
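+// It only serves to generate distinct names for the locals that hold call results and
+// loaded values.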
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+    Max,
+    Min,
+}
+
+trait EnumClone {
+    fn clone(&self) -> Self;
+}
+
+impl EnumClone for AtomicOrdering {
+    fn clone(&self) -> Self {
+        match *self {
+            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+            AtomicOrdering::Release => AtomicOrdering::Release,
+            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
+            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
+        }
+    }
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+    pub cx: &'a CodegenCx<'gcc, 'tcx>,
+    pub block: Option<Block<'gcc>>,
+    stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
+        Builder {
+            cx,
+            block: None,
+            stack_var_count: Cell::new(0),
+        }
+    }
+
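+    // Emulates an atomic max/min with a load followed by a compare-and-swap loop: keep
+    // retrying until either the stored value is already the extremum or the exchange
+    // succeeds.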
+    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type()) / 8;
+
+        let func = self.current_func();
+
+        let load_ordering =
+            match order {
+                // TODO(antoyo): does this make sense?
+                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+                _ => order.clone(),
+            };
+        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
+        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(None, previous_var, previous_value);
+        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+        let while_block = func.new_block("while");
+        let after_block = func.new_block("after_while");
+        self.llbb().end_with_jump(None, while_block);
+
+        // NOTE: since jumps were added in a place compare_exchange does not expect, the
+        // current blocks tracked in the state need to be updated.
+        self.block = Some(while_block);
+        *self.cx.current_block.borrow_mut() = Some(while_block);
+
+        let comparison_operator =
+            match operation {
+                ExtremumOperation::Max => ComparisonOp::LessThan,
+                ExtremumOperation::Min => ComparisonOp::GreaterThan,
+            };
+
+        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+        while_block.end_with_conditional(None, cond, while_block, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // state need to be updated.
+        self.block = Some(after_block);
+        *self.cx.current_block.borrow_mut() = Some(after_block);
+
+        return_value.to_rvalue()
+    }
+
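+    // Wraps the __atomic_compare_exchange_<size> builtin: `cmp` holds the expected value
+    // (and receives the actual value on failure) and the call returns a success flag.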
+    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type());
+        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+
+        // NOTE: not sure why, but we have the wrong type here.
+        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
+        let src = self.context.new_cast(None, src, int_type);
+        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+    }
+
+    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
+        self.llbb().add_assignment(None, lvalue, value);
+    }
+
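+    // Checks the argument types against the parameter types of `func` and bitcasts any
+    // mismatched argument so that libgccjit accepts the call.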
+    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+        let mut all_args_match = true;
+        let mut param_types = vec![];
+        let param_count = func.get_param_count();
+        for (index, arg) in args.iter().enumerate().take(param_count) {
+            let param = func.get_param(index as i32);
+            let param = param.to_rvalue().get_type();
+            if param != arg.get_type() {
+                all_args_match = false;
+            }
+            param_types.push(param);
+        }
+
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let casted_args: Vec<_> = param_types
+            .into_iter()
+            .zip(args.iter())
+            .enumerate()
+            .map(|(_i, (expected_ty, &actual_val))| {
+                let actual_ty = actual_val.get_type();
+                if expected_ty != actual_ty {
+                    self.bitcast(actual_val, expected_ty)
+                }
+                else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        Cow::Owned(casted_args)
+    }
+
+    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+        let mut all_args_match = true;
+        let mut param_types = vec![];
+        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
+        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
+            let param = gcc_func.get_param_type(index);
+            if param != arg.get_type() {
+                all_args_match = false;
+            }
+            param_types.push(param);
+        }
+
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let casted_args: Vec<_> = param_types
+            .into_iter()
+            .zip(args.iter())
+            .enumerate()
+            .map(|(_i, (expected_ty, &actual_val))| {
+                let actual_ty = actual_val.get_type();
+                if expected_ty != actual_ty {
+                    self.bitcast(actual_val, expected_ty)
+                }
+                else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        Cow::Owned(casted_args)
+    }
+
+    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
+        let stored_ty = self.cx.val_ty(val);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+        if dest_ptr_ty == stored_ptr_ty {
+            ptr
+        }
+        else {
+            self.bitcast(ptr, stored_ptr_ty)
+        }
+    }
+
+    pub fn current_func(&self) -> Function<'gcc> {
+        self.block.expect("block").get_function()
+    }
+
+    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // TODO(antoyo): remove when the API supports a different type for functions.
+        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+        let args = self.check_call("call", func, args);
+
+        // gccjit requires the result of a call to be used, even when the caller ignores it.
+        // That's why we assign the result to a local or call add_eval().
+        let return_type = func.get_return_type();
+        let current_block = self.current_block.borrow().expect("block");
+        let void_type = self.context.new_type::<()>();
+        let current_func = current_block.get_function();
+        if return_type != void_type {
+            unsafe { RETURN_VALUE_COUNT += 1 };
+            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+            result.to_rvalue()
+        }
+        else {
+            current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
+            // Return a dummy value when the function has no return value.
+            self.context.new_rvalue_from_long(self.isize_type, 0)
+        }
+    }
+
+    fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        let args = self.check_ptr_call("call", func_ptr, args);
+
+        // gccjit requires the result of a call to be used, even when the caller ignores it.
+        // That's why we assign the result to a local or call add_eval().
+        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
+        let mut return_type = gcc_func.get_return_type();
+        let current_block = self.current_block.borrow().expect("block");
+        let void_type = self.context.new_type::<()>();
+        let current_func = current_block.get_function();
+
+        // FIXME(antoyo): this is a temporary workaround for unsupported LLVM intrinsics.
+        if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
+            return_type = self.int_type;
+        }
+
+        if return_type != void_type {
+            unsafe { RETURN_VALUE_COUNT += 1 };
+            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+            current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            result.to_rvalue()
+        }
+        else {
+            if gcc_func.get_param_count() == 0 {
+                // FIXME(antoyo): this is a temporary workaround for unsupported LLVM intrinsics.
+                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+            }
+            else {
+                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            }
+            // Return a dummy value when the function has no return value.
+            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+            current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            result.to_rvalue()
+        }
+    }
+
+    pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // gccjit requires the result of a call to be used, even when the caller ignores it.
+        // That's why we assign the result to a local.
+        let return_type = self.context.new_type::<bool>();
+        let current_block = self.current_block.borrow().expect("block");
+        let current_func = current_block.get_function();
+        // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
+        unsafe { RETURN_VALUE_COUNT += 1 };
+        let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+        current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        result.to_rvalue()
+    }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+    type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.tcx()
+    }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self.cx.data_layout()
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
+
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        self.cx.handle_layout_err(err, span, ty)
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+    }
+}
+
+impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
+    type Target = CodegenCx<'gcc, 'tcx>;
+
+    fn deref(&self) -> &Self::Target {
+        self.cx
+    }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+        let mut bx = Builder::with_cx(cx);
+        *cx.current_block.borrow_mut() = Some(block);
+        bx.block = Some(block);
+        bx
+    }
+
+    fn build_sibling_block(&mut self, name: &str) -> Self {
+        let block = self.append_sibling_block(name);
+        Self::build(self.cx, block)
+    }
+
+    fn llbb(&self) -> Block<'gcc> {
+        self.block.expect("block")
+    }
+
+    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+        let func = cx.rvalue_as_function(func);
+        func.new_block(name)
+    }
+
+    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+        let func = self.current_func();
+        func.new_block(name)
+    }
+
+    fn ret_void(&mut self) {
+        self.llbb().end_with_void_return(None)
+    }
+
+    fn ret(&mut self, value: RValue<'gcc>) {
+        let value =
+            if self.structs_as_pointer.borrow().contains(&value) {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                value.dereference(None).to_rvalue()
+            }
+            else {
+                value
+            };
+        self.llbb().end_with_return(None, value);
+    }
+
+    fn br(&mut self, dest: Block<'gcc>) {
+        self.llbb().end_with_jump(None, dest)
+    }
+
+    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+        self.llbb().end_with_conditional(None, cond, then_block, else_block)
+    }
+
+    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+        let mut gcc_cases = vec![];
+        let typ = self.val_ty(value);
+        for (on_val, dest) in cases {
+            let on_val = self.const_uint_big(typ, on_val);
+            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+        }
+        self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
+    }
+
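+    // NOTE: unwinding is not implemented yet (see the TODO below), so this lowers the
+    // invoke to a conditional branch on a constant condition instead of a real
+    // invoke/landing-pad pair.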
+    fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+        let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
+        self.llbb().end_with_conditional(None, condition, then, catch);
+        self.context.new_rvalue_from_int(self.int_type, 0)
+
+        // TODO(antoyo)
+    }
+
+    fn unreachable(&mut self) {
+        let func = self.context.get_builtin_function("__builtin_unreachable");
+        let block = self.block.expect("block");
+        block.add_eval(None, self.context.new_call(None, func, &[]));
+        let return_type = block.get_function().get_return_type();
+        let void_type = self.context.new_type::<()>();
+        if return_type == void_type {
+            block.end_with_void_return(None)
+        }
+        else {
+            let return_value = self.current_func()
+                .new_local(None, return_type, "unreachableReturn");
+            block.end_with_return(None, return_value)
+        }
+    }
+
+    fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): this should not be required.
+        if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
+            b = self.context.new_cast(None, b, a.get_type());
+        }
+        a + b
+    }
+
+    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a + b
+    }
+
+    fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+        if a.get_type() != b.get_type() {
+            b = self.context.new_cast(None, b, a.get_type());
+        }
+        a - b
+    }
+
+    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a - b
+    }
+
+    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): convert the arguments to unsigned?
+        a / b
+    }
+
+    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): convert the arguments to unsigned?
+        // TODO(antoyo): poison if not exact.
+        a / b
+    }
+
+    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): convert the arguments to signed?
+        a / b
+    }
+
+    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): poison if not exact.
+        // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+        // should be the same.
+        let typ = a.get_type().to_signed(self);
+        let b = self.context.new_cast(None, b, typ);
+        a / b
+    }
+
+    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a / b
+    }
+
+    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a % b
+    }
+
+    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a % b
+    }
+
+    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        if a.get_type() == self.cx.float_type {
+            let fmodf = self.context.get_builtin_function("fmodf");
+            // FIXME(antoyo): this seems to produce the wrong result.
+            return self.context.new_call(None, fmodf, &[a, b]);
+        }
+        assert_eq!(a.get_type(), self.cx.double_type);
+
+        let fmod = self.context.get_builtin_function("fmod");
+        return self.context.new_call(None, fmod, &[a, b]);
+    }
+
+    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if a_type.is_unsigned(self) && b_type.is_signed(self) {
+            let a = self.context.new_cast(None, a, b_type);
+            let result = a << b;
+            self.context.new_cast(None, result, a_type)
+        }
+        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+            let b = self.context.new_cast(None, b, a_type);
+            a << b
+        }
+        else {
+            a << b
+        }
+    }
+
+    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+        // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if a_type.is_unsigned(self) && b_type.is_signed(self) {
+            let a = self.context.new_cast(None, a, b_type);
+            let result = a >> b;
+            self.context.new_cast(None, result, a_type)
+        }
+        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+            let b = self.context.new_cast(None, b, a_type);
+            a >> b
+        }
+        else {
+            a >> b
+        }
+    }
+
+    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check whether >> performs an arithmetic shift here.
+        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+        let a_type = a.get_type();
+        let b_type = b.get_type();
+        if a_type.is_unsigned(self) && b_type.is_signed(self) {
+            let a = self.context.new_cast(None, a, b_type);
+            let result = a >> b;
+            self.context.new_cast(None, result, a_type)
+        }
+        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+            let b = self.context.new_cast(None, b, a_type);
+            a >> b
+        }
+        else {
+            a >> b
+        }
+    }
+
+    fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): hack by putting the result in a variable to work around this bug:
+        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
+        if a.get_type() != b.get_type() {
+            b = self.context.new_cast(None, b, a.get_type());
+        }
+        let res = self.current_func().new_local(None, b.get_type(), "andResult");
+        self.llbb().add_assignment(None, res, a & b);
+        res.to_rvalue()
+    }
+
+    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): hack by putting the result in a variable to work around this bug:
+        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
+        let res = self.current_func().new_local(None, b.get_type(), "orResult");
+        self.llbb().add_assignment(None, res, a | b);
+        res.to_rvalue()
+    }
+
+    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a ^ b
+    }
+
+    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use new_unary_op()?
+        self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
+    }
+
+    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+    }
+
+    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+        let operation =
+            if a.get_type().is_bool() {
+                UnaryOp::LogicalNegate
+            }
+            else {
+                UnaryOp::BitwiseNegate
+            };
+        self.cx.context.new_unary_op(None, operation, a.get_type(), a)
+    }
+
+    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a + b
+    }
+
+    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a + b
+    }
+
+    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a - b
+    }
+
+    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): should generate poison value?
+        a - b
+    }
+
+    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+        a * b
+    }
+
+    fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+        use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
+
+        let new_kind =
+            match typ.kind() {
+                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+                t @ (Uint(_) | Int(_)) => t.clone(),
+                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+            };
+
+        // TODO(antoyo): remove duplication with intrinsic?
+        let name =
+            match oop {
+                OverflowOp::Add =>
+                    match new_kind {
+                        Int(I8) => "__builtin_add_overflow",
+                        Int(I16) => "__builtin_add_overflow",
+                        Int(I32) => "__builtin_sadd_overflow",
+                        Int(I64) => "__builtin_saddll_overflow",
+                        Int(I128) => "__builtin_add_overflow",
+
+                        Uint(U8) => "__builtin_add_overflow",
+                        Uint(U16) => "__builtin_add_overflow",
+                        Uint(U32) => "__builtin_uadd_overflow",
+                        Uint(U64) => "__builtin_uaddll_overflow",
+                        Uint(U128) => "__builtin_add_overflow",
+
+                        _ => unreachable!(),
+                    },
+                OverflowOp::Sub =>
+                    match new_kind {
+                        Int(I8) => "__builtin_sub_overflow",
+                        Int(I16) => "__builtin_sub_overflow",
+                        Int(I32) => "__builtin_ssub_overflow",
+                        Int(I64) => "__builtin_ssubll_overflow",
+                        Int(I128) => "__builtin_sub_overflow",
+
+                        Uint(U8) => "__builtin_sub_overflow",
+                        Uint(U16) => "__builtin_sub_overflow",
+                        Uint(U32) => "__builtin_usub_overflow",
+                        Uint(U64) => "__builtin_usubll_overflow",
+                        Uint(U128) => "__builtin_sub_overflow",
+
+                        _ => unreachable!(),
+                    },
+                OverflowOp::Mul =>
+                    match new_kind {
+                        Int(I8) => "__builtin_mul_overflow",
+                        Int(I16) => "__builtin_mul_overflow",
+                        Int(I32) => "__builtin_smul_overflow",
+                        Int(I64) => "__builtin_smulll_overflow",
+                        Int(I128) => "__builtin_mul_overflow",
+
+                        Uint(U8) => "__builtin_mul_overflow",
+                        Uint(U16) => "__builtin_mul_overflow",
+                        Uint(U32) => "__builtin_umul_overflow",
+                        Uint(U64) => "__builtin_umulll_overflow",
+                        Uint(U128) => "__builtin_mul_overflow",
+
+                        _ => unreachable!(),
+                    },
+            };
+
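+        // NOTE: the 32-bit and 64-bit cases use the width-specific builtins; the other
+        // widths fall back to the generic __builtin_*_overflow builtins, which infer the
+        // operand type.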
+        let intrinsic = self.context.get_builtin_function(&name);
+        let res = self.current_func()
+            // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+            .new_local(None, rhs.get_type(), "binopResult")
+            .get_address(None);
+        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+        (res.dereference(None).to_rvalue(), overflow)
+    }
+
+    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
+        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
+        // Ideally, we shouldn't need to do this check.
+        let aligned_type =
+            if ty == self.cx.u128_type || ty == self.cx.i128_type {
+                ty
+            }
+            else {
+                ty.get_aligned(align.bytes())
+            };
+        // TODO(antoyo): It might be better to return an LValue, but fixing the rustc API is non-trivial.
+        self.stack_var_count.set(self.stack_var_count.get() + 1);
+        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+    }
+
+    fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        let block = self.llbb();
+        let function = block.get_function();
+        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+        // the current basic block. Otherwise, it could be used in another basic block, causing a
+        // dereference after a drop, for instance.
+        // TODO(antoyo): handle align.
+        let deref = ptr.dereference(None).to_rvalue();
+        let value_type = deref.get_type();
+        unsafe { RETURN_VALUE_COUNT += 1 };
+        let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
+        block.add_assignment(None, loaded_value, deref);
+        loaded_value.to_rvalue()
+    }
+
+    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
+        ptr.dereference(None).to_rvalue()
+    }
+
+    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+        // TODO(antoyo): use ty.
+        // TODO(antoyo): handle alignment.
+        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
+        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
+        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+        self.context.new_call(None, atomic_load, &[ptr, ordering])
+    }
+
+    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
+        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+        if place.layout.is_zst() {
+            return OperandRef::new_zst(self, place.layout);
+        }
+
+        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+            let vr = scalar.valid_range.clone();
+            match scalar.value {
+                abi::Int(..) => {
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, scalar.valid_range);
+                    }
+                }
+                abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
+                    bx.nonnull_metadata(load);
+                }
+                _ => {}
+            }
+        }
+
+        let val =
+            if let Some(llextra) = place.llextra {
+                OperandValue::Ref(place.llval, Some(llextra), place.align)
+            }
+            else if place.layout.is_gcc_immediate() {
+                let load = self.load(place.llval.get_type(), place.llval, place.align);
+                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                    scalar_load_metadata(self, load, scalar);
+                }
+                OperandValue::Immediate(self.to_immediate(load, place.layout))
+            }
+            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+                let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+                let pair_type = place.layout.gcc_type(self, false);
+
+                let mut load = |i, scalar: &abi::Scalar, align| {
+                    let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+                    let load = self.load(llptr.get_type(), llptr, align);
+                    scalar_load_metadata(self, load, scalar);
+                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+                };
+
+                OperandValue::Pair(
+                    load(0, a, place.align),
+                    load(1, b, place.align.restrict_for_offset(b_offset)),
+                )
+            }
+            else {
+                OperandValue::Ref(place.llval, None, place.align)
+            };
+
+        OperandRef { val, layout: place.layout }
+    }
+
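+    // Lowers "store `cg_elem` into each of the `count` elements of `dest`" to an explicit
+    // loop: the header block compares the current pointer against `end`, and the body
+    // stores the element and advances the pointer.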
+    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+        let zero = self.const_usize(0);
+        let count = self.const_usize(count);
+        let start = dest.project_index(&mut self, zero).llval;
+        let end = dest.project_index(&mut self, count).llval;
+
+        let mut header_bx = self.build_sibling_block("repeat_loop_header");
+        let mut body_bx = self.build_sibling_block("repeat_loop_body");
+        let next_bx = self.build_sibling_block("repeat_loop_next");
+
+        let ptr_type = start.get_type();
+        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+        let current_val = current.to_rvalue();
+        self.assign(current, start);
+
+        self.br(header_bx.llbb());
+
+        let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
+        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
+
+        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+
+        let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
+        body_bx.llbb().add_assignment(None, current, next);
+        body_bx.br(header_bx.llbb());
+
+        next_bx
+    }
+
+    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
+        // TODO(antoyo)
+    }
+
+    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+        self.store_with_flags(val, ptr, align, MemFlags::empty())
+    }
+
+    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
+        let ptr = self.check_store(val, ptr);
+        self.llbb().add_assignment(None, ptr.dereference(None), val);
+        // TODO(antoyo): handle align and flags.
+        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+        self.cx.context.new_rvalue_zero(self.type_i32())
+    }
+
+    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+        // TODO(antoyo): handle alignment.
+        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
+        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+
+        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+        // the following cast is required to avoid this error:
+        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
+        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+        let value = self.context.new_cast(None, value, int_type);
+        self.llbb()
+            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+    }
+
+    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+        let mut result = ptr;
+        for index in indices {
+            result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
+        }
+        result
+    }
+
+    fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be safer to do the same thing as gep (a loop).
+        // TODO(antoyo): specify inbounds somehow.
+        match indices.len() {
+            1 => {
+                self.context.new_array_access(None, ptr, indices[0]).get_address(None)
+            },
+            2 => {
+                let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
+                self.context.new_array_access(None, array, indices[1]).get_address(None)
+            },
+            _ => unimplemented!(),
+        }
+    }
+
+    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value = ptr.dereference(None).to_rvalue();
+
+        if value_type.is_array().is_some() {
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, value, index);
+            element.get_address(None)
+        }
+        else if let Some(vector_type) = value_type.is_vector() {
+            let array_type = vector_type.get_element_type().make_pointer();
+            let array = self.bitcast(ptr, array_type);
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, array, index);
+            element.get_address(None)
+        }
+        else if let Some(struct_type) = value_type.is_struct() {
+            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
+        }
+        else {
+            panic!("Unexpected type {:?}", value_type);
+        }
+    }
+
+    /* Casts */
+    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed truncates the value.
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): check that it indeed sign extends the value.
+        if dest_ty.is_vector().is_some() {
+            // TODO(antoyo): nothing to do as it is only for LLVM?
+            return value;
+        }
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): make sure it truncates.
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, value, dest_ty)
+    }
+
+    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
+    }
+
+    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
+    }
+
+    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        self.cx.const_bitcast(value, dest_ty)
+    }
+
+    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+        // NOTE: is_signed is for value, not dest_typ.
+        self.cx.context.new_cast(None, value, dest_typ)
+    }
+
+    fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let val_type = value.get_type();
+        match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
+            (false, true) => {
+                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
+                // a pointer, which is not supported by gccjit.
+                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
+            },
+            (false, false) => {
+                // When they are not pointers, we want a transmute (or reinterpret_cast).
+                self.bitcast(value, dest_ty)
+            },
+            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+            (true, false) => unimplemented!(),
+        }
+    }
+
+    /* Comparisons */
+    fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+        let left_type = lhs.get_type();
+        let right_type = rhs.get_type();
+        if left_type != right_type {
+            // NOTE: because libgccjit cannot compare function pointers.
+            if left_type.is_function_ptr_type().is_some() && right_type.is_function_ptr_type().is_some() {
+                lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
+                rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+            }
+            // NOTE: hack because we try to cast a vector type to the same vector type.
+            else if format!("{:?}", left_type) != format!("{:?}", right_type) {
+                rhs = self.context.new_cast(None, rhs, left_type);
+            }
+        }
+        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+    }
+
+    fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+    }
+
+    /* Miscellaneous instructions */
+    fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+        if flags.contains(MemFlags::NONTEMPORAL) {
+            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+            let val = self.load(src.get_type(), src, src_align);
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+            self.store_with_flags(val, ptr, dst_align, flags);
+            return;
+        }
+        let size = self.intcast(size, self.type_size_t(), false);
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+        let memcpy = self.context.get_builtin_function("memcpy");
+        let block = self.block.expect("block");
+        // TODO(antoyo): handle aligns and is_volatile.
+        block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+    }
+
+    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+        if flags.contains(MemFlags::NONTEMPORAL) {
+            // HACK(nox): This is inefficient but there is no nontemporal memmove.
+            let val = self.load(src.get_type(), src, src_align);
+            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+            self.store_with_flags(val, ptr, dst_align, flags);
+            return;
+        }
+        let size = self.intcast(size, self.type_size_t(), false);
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+
+        let memmove = self.context.get_builtin_function("memmove");
+        let block = self.block.expect("block");
+        // TODO(antoyo): handle is_volatile.
+        block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+    }
+
+    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+        let _is_volatile = flags.contains(MemFlags::VOLATILE);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let memset = self.context.get_builtin_function("memset");
+        let block = self.block.expect("block");
+        // TODO(antoyo): handle align and is_volatile.
+        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+        let size = self.intcast(size, self.type_size_t(), false);
+        block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+    }
+
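+    // Lowers a select (`cond ? then_val : else_val`) to an if/else diamond that assigns
+    // a local variable, which is then read back as the result.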
+    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+        let func = self.current_func();
+        let variable = func.new_local(None, then_val.get_type(), "selectVar");
+        let then_block = func.new_block("then");
+        let else_block = func.new_block("else");
+        let after_block = func.new_block("after");
+        self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+        then_block.add_assignment(None, variable, then_val);
+        then_block.end_with_jump(None, after_block);
+
+        if then_val.get_type() != else_val.get_type() {
+            else_val = self.context.new_cast(None, else_val, then_val.get_type());
+        }
+        else_block.add_assignment(None, variable, else_val);
+        else_block.end_with_jump(None, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // state need to be updated.
+        self.block = Some(after_block);
+        *self.cx.current_block.borrow_mut() = Some(after_block);
+
+        variable.to_rvalue()
+    }
+
+    #[allow(dead_code)]
+    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        if value_type.is_array().is_some() {
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, aggregate_value, index);
+            element.get_address(None)
+        }
+        else if value_type.is_vector().is_some() {
+            panic!();
+        }
+        else if let Some(pointer_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointer_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+            }
+            else {
+                panic!("Unexpected type {:?}", value_type);
+            }
+        }
+        else if let Some(struct_type) = value_type.is_struct() {
+            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+        }
+        else {
+            panic!("Unexpected type {:?}", value_type);
+        }
+    }
+
+    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        let lvalue =
+            if value_type.is_array().is_some() {
+                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+                self.context.new_array_access(None, aggregate_value, index)
+            }
+            else if value_type.is_vector().is_some() {
+                panic!();
+            }
+            else if let Some(pointer_type) = value_type.get_pointee() {
+                if let Some(struct_type) = pointer_type.is_struct() {
+                    // NOTE: hack to work around a limitation of the rustc API: see comment on
+                    // CodegenCx.structs_as_pointer
+                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
+                }
+                else {
+                    panic!("Unexpected type {:?}", value_type);
+                }
+            }
+            else {
+                panic!("Unexpected type {:?}", value_type);
+            };
+
+        let lvalue_type = lvalue.to_rvalue().get_type();
+        let value =
+            // NOTE: sometimes, rustc will create a value with the wrong type.
+            if lvalue_type != value.get_type() {
+                self.context.new_cast(None, value, lvalue_type)
+            }
+            else {
+                value
+            };
+
+        self.llbb().add_assignment(None, lvalue, value);
+
+        aggregate_value
+    }
+
+    fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
+        let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
+        let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
+        let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
+        self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
+            .to_rvalue()
+        // TODO(antoyo): Properly implement unwinding.
+        // The above is just to make compilation work, as rustc_codegen_ssa now seems
+        // to call the unwinding builder methods even with panic=abort.
+    }
+
+    fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+    }
+
+    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+    }
+
+    fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
+        unimplemented!();
+    }
+
+    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    // Atomic Operations
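+    // NOTE: `atomic_cmpxchg` must return a (value, success) pair like LLVM's `cmpxchg`:
+    // field 0 holds the value that was read (the builtin writes the old value back into
+    // `expected`), field 1 holds the success flag.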
+    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+        self.llbb().add_assignment(None, expected, cmp);
+        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
+
+        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
+        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
+        let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
+
+        let value_type = result.to_rvalue().get_type();
+        if let Some(struct_type) = value_type.is_struct() {
+            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
+            // NOTE: since `success` contains the call to the builtin, it must be stored
+            // first so that `expected` is only read after the call has happened.
+            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
+        }
+        // TODO(antoyo): handle when value is not a struct.
+
+        result.to_rvalue()
+    }
+
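+    // The RMW operation is lowered to the matching `__atomic_*_N` GCC builtin, where
+    // `N` is the operand size in bytes: e.g. an `AtomicAdd` on a 32-bit integer becomes
+    // a call to `__atomic_fetch_add_4`. Min/max have no such builtin and are handled
+    // by `atomic_extremum` instead.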
+    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type()) / 8;
+        let name =
+            match op {
+                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+            };
+
+        let atomic_function = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        // FIXME(antoyo): not sure why, but we have the wrong type here.
+        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+        let src = self.context.new_cast(None, src, new_src_type);
+        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+        self.context.new_cast(None, res, src.get_type())
+    }
+
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+        let name =
+            match scope {
+                SynchronizationScope::SingleThread => "__atomic_signal_fence",
+                SynchronizationScope::CrossThread => "__atomic_thread_fence",
+            };
+        let thread_fence = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+    }
+
+    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+        // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
+        self.normal_function_addresses.borrow_mut().insert(load);
+        // TODO(antoyo)
+    }
+
+    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO(antoyo)
+    }
+
+    fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // FIXME(antoyo): remove when having a proper API.
+        let gcc_func = unsafe { std::mem::transmute(func) };
+        if self.functions.borrow().values().any(|value| *value == gcc_func) {
+            self.function_call(func, args, funclet)
+        }
+        else {
+            // If it's not a function that was defined, it's a function pointer.
+            self.function_ptr_call(func, args, funclet)
+        }
+    }
+
+    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        // FIXME(antoyo): this does not zero-extend.
+        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
+            // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
+            // Fix the code in codegen_ssa::base::from_immediate.
+            return value;
+        }
+        self.context.new_cast(None, value, dest_typ)
+    }
+
+    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+        self.cx
+    }
+
+    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+        unimplemented!();
+    }
+
+    fn set_span(&mut self, _span: Span) {}
+
+    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+        if self.cx().val_ty(val) == self.cx().type_i1() {
+            self.zext(val, self.cx().type_i8())
+        }
+        else {
+            val
+        }
+    }
+
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+        if scalar.is_bool() {
+            return self.trunc(val, self.cx().type_i1());
+        }
+        val
+    }
+
+    fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+        None
+    }
+
+    fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+        None
+    }
+
+    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+        unimplemented!();
+    }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
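+    // NOTE: implemented by declaring the x86 intrinsic `_mm_shuffle_epi8` as an extern
+    // function and calling it with the two vectors and the mask.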
+    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+        let return_type = v1.get_type();
+        let params = [
+            self.context.new_parameter(None, return_type, "v1"),
+            self.context.new_parameter(None, return_type, "v2"),
+            self.context.new_parameter(None, mask.get_type(), "mask"),
+        ];
+        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
+        self.context.new_call(None, shuffle, &[v1, v2, mask])
+    }
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+        // Forward to the `get_static` method of `CodegenCx`
+        self.cx().get_static(def_id).get_address(None)
+    }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        self.cx.param_env()
+    }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.cx.target_spec()
+    }
+}
+
+trait ToGccComp {
+    fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        match *self {
+            IntPredicate::IntEQ => ComparisonOp::Equals,
+            IntPredicate::IntNE => ComparisonOp::NotEquals,
+            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntULT => ComparisonOp::LessThan,
+            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntSLT => ComparisonOp::LessThan,
+            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+        }
+    }
+}
+
+impl ToGccComp for RealPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        // TODO(antoyo): check that ordered vs non-ordered is respected.
+        match *self {
+            RealPredicate::RealPredicateFalse => unreachable!(),
+            RealPredicate::RealOEQ => ComparisonOp::Equals,
+            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealOLT => ComparisonOp::LessThan,
+            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealONE => ComparisonOp::NotEquals,
+            RealPredicate::RealORD => unreachable!(),
+            RealPredicate::RealUNO => unreachable!(),
+            RealPredicate::RealUEQ => ComparisonOp::Equals,
+            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+            RealPredicate::RealULT => ComparisonOp::LessThan,
+            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+            RealPredicate::RealUNE => ComparisonOp::NotEquals,
+            RealPredicate::RealPredicateTrue => unreachable!(),
+        }
+    }
+}
+
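+// NOTE: the discriminants below must match GCC's `__ATOMIC_*` memory order constants
+// (`__ATOMIC_RELAXED` == 0 through `__ATOMIC_SEQ_CST` == 5), since they are passed as
+// plain integers to the `__atomic_*` builtins.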
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+    __ATOMIC_RELAXED,
+    __ATOMIC_CONSUME,
+    __ATOMIC_ACQUIRE,
+    __ATOMIC_RELEASE,
+    __ATOMIC_ACQ_REL,
+    __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+    fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+    fn to_gcc(self) -> i32 {
+        use MemOrdering::*;
+
+        let ordering =
+            match self {
+                AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+                AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+                AtomicOrdering::Release => __ATOMIC_RELEASE,
+                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+            };
+        ordering as i32
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
new file mode 100644 (file)
index 0000000..76419b1
--- /dev/null
@@ -0,0 +1,77 @@
+use gccjit::{FunctionType, RValue};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
+    let tcx = cx.tcx();
+
+    assert!(!instance.substs.needs_infer());
+    assert!(!instance.substs.has_escaping_bound_vars());
+
+    if let Some(&func) = cx.function_instances.borrow().get(&instance) {
+        return func;
+    }
+
+    let sym = tcx.symbol_name(instance).name;
+
+    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+    let func =
+        if let Some(func) = cx.get_declared_value(&sym) {
+            // Create a fn pointer with the new signature.
+            let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+            // This is subtle and surprising, but sometimes we have to bitcast
+            // the resulting fn pointer.  The reason has to do with external
+            // functions.  If you have two crates that both bind the same C
+            // library, they may not use precisely the same types: for
+            // example, they will probably each declare their own structs,
+            // which are distinct types from LLVM's point of view (nominal
+            // types).
+            //
+            // Now, if those two crates are linked into an application, and
+            // they contain inlined code, you can wind up with a situation
+            // where both of those functions wind up being loaded into this
+            // application simultaneously. In that case, the same function
+            // (from LLVM's point of view) requires two types. But of course
+            // LLVM won't allow one function to have two types.
+            //
+            // What we currently do, therefore, is declare the function with
+            // one of the two types (whichever happens to come first) and then
+            // bitcast as needed when the function is referenced to make sure
+            // it has the type we expect.
+            //
+            // This can occur on either a crate-local or crate-external
+            // reference. It also occurs when testing libcore and in some
+            // other weird situations. Annoying.
+            if cx.val_ty(func) != ptrty {
+                // TODO(antoyo): cast the pointer.
+                func
+            }
+            else {
+                func
+            }
+        }
+        else {
+            cx.linkage.set(FunctionType::Extern);
+            let func = cx.declare_fn(&sym, &fn_abi);
+
+            // TODO(antoyo): set linkage and attributes.
+            func
+        };
+
+    cx.function_instances.borrow_mut().insert(instance, func);
+
+    func
+}
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
new file mode 100644 (file)
index 0000000..bda08b6
--- /dev/null
@@ -0,0 +1,450 @@
+use std::convert::TryFrom;
+use std::convert::TryInto;
+
+use gccjit::LValue;
+use gccjit::{Block, CType, RValue, Type, ToRValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+    BaseTypeMethods,
+    ConstMethods,
+    DerivedTypeMethods,
+    MiscMethods,
+    StaticMethods,
+};
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::ScalarInt;
+use rustc_middle::ty::layout::{TyAndLayout, LayoutOf};
+use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
+use rustc_span::Symbol;
+use rustc_target::abi::{self, HasDataLayout, Pointer, Size};
+
+use crate::consts::const_alloc_to_gcc;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
+        bytes_in_context(self, bytes)
+    }
+
+    fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> LValue<'gcc> {
+        // TODO(antoyo): handle null_terminated.
+        if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) {
+            return value;
+        }
+
+        let global = self.global_string(&*symbol.as_str());
+
+        self.const_cstr_cache.borrow_mut().insert(symbol, global);
+        global
+    }
+
+    fn global_string(&self, string: &str) -> LValue<'gcc> {
+        // TODO(antoyo): handle non-null-terminated strings.
+        let string = self.context.new_string_literal(&*string);
+        let sym = self.generate_local_symbol_name("str");
+        let global = self.declare_private_global(&sym, self.val_ty(string));
+        global.global_set_initializer_value(string);
+        global
+        // TODO(antoyo): set linkage.
+    }
+
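+    // NOTE: libgccjit does not support direct casts between integers and pointers, so
+    // both conversions below spill the value to a stack local and reinterpret the
+    // local's address, roughly `*(dest_ty*)&local`.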
+    pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let func = block.get_function();
+        let local = func.new_local(None, value.get_type(), "intLocal");
+        block.add_assignment(None, local, value);
+        let value_address = local.get_address(None);
+
+        let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
+        ptr.dereference(None).to_rvalue()
+    }
+
+    pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): when libgccjit allow casting from pointer to int, remove this.
+        let func = block.get_function();
+        let local = func.new_local(None, value.get_type(), "ptrLocal");
+        block.add_assignment(None, local, value);
+        let ptr_address = local.get_address(None);
+
+        let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
+        ptr.dereference(None).to_rvalue()
+    }
+}
+
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+    let context = &cx.context;
+    let byte_type = context.new_type::<u8>();
+    let typ = context.new_array_type(None, byte_type, bytes.len() as i32);
+    let elements: Vec<_> =
+        bytes.iter()
+        .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
+        .collect();
+    context.new_rvalue_from_array(None, typ, &elements)
+}
+
+pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
+    typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        if type_is_pointer(typ) {
+            self.context.new_null(typ)
+        }
+        else {
+            self.const_int(typ, 0)
+        }
+    }
+
+    fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        let local = self.current_func.borrow().expect("func")
+            .new_local(None, typ, "undefined");
+        if typ.is_struct().is_some() {
+            // NOTE: hack to work around a limitation of the rustc API: see comment on
+            // CodegenCx.structs_as_pointer
+            let pointer = local.get_address(None);
+            self.structs_as_pointer.borrow_mut().insert(pointer);
+            pointer
+        }
+        else {
+            local.to_rvalue()
+        }
+    }
+
+    fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+        self.context.new_rvalue_from_long(typ, int)
+    }
+
+    fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+        self.context.new_rvalue_from_long(typ, int as i64)
+    }
+
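+    // Values that do not fit in an i64 are built from two 64-bit halves: e.g. for
+    // `num == (1 << 64) + 3`, `low == 3` and `high == 1`, and the result is
+    // `(high << 64) | low`.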
+    fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+        let num64: Result<i64, _> = num.try_into();
+        if let Ok(num) = num64 {
+            // FIXME(antoyo): workaround for a bug where libgccjit is expecting a constant.
+            // The operations >> 64 and | low are making the normal case a non-constant.
+            return self.context.new_rvalue_from_long(typ, num as i64);
+        }
+
+        if num >> 64 != 0 {
+            // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+            let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+            let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
+
+            let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+            (high << sixty_four) | self.context.new_cast(None, low, typ)
+        }
+        else if typ.is_i128(self) {
+            let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+            self.context.new_cast(None, num, typ)
+        }
+        else {
+            self.context.new_rvalue_from_long(typ, num as u64 as i64)
+        }
+    }
+
+    fn const_bool(&self, val: bool) -> RValue<'gcc> {
+        self.const_uint(self.type_i1(), val as u64)
+    }
+
+    fn const_i32(&self, i: i32) -> RValue<'gcc> {
+        self.const_int(self.type_i32(), i as i64)
+    }
+
+    fn const_u32(&self, i: u32) -> RValue<'gcc> {
+        self.const_uint(self.type_u32(), i as u64)
+    }
+
+    fn const_u64(&self, i: u64) -> RValue<'gcc> {
+        self.const_uint(self.type_u64(), i)
+    }
+
+    fn const_usize(&self, i: u64) -> RValue<'gcc> {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // make sure it doesn't overflow
+            assert!(i < (1 << bit_size));
+        }
+
+        self.const_uint(self.usize_type, i)
+    }
+
+    fn const_u8(&self, _i: u8) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
+        let len = s.as_str().len();
+        let cs = self.const_ptrcast(self.const_cstr(s, false).get_address(None),
+            self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
+        );
+        (cs, self.const_usize(len as u64))
+    }
+
+    fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+        let fields: Vec<_> = values.iter()
+            .map(|value| value.get_type())
+            .collect();
+        // TODO(antoyo): cache the type? It's anonymous, so probably not.
+        let typ = self.type_struct(&fields, packed);
+        let struct_type = typ.is_struct().expect("struct type");
+        self.context.new_rvalue_from_struct(None, struct_type, values)
+    }
+
+    fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
+        // TODO(antoyo)
+        None
+    }
+
+    fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
+        // TODO(antoyo)
+        None
+    }
+
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        match cv {
+            Scalar::Int(ScalarInt::ZST) => {
+                assert_eq!(0, layout.value.size(self).bytes());
+                self.const_undef(self.type_ix(0))
+            }
+            Scalar::Int(int) => {
+                let data = int.assert_bits(layout.value.size(self));
+
+                // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
+                // the paths for floating-point values.
+                if ty == self.float_type {
+                    return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+                }
+                else if ty == self.double_type {
+                    return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
+                }
+
+                let value = self.const_uint_big(self.type_ix(bitsize), data);
+                if layout.value == Pointer {
+                    self.inttoptr(self.current_block.borrow().expect("block"), value, ty)
+                } else {
+                    self.const_bitcast(value, ty)
+                }
+            }
+            Scalar::Ptr(ptr, _size) => {
+                let (alloc_id, offset) = ptr.into_parts();
+                let base_addr =
+                    match self.tcx.global_alloc(alloc_id) {
+                        GlobalAlloc::Memory(alloc) => {
+                            let init = const_alloc_to_gcc(self, alloc);
+                            let value =
+                                match alloc.mutability {
+                                    Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                                    _ => self.static_addr_of(init, alloc.align, None),
+                                };
+                            if !self.sess().fewer_names() {
+                                // TODO(antoyo): set value name.
+                            }
+                            value
+                        },
+                        GlobalAlloc::Function(fn_instance) => {
+                            self.get_fn_addr(fn_instance)
+                        },
+                        GlobalAlloc::Static(def_id) => {
+                            assert!(self.tcx.is_static(def_id));
+                            self.get_static(def_id).get_address(None)
+                        },
+                    };
+                let ptr_type = base_addr.get_type();
+                let base_addr = self.const_bitcast(base_addr, self.usize_type);
+                let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+                let ptr = self.const_bitcast(base_addr + offset, ptr_type);
+                if layout.value != Pointer {
+                    self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
+                }
+                else {
+                    self.const_bitcast(ptr, ty)
+                }
+            }
+        }
+    }
+
+    fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value {
+        const_alloc_to_gcc(self, alloc)
+    }
+
+    fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: &Allocation, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> {
+        assert_eq!(alloc.align, layout.align.abi);
+        let ty = self.type_ptr_to(layout.gcc_type(self, true));
+        let value =
+            if layout.size == Size::ZERO {
+                let value = self.const_usize(alloc.align.bytes());
+                self.context.new_cast(None, value, ty)
+            }
+            else {
+                let init = const_alloc_to_gcc(self, alloc);
+                let base_addr = self.static_addr_of(init, alloc.align, None);
+
+                let array = self.const_bitcast(base_addr, self.type_i8p());
+                let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
+                self.const_bitcast(value, ty)
+            };
+        PlaceRef::new_sized(value, layout)
+    }
+
+    fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, val, ty)
+    }
+}
+
+pub trait SignType<'gcc, 'tcx> {
+    fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
+    fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
+    }
+
+    fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
+    }
+
+    fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if self.is_u8(cx) {
+            cx.i8_type
+        }
+        else if self.is_u16(cx) {
+            cx.i16_type
+        }
+        else if self.is_u32(cx) {
+            cx.i32_type
+        }
+        else if self.is_u64(cx) {
+            cx.i64_type
+        }
+        else if self.is_u128(cx) {
+            cx.i128_type
+        }
+        else {
+            self.clone()
+        }
+    }
+
+    fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if self.is_i8(cx) {
+            cx.u8_type
+        }
+        else if self.is_i16(cx) {
+            cx.u16_type
+        }
+        else if self.is_i32(cx) {
+            cx.u32_type
+        }
+        else if self.is_i64(cx) {
+            cx.u64_type
+        }
+        else if self.is_i128(cx) {
+            cx.u128_type
+        }
+        else {
+            self.clone()
+        }
+    }
+}
+
+pub trait TypeReflection<'gcc, 'tcx> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u8_type
+    }
+
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u16_type
+    }
+
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.uint_type
+    }
+
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulong_type
+    }
+
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulonglong_type
+    }
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i8_type
+    }
+
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u8_type
+    }
+
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i16_type
+    }
+
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u16_type
+    }
+
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i32_type
+    }
+
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u32_type
+    }
+
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i64_type
+    }
+
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u64_type
+    }
+
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_c_type(CType::Int128t)
+    }
+
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_c_type(CType::UInt128t)
+    }
+
+    fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_type::<f32>()
+    }
+
+    fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_type::<f64>()
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
new file mode 100644 (file)
index 0000000..205498a
--- /dev/null
@@ -0,0 +1,390 @@
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::{bug, span_bug};
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
+        if value.get_type() == self.bool_type.make_pointer() {
+            if let Some(pointee) = typ.get_pointee() {
+                if pointee.is_vector().is_some() {
+                    panic!()
+                }
+            }
+        }
+        self.context.new_bitcast(None, value, typ)
+    }
+}
+
+impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
+    fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+        if let Some(global_value) = self.const_globals.borrow().get(&cv) {
+            // TODO(antoyo): upgrade alignment.
+            return *global_value;
+        }
+        let global_value = self.static_addr_of_mut(cv, align, kind);
+        // TODO(antoyo): set global constant.
+        self.const_globals.borrow_mut().insert(cv, global_value);
+        global_value
+    }
+
+    fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+        let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+        let value =
+            match codegen_static_initializer(&self, def_id) {
+                Ok((value, _)) => value,
+                // Error has already been reported
+                Err(_) => return,
+            };
+
+        let global = self.get_static(def_id);
+
+        // boolean SSA values are i1, but they have to be stored in i8 slots,
+        // otherwise some LLVM optimization passes don't work as expected
+        let val_llty = self.val_ty(value);
+        let value =
+            if val_llty == self.type_i1() {
+                unimplemented!();
+            }
+            else {
+                value
+            };
+
+        let instance = Instance::mono(self.tcx, def_id);
+        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+        // TODO(antoyo): set alignment.
+
+        let value =
+            if value.get_type() != gcc_type {
+                self.context.new_bitcast(None, value, gcc_type)
+            }
+            else {
+                value
+            };
+        global.global_set_initializer_value(value);
+
+        // As an optimization, all shared statics which do not have interior
+        // mutability are placed into read-only memory.
+        if !is_mutable {
+            if self.type_is_freeze(ty) {
+                // TODO(antoyo): set global constant.
+            }
+        }
+
+        if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+            // Do not allow LLVM to change the alignment of a TLS on macOS.
+            //
+            // By default a global's alignment can be freely increased.
+            // This allows LLVM to generate more performant instructions
+            // e.g., using load-aligned into a SIMD register.
+            //
+            // However, on macOS 10.10 or below, the dynamic linker does not
+            // respect any alignment given on the TLS (radar 24221680).
+            // This violates the alignment assumption and causes segfaults at runtime.
+            //
+            // This bug is very easy to trigger. In `println!` and `panic!`,
+            // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+            // whose values are `mem::replace`d on initialization.
+            // The implementation of `mem::replace` will use SIMD
+            // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+            // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+            // (see issues #51794, #51758, #50867, #48866 and #44056).
+            //
+            // To work around the bug, we trick LLVM into not increasing
+            // the global's alignment by explicitly assigning a section to it
+            // (equivalent to automatically generating a `#[link_section]` attribute).
+            // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+            // of `lib/IR/Globals.cpp` for why this works.
+            //
+            // When the alignment is not increased, the optimized `mem::replace`
+            // will use load-unaligned instructions instead, thus avoiding the crash.
+            //
+            // We could remove this hack whenever we decide to drop macOS 10.10 support.
+            if self.tcx.sess.target.options.is_like_osx {
+                // The `inspect` method is okay here because we checked relocations, and
+                // because we are doing this access to inspect the final interpreter state
+                // (not as part of the interpreter execution).
+                //
+                // FIXME: This check requires that the (arbitrary) value of undefined bytes
+                // happens to be zero. Instead, we should only check the value of defined bytes
+                // and set all undefined bytes to zero if this allocation is headed for the
+                // BSS.
+                unimplemented!();
+            }
+        }
+
+        // Wasm statics with custom link sections get special treatment as they
+        // go into custom sections of the wasm executable.
+        if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+            if let Some(_section) = attrs.link_section {
+                unimplemented!();
+            }
+        } else {
+            // TODO(antoyo): set link section.
+        }
+
+        if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+            self.add_used_global(global.to_rvalue());
+        }
+    }
+
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    fn add_used_global(&self, _global: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn add_compiler_used_global(&self, _global: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+        let global =
+            match kind {
+                Some(kind) if !self.tcx.sess.fewer_names() => {
+                    let name = self.generate_local_symbol_name(kind);
+                    // TODO(antoyo): check if it's okay that TLS is off here.
+                    // TODO(antoyo): check if it's okay that link_section is None here.
+                    // TODO(antoyo): set alignment here as well.
+                    let global = self.define_global(&name[..], self.val_ty(cv), false, None);
+                    // TODO(antoyo): set linkage.
+                    global
+                }
+                _ => {
+                    let typ = self.val_ty(cv).get_aligned(align.bytes());
+                    let global = self.declare_unnamed_global(typ);
+                    global
+                },
+            };
+        // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
+        // globally.
+        global.global_set_initializer_value(cv);
+        // TODO(antoyo): set unnamed address.
+        global.get_address(None)
+    }
+
+    pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
+        let instance = Instance::mono(self.tcx, def_id);
+        let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+        if let Some(&global) = self.instances.borrow().get(&instance) {
+            return global;
+        }
+
+        let defined_in_current_codegen_unit =
+            self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+        assert!(
+            !defined_in_current_codegen_unit,
+            "consts::get_static() should always hit the cache for \
+                 statics defined in the same CGU, but did not for `{:?}`",
+            def_id
+        );
+
+        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let sym = self.tcx.symbol_name(instance).name;
+
+        let global =
+            if let Some(def_id) = def_id.as_local() {
+                let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+                let llty = self.layout_of(ty).gcc_type(self, true);
+                // FIXME: refactor this to work without accessing the HIR
+                let global = match self.tcx.hir().get(id) {
+                    Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => {
+                        if let Some(global) = self.get_declared_value(&sym) {
+                            if self.val_ty(global) != self.type_ptr_to(llty) {
+                                span_bug!(span, "Conflicting types for static");
+                            }
+                        }
+
+                        let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+                        let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
+
+                        if !self.tcx.is_reachable_non_generic(def_id) {
+                            // TODO(antoyo): set visibility.
+                        }
+
+                        global
+                    }
+
+                    Node::ForeignItem(&hir::ForeignItem {
+                        span,
+                        kind: hir::ForeignItemKind::Static(..),
+                        ..
+                    }) => {
+                        let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+                        check_and_apply_linkage(&self, &fn_attrs, ty, sym, span)
+                    }
+
+                    item => bug!("get_static: expected static, found {:?}", item),
+                };
+
+                global
+            }
+            else {
+                // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+                //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
+
+                let attrs = self.tcx.codegen_fn_attrs(def_id);
+                let span = self.tcx.def_span(def_id);
+                let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+
+                let needs_dll_storage_attr = false; // TODO(antoyo)
+
+                // If this assertion triggers, there's something wrong with commandline
+                // argument validation.
+                debug_assert!(
+                    !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                        && self.tcx.sess.target.options.is_like_msvc
+                        && self.tcx.sess.opts.cg.prefer_dynamic)
+                );
+
+                if needs_dll_storage_attr {
+                    // This item is external but not foreign, i.e., it originates from an external Rust
+                    // crate. Since we don't know whether this crate will be linked dynamically or
+                    // statically in the final application, we always mark such symbols as 'dllimport'.
+                    // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+                    // to make things work.
+                    //
+                    // However, in some scenarios we defer emission of statics to downstream
+                    // crates, so there are cases where a static with an upstream DefId
+                    // is actually present in the current crate. We can find out via the
+                    // is_codegened_item query.
+                    if !self.tcx.is_codegened_item(def_id) {
+                        unimplemented!();
+                    }
+                }
+                global
+            };
+
+        // TODO(antoyo): set dll storage class.
+
+        self.instances.borrow_mut().insert(instance, global);
+        global
+    }
+}
+
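+// The allocation is emitted as a packed struct that alternates runs of raw bytes with
+// relocated pointers, in offset order: the bytes before each relocation, then the
+// pointer itself, then any trailing bytes.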
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: &Allocation) -> RValue<'gcc> {
+    let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+    let dl = cx.data_layout();
+    let pointer_size = dl.pointer_size.bytes() as usize;
+
+    let mut next_offset = 0;
+    for &(offset, alloc_id) in alloc.relocations().iter() {
+        let offset = offset.bytes();
+        assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            // This `inspect` is okay since we have checked that it is not within a relocation, it
+            // is within the bounds of the allocation, and it doesn't affect interpreter execution
+            // (we inspect the result after interpreter execution). Any undef byte is replaced with
+            // some arbitrary byte value.
+            //
+            // FIXME: relay undef bytes to codegen as undef const bytes
+            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+            llvals.push(cx.const_bytes(bytes));
+        }
+        let ptr_offset =
+            read_target_uint(dl.endian,
+                // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+                // affect interpreter execution (we inspect the result after interpreter execution),
+                // and we properly interpret the relocation as a relocation pointer offset.
+                alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+            )
+            .expect("const_alloc_to_llvm: could not read relocation pointer")
+            as u64;
+        llvals.push(cx.scalar_to_backend(
+            InterpScalar::from_pointer(
+                interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+                &cx.tcx,
+            ),
+            abi::Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+            cx.type_i8p(),
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.len() >= next_offset {
+        let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all relocations, it is
+        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+        // inspect the result after interpreter execution). Any undef byte is replaced with some
+        // arbitrary byte value.
+        //
+        // FIXME: relay undef bytes to codegen as undef const bytes
+        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+        llvals.push(cx.const_bytes(bytes));
+    }
+
+    cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, &'tcx Allocation), ErrorHandled> {
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
+    Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> {
+    let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+    let llty = cx.layout_of(ty).gcc_type(cx, true);
+    if let Some(linkage) = attrs.linkage {
+        // If this is a static with a linkage specified, then we need to handle
+        // it a little specially. The typesystem prevents things like &T and
+        // extern "C" fn() from being non-null, so we can't just declare a
+        // static and call it a day. Some linkages (like weak) will make it such
+        // that the static actually has a null value.
+        let llty2 =
+            if let ty::RawPtr(ref mt) = ty.kind() {
+                cx.layout_of(mt.ty).gcc_type(cx, true)
+            }
+            else {
+                cx.sess().span_fatal(
+                    span,
+                    "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+                )
+            };
+        // Declare a symbol `foo` with the desired linkage.
+        let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
+
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`.  If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+        let mut real_name = "_rust_extern_with_linkage_".to_string();
+        real_name.push_str(&sym);
+        let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section);
+        // TODO(antoyo): set linkage.
+        global2.global_set_initializer_value(global1.get_address(None));
+        // TODO(antoyo): use global_set_initializer() when it will work.
+        global2
+    }
+    else {
+        // Generate an external declaration.
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely. If we
+        // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread-local version of the
+        // symbol and another one doesn't.
+        cx.declare_global(&sym, llty, is_tls, attrs.link_section)
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
new file mode 100644 (file)
index 0000000..7677ade
--- /dev/null
@@ -0,0 +1,475 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{
+    Block,
+    Context,
+    CType,
+    Function,
+    FunctionType,
+    LValue,
+    RValue,
+    Struct,
+    Type,
+};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+    BackendTypes,
+    MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::span_bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_session::Session;
+use rustc_span::{Span, Symbol};
+use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+use crate::declare::mangle_name;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+    pub params: Vec<Type<'gcc>>,
+    pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+    pub check_overflow: bool,
+    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+    pub context: &'gcc Context<'gcc>,
+
+    // TODO(antoyo): First set it to a dummy block to avoid using Option?
+    pub current_block: RefCell<Option<Block<'gcc>>>,
+    pub current_func: RefCell<Option<Function<'gcc>>>,
+    pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+
+    pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+    pub tls_model: gccjit::TlsModel,
+
+    pub bool_type: Type<'gcc>,
+    pub i8_type: Type<'gcc>,
+    pub i16_type: Type<'gcc>,
+    pub i32_type: Type<'gcc>,
+    pub i64_type: Type<'gcc>,
+    pub i128_type: Type<'gcc>,
+    pub isize_type: Type<'gcc>,
+
+    pub u8_type: Type<'gcc>,
+    pub u16_type: Type<'gcc>,
+    pub u32_type: Type<'gcc>,
+    pub u64_type: Type<'gcc>,
+    pub u128_type: Type<'gcc>,
+    pub usize_type: Type<'gcc>,
+
+    pub int_type: Type<'gcc>,
+    pub uint_type: Type<'gcc>,
+    pub long_type: Type<'gcc>,
+    pub ulong_type: Type<'gcc>,
+    pub ulonglong_type: Type<'gcc>,
+    pub sizet_type: Type<'gcc>,
+
+    pub float_type: Type<'gcc>,
+    pub double_type: Type<'gcc>,
+
+    pub linkage: Cell<FunctionType>,
+    pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+    pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+    pub tcx: TyCtxt<'tcx>,
+
+    pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+    pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
+
+    /// Cache instances of monomorphic and polymorphic items
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
+    /// Cache function instances of monomorphic and polymorphic items
+    pub function_instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
+    /// Cache generated vtables
+    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+    /// Cache of emitted const globals (value -> global)
+    pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+    /// Cache of constant strings.
+    pub const_cstr_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
+
+    /// Cache of globals.
+    pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+
+    /// A counter that is used for generating local symbol names
+    local_gen_sym_counter: Cell<usize>,
+    pub global_gen_sym_counter: Cell<usize>,
+
+    eh_personality: Cell<Option<RValue<'gcc>>>,
+
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+    /// NOTE: a hack is used because the rustc API is not suitable for libgccjit: as such,
+    /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+    /// This set remembers which of these pointers were returned by that function so that
+    /// they can be dereferenced later.
+    /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+    pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+        let check_overflow = tcx.sess.overflow_checks();
+        // TODO(antoyo): fix this mess. libgccjit seems to return a random type when using new_int_type().
+        let isize_type = context.new_c_type(CType::LongLong);
+        let usize_type = context.new_c_type(CType::ULongLong);
+        let bool_type = context.new_type::<bool>();
+        let i8_type = context.new_type::<i8>();
+        let i16_type = context.new_type::<i16>();
+        let i32_type = context.new_type::<i32>();
+        let i64_type = context.new_c_type(CType::LongLong);
+        let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+        let u8_type = context.new_type::<u8>();
+        let u16_type = context.new_type::<u16>();
+        let u32_type = context.new_type::<u32>();
+        let u64_type = context.new_c_type(CType::ULongLong);
+        let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+
+        let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+        let float_type = context.new_type::<f32>();
+        let double_type = context.new_type::<f64>();
+
+        let int_type = context.new_c_type(CType::Int);
+        let uint_type = context.new_c_type(CType::UInt);
+        let long_type = context.new_c_type(CType::Long);
+        let ulong_type = context.new_c_type(CType::ULong);
+        let ulonglong_type = context.new_c_type(CType::ULongLong);
+        let sizet_type = context.new_c_type(CType::SizeT);
+
+        assert_eq!(isize_type, i64_type);
+        assert_eq!(usize_type, u64_type);
+
+        let mut functions = FxHashMap::default();
+        let builtins = [
+            "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
+            "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+            "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
+            "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
+            "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
+            "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
+            "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
+            "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+            "__builtin_expect_with_probability",
+        ];
+
+        for builtin in builtins.iter() {
+            functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+        }
+
+        Self {
+            check_overflow,
+            codegen_unit,
+            context,
+            current_block: RefCell::new(None),
+            current_func: RefCell::new(None),
+            normal_function_addresses: Default::default(),
+            functions: RefCell::new(functions),
+
+            tls_model,
+
+            bool_type,
+            i8_type,
+            i16_type,
+            i32_type,
+            i64_type,
+            i128_type,
+            isize_type,
+            usize_type,
+            u8_type,
+            u16_type,
+            u32_type,
+            u64_type,
+            u128_type,
+            int_type,
+            uint_type,
+            long_type,
+            ulong_type,
+            ulonglong_type,
+            sizet_type,
+
+            float_type,
+            double_type,
+
+            linkage: Cell::new(FunctionType::Internal),
+            instances: Default::default(),
+            function_instances: Default::default(),
+            vtables: Default::default(),
+            const_globals: Default::default(),
+            const_cstr_cache: Default::default(),
+            globals: Default::default(),
+            scalar_types: Default::default(),
+            types: Default::default(),
+            tcx,
+            struct_types: Default::default(),
+            types_with_fields_to_set: Default::default(),
+            local_gen_sym_counter: Cell::new(0),
+            global_gen_sym_counter: Cell::new(0),
+            eh_personality: Cell::new(None),
+            pointee_infos: Default::default(),
+            structs_as_pointer: Default::default(),
+        }
+    }
+
+    pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+        let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+        debug_assert!(self.functions.borrow().values().any(|value| *value == function),
+            "{:?} ({:?}) is not a function", value, value.get_type());
+        function
+    }
+
+    pub fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+}
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+    type Value = RValue<'gcc>;
+    type Function = RValue<'gcc>;
+
+    type BasicBlock = Block<'gcc>;
+    type Type = Type<'gcc>;
+    type Funclet = (); // TODO(antoyo)
+
+    type DIScope = (); // TODO(antoyo)
+    type DILocation = (); // TODO(antoyo)
+    type DIVariable = (); // TODO(antoyo)
+}
+
+impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+        &self.vtables
+    }
+
+    fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        let func = get_fn(self, instance);
+        *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
+        func
+    }
+
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        let func = get_fn(self, instance);
+        let func = self.rvalue_as_function(func);
+        let ptr = func.get_address(None);
+
+        // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
+        // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+
+        self.normal_function_addresses.borrow_mut().insert(ptr);
+
+        ptr
+    }
+
+    fn eh_personality(&self) -> RValue<'gcc> {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to codegen that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function, we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wired it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        if let Some(llpersonality) = self.eh_personality.get() {
+            return llpersonality;
+        }
+        let tcx = self.tcx;
+        let llfn = match tcx.lang_items().eh_personality() {
+            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+                ty::Instance::resolve(
+                    tcx,
+                    ty::ParamEnv::reveal_all(),
+                    def_id,
+                    tcx.intern_substs(&[]),
+                )
+                .unwrap().unwrap(),
+            ),
+            _ => {
+                let _name = if wants_msvc_seh(self.sess()) {
+                    "__CxxFrameHandler3"
+                } else {
+                    "rust_eh_personality"
+                };
+                //let func = self.declare_func(name, self.type_i32(), &[], true);
+                // FIXME(antoyo): this hack should not be needed. That will probably be removed when
+                // unwinding support is added.
+                self.context.new_rvalue_from_int(self.int_type, 0)
+            }
+        };
+        // TODO(antoyo): apply target cpu attributes.
+        self.eh_personality.set(Some(llfn));
+        llfn
+    }
+
+    fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+
+    fn check_overflow(&self) -> bool {
+        self.check_overflow
+    }
+
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+        self.codegen_unit
+    }
+
+    fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+        unimplemented!();
+    }
+
+    fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
+        // TODO(antoyo)
+    }
+
+    fn create_used_variable(&self) {
+        unimplemented!();
+    }
+
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        if self.get_declared_value("main").is_none() {
+            Some(self.declare_cfn("main", fn_type))
+        }
+        else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            // instead of #[start]
+            None
+        }
+    }
+
+    fn compiler_used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+        unimplemented!()
+    }
+
+    fn create_compiler_used_variable(&self) {
+        unimplemented!()
+    }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
+
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        if let LayoutError::SizeOverflow(_) = err {
+            self.sess().span_fatal(span, &err.to_string())
+        } else {
+            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+        }
+    }
+}
+
+impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+            self.sess().span_fatal(span, &err.to_string())
+        } else {
+            match fn_abi_request {
+                FnAbiRequest::OfFnPtr { sig, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+                        sig,
+                        extra_args,
+                        err
+                    );
+                }
+                FnAbiRequest::OfInstance { instance, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_instance({}, {:?})` failed: {}",
+                        instance,
+                        extra_args,
+                        err
+                    );
+                }
+            }
+        }
+    }
+}
+
+impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        ParamEnv::reveal_all()
+    }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+    /// Generates a new symbol name with the given prefix. This symbol name must
+    /// only be used for definitions with `internal` or `private` linkage.
+    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user-defined names.
+        let mut name = String::with_capacity(prefix.len() + 6);
+        name.push_str(prefix);
+        name.push_str(".");
+        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+        name
+    }
+}
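+
+// A standalone sketch of the suffix encoding used by
+// generate_local_symbol_name above; the base-62 alphabet below is an
+// assumption matching base_n::ALPHANUMERIC_ONLY, and push_base62 stands
+// in for base_n::push_str.
+#[cfg(test)]
+mod local_symbol_name_sketch {
+    fn push_base62(mut n: u128, out: &mut String) {
+        const DIGITS: &[u8] =
+            b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+        let mut digits = Vec::new();
+        loop {
+            digits.push(DIGITS[(n % 62) as usize]);
+            n /= 62;
+            if n == 0 {
+                break;
+            }
+        }
+        digits.reverse();
+        out.push_str(std::str::from_utf8(&digits).unwrap());
+    }
+
+    #[test]
+    fn suffixes() {
+        let mut name = String::from("str.");
+        push_base62(0, &mut name);
+        assert_eq!(name, "str.0"); // first symbol for the "str" prefix
+        let mut name = String::from("str.");
+        push_base62(63, &mut name);
+        assert_eq!(name, "str.11"); // 63 == 1 * 62 + 1
+    }
+}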
+
+pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
+    let name = &codegen_unit.name().to_string();
+    mangle_name(&name.replace('-', "_"))
+}
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+    match tls_model {
+        TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+        TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+        TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+        TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
new file mode 100644 (file)
index 0000000..872fc24
--- /dev/null
@@ -0,0 +1,69 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::{
+    CodeRegion,
+    CounterValueReference,
+    ExpressionOperandId,
+    InjectedExpressionId,
+    Op,
+};
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn set_function_source_hash(
+        &mut self,
+        _instance: Instance<'tcx>,
+        _function_source_hash: u64,
+    ) -> bool {
+        unimplemented!();
+    }
+
+    fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
+        // TODO(antoyo)
+        false
+    }
+
+    fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
+        // TODO(antoyo)
+        false
+    }
+
+    fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
+        // TODO(antoyo)
+        false
+    }
+}
+
+impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn coverageinfo_finalize(&self) {
+        // TODO(antoyo)
+    }
+
+    fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    /// Functions with MIR-based coverage are normally codegenned _only_ if
+    /// called. LLVM coverage tools typically expect every function to be
+    /// defined (even if unused), with at least one call to the LLVM intrinsic
+    /// `instrprof.increment`.
+    ///
+    /// Codegen a small function that will never be called, with one counter
+    /// that will never be incremented.
+    ///
+    /// For used/called functions, the coverageinfo was already added to the
+    /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+    /// But in this case, since the unused function was _not_ previously
+    /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+    /// them. The first `CodeRegion` is used to add a single counter, with the
+    /// same counter ID used in the injected `instrprof.increment` intrinsic
+    /// call. Since the function is never called, all other `CodeRegion`s can be
+    /// added as `unreachable_region`s.
+    fn define_unused_fn(&self, _def_id: DefId) {
+        unimplemented!();
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
new file mode 100644 (file)
index 0000000..4d3b4f0
--- /dev/null
@@ -0,0 +1,62 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::Size;
+use rustc_target::abi::call::FnAbi;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
+        unimplemented!();
+    }
+
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+        // TODO(antoyo): insert reference to gdb debug scripts section global.
+    }
+
+    fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
+        unimplemented!();
+    }
+
+    fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
+        unimplemented!();
+    }
+}
+
+impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
+        // TODO(antoyo)
+    }
+
+    fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+        // TODO(antoyo)
+        None
+    }
+
+    fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
+        unimplemented!();
+    }
+
+    fn debuginfo_finalize(&self) {
+        // TODO(antoyo)
+    }
+
+    fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
+        unimplemented!();
+    }
+
+    fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
+        unimplemented!();
+    }
+
+    fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
+        unimplemented!();
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
new file mode 100644 (file)
index 0000000..b79a50d
--- /dev/null
@@ -0,0 +1,144 @@
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::{CodegenCx, unit_name};
+use crate::intrinsic::llvm;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+        if self.globals.borrow().contains_key(name) {
+            let typ = self.globals.borrow().get(name).expect("global").get_type();
+            let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+            if is_tls {
+                global.set_tls_model(self.tls_model);
+            }
+            if let Some(link_section) = link_section {
+                global.set_link_section(&link_section.as_str());
+            }
+            global
+        }
+        else {
+            self.declare_global(name, ty, is_tls, link_section)
+        }
+    }
+
+    pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+        let index = self.global_gen_sym_counter.get();
+        self.global_gen_sym_counter.set(index + 1);
+        let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
+        self.context.new_global(None, GlobalKind::Exported, ty, &name)
+    }
+
+    pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
+        let global = self.context.new_global(None, linkage, ty, name);
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+        self.linkage.set(FunctionType::Exported);
+        let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
+    }*/
+
+    pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+        let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
+        if is_tls {
+            global.set_tls_model(self.tls_model);
+        }
+        if let Some(link_section) = link_section {
+            global.set_link_section(&link_section.as_str());
+        }
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> {
+        let global = self.context.new_global(None, GlobalKind::Internal, ty, name);
+        let global_address = global.get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global_address);
+        global
+    }
+
+    pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use the fn_type parameter.
+        let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+        let return_type = self.type_i32();
+        let variadic = false;
+        self.linkage.set(FunctionType::Exported);
+        let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
+        // NOTE: current_func must also be set here, because get_fn() is not called
+        // for the main function.
+        *self.current_func.borrow_mut() = Some(func);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
+    }
+
+    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+        let (return_type, params, variadic) = fn_abi.gcc_type(self);
+        let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
+    }
+
+    pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+        self.get_or_insert_global(name, ty, is_tls, link_section)
+    }
+
+    pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
+        // TODO(antoyo): use a different field than globals, because this seems to return a function?
+        self.globals.borrow().get(name).cloned()
+    }
+}
+
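+// A simplified model of the get_or_insert_global caching above: the first
+// request for a name defines the global, and later requests only import it.
+// Standalone sketch; strings stand in for gccjit globals.
+#[cfg(test)]
+mod global_cache_sketch {
+    use std::collections::HashMap;
+
+    fn get_or_insert(globals: &mut HashMap<String, String>, name: &str) -> String {
+        if globals.contains_key(name) {
+            // Already defined: re-declare as an import (GlobalKind::Imported).
+            format!("imported {}", name)
+        }
+        else {
+            // First sight: define it (GlobalKind::Exported) and cache it.
+            let global = format!("exported {}", name);
+            globals.insert(name.to_string(), global.clone());
+            global
+        }
+    }
+
+    #[test]
+    fn define_then_import() {
+        let mut globals = HashMap::new();
+        assert_eq!(get_or_insert(&mut globals, "A"), "exported A");
+        assert_eq!(get_or_insert(&mut globals, "A"), "imported A");
+    }
+}
+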
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+    if name.starts_with("llvm.") {
+        return llvm::intrinsic(name, cx);
+    }
+    let func =
+        if cx.functions.borrow().contains_key(name) {
+            *cx.functions.borrow().get(name).expect("function")
+        }
+        else {
+            let params: Vec<_> = param_types.into_iter().enumerate()
+                .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+                .collect();
+            let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
+            cx.functions.borrow_mut().insert(name.to_string(), func);
+            func
+        };
+
+    // TODO(antoyo): set function calling convention.
+    // TODO(antoyo): set unnamed address.
+    // TODO(antoyo): set no red zone function attribute.
+    // TODO(antoyo): set attributes for optimisation.
+    // TODO(antoyo): set attributes for non lazy bind.
+
+    // FIXME(antoyo): invalid cast.
+    func
+}
+
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alphanumeric
+// characters and `_` in symbol names. Unsupported characters encountered so far: `$` and `.`.
+pub fn mangle_name(name: &str) -> String {
+    name.replace(|char: char| {
+        if !char.is_alphanumeric() && char != '_' {
+            debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
+            true
+        }
+        else {
+            false
+        }
+    }, "_")
+}
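
Since `mangle_name` above is self-contained string processing, its rule can be exercised standalone. A runnable sketch of the same replacement, minus the debug assertion (the sample symbols are hypothetical):

    fn mangle_name(name: &str) -> String {
        name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
    }

    fn main() {
        assert_eq!(mangle_name("core.fmt$impl"), "core_fmt_impl");
        assert_eq!(mangle_name("already_fine_123"), "already_fine_123");
    }
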
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
new file mode 100644 (file)
index 0000000..b074feb
--- /dev/null
@@ -0,0 +1,22 @@
+use gccjit::Function;
+
+use crate::context::CodegenCx;
+
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+    let _gcc_name =
+        match name {
+            "llvm.x86.xgetbv" => {
+                let gcc_name = "__builtin_trap";
+                let func = cx.context.get_builtin_function(gcc_name);
+                cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+                return func;
+            },
+            // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+            "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+            "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+            "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+            _ => unimplemented!("unsupported LLVM intrinsic {}", name)
+        };
+
+    unimplemented!();
+}
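
The fall-through arms above form a static name table from LLVM intrinsics to GCC builtins. A minimal standalone sketch of that lookup, reusing the same three SSE2 entries:

    fn gcc_builtin_for(llvm_name: &str) -> Option<&'static str> {
        Some(match llvm_name {
            "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
            "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
            "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
            _ => return None, // everything else is unimplemented for now
        })
    }

    fn main() {
        assert_eq!(gcc_builtin_for("llvm.x86.sse2.movmsk.pd"),
                   Some("__builtin_ia32_movmskpd"));
        assert_eq!(gcc_builtin_for("llvm.unknown"), None);
    }
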
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
new file mode 100644 (file)
index 0000000..375d422
--- /dev/null
@@ -0,0 +1,1067 @@
+pub mod llvm;
+mod simd;
+
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::{SignType, TypeReflection};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+
+fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+    let gcc_name = match name {
+        sym::sqrtf32 => "sqrtf",
+        sym::sqrtf64 => "sqrt",
+        sym::powif32 => "__builtin_powif",
+        sym::powif64 => "__builtin_powi",
+        sym::sinf32 => "sinf",
+        sym::sinf64 => "sin",
+        sym::cosf32 => "cosf",
+        sym::cosf64 => "cos",
+        sym::powf32 => "powf",
+        sym::powf64 => "pow",
+        sym::expf32 => "expf",
+        sym::expf64 => "exp",
+        sym::exp2f32 => "exp2f",
+        sym::exp2f64 => "exp2",
+        sym::logf32 => "logf",
+        sym::logf64 => "log",
+        sym::log10f32 => "log10f",
+        sym::log10f64 => "log10",
+        sym::log2f32 => "log2f",
+        sym::log2f64 => "log2",
+        sym::fmaf32 => "fmaf",
+        sym::fmaf64 => "fma",
+        sym::fabsf32 => "fabsf",
+        sym::fabsf64 => "fabs",
+        sym::minnumf32 => "fminf",
+        sym::minnumf64 => "fmin",
+        sym::maxnumf32 => "fmaxf",
+        sym::maxnumf64 => "fmax",
+        sym::copysignf32 => "copysignf",
+        sym::copysignf64 => "copysign",
+        sym::floorf32 => "floorf",
+        sym::floorf64 => "floor",
+        sym::ceilf32 => "ceilf",
+        sym::ceilf64 => "ceil",
+        sym::truncf32 => "truncf",
+        sym::truncf64 => "trunc",
+        sym::rintf32 => "rintf",
+        sym::rintf64 => "rint",
+        sym::nearbyintf32 => "nearbyintf",
+        sym::nearbyintf64 => "nearbyint",
+        sym::roundf32 => "roundf",
+        sym::roundf64 => "round",
+        sym::abort => "abort",
+        _ => return None,
+    };
+    Some(cx.context.get_builtin_function(&gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+        let tcx = self.tcx;
+        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+        let (def_id, substs) = match *callee_ty.kind() {
+            ty::FnDef(def_id, substs) => (def_id, substs),
+            _ => bug!("expected fn item type, found {}", callee_ty),
+        };
+
+        let sig = callee_ty.fn_sig(tcx);
+        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+        let arg_tys = sig.inputs();
+        let ret_ty = sig.output();
+        let name = tcx.item_name(def_id);
+        let name_str = &*name.as_str();
+
+        let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
+        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+        let simple = get_simple_intrinsic(self, name);
+        let llval =
+            match name {
+                _ if simple.is_some() => {
+                    // FIXME(antoyo): remove this cast when the API supports function.
+                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+                    self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+                },
+                sym::likely => {
+                    self.expect(args[0].immediate(), true)
+                }
+                sym::unlikely => {
+                    self.expect(args[0].immediate(), false)
+                }
+                kw::Try => {
+                    try_intrinsic(
+                        self,
+                        args[0].immediate(),
+                        args[1].immediate(),
+                        args[2].immediate(),
+                        llresult,
+                    );
+                    return;
+                }
+                sym::breakpoint => {
+                    unimplemented!();
+                }
+                sym::va_copy => {
+                    unimplemented!();
+                }
+                sym::va_arg => {
+                    unimplemented!();
+                }
+
+                sym::volatile_load | sym::unaligned_volatile_load => {
+                    let tp_ty = substs.type_at(0);
+                    let mut ptr = args[0].immediate();
+                    if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
+                    }
+                    let load = self.volatile_load(ptr.get_type(), ptr);
+                    // TODO(antoyo): set alignment.
+                    self.to_immediate(load, self.layout_of(tp_ty))
+                }
+                sym::volatile_store => {
+                    let dst = args[0].deref(self.cx());
+                    args[1].val.volatile_store(self, dst);
+                    return;
+                }
+                sym::unaligned_volatile_store => {
+                    let dst = args[0].deref(self.cx());
+                    args[1].val.unaligned_volatile_store(self, dst);
+                    return;
+                }
+                sym::prefetch_read_data
+                    | sym::prefetch_write_data
+                    | sym::prefetch_read_instruction
+                    | sym::prefetch_write_instruction => {
+                        unimplemented!();
+                    }
+                sym::ctlz
+                    | sym::ctlz_nonzero
+                    | sym::cttz
+                    | sym::cttz_nonzero
+                    | sym::ctpop
+                    | sym::bswap
+                    | sym::bitreverse
+                    | sym::rotate_left
+                    | sym::rotate_right
+                    | sym::saturating_add
+                    | sym::saturating_sub => {
+                        let ty = arg_tys[0];
+                        match int_type_width_signed(ty, self) {
+                            Some((width, signed)) => match name {
+                                sym::ctlz | sym::cttz => {
+                                    let func = self.current_func.borrow().expect("func");
+                                    let then_block = func.new_block("then");
+                                    let else_block = func.new_block("else");
+                                    let after_block = func.new_block("after");
+
+                                    let arg = args[0].immediate();
+                                    let result = func.new_local(None, arg.get_type(), "zeros");
+                                    let zero = self.cx.context.new_rvalue_zero(arg.get_type());
+                                    let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
+                                    self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+                                    let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
+                                    then_block.add_assignment(None, result, zero_result);
+                                    then_block.end_with_jump(None, after_block);
+
+                                    // NOTE: since jumps were added in a place that
+                                    // count_leading_zeroes() does not expect, the current blocks
+                                    // in the state need to be updated.
+                                    *self.current_block.borrow_mut() = Some(else_block);
+                                    self.block = Some(else_block);
+
+                                    let zeros =
+                                        match name {
+                                            sym::ctlz => self.count_leading_zeroes(width, arg),
+                                            sym::cttz => self.count_trailing_zeroes(width, arg),
+                                            _ => unreachable!(),
+                                        };
+                                    else_block.add_assignment(None, result, zeros);
+                                    else_block.end_with_jump(None, after_block);
+
+                                    // NOTE: since jumps were added in a place that rustc does not
+                                    // expect, the current blocks in the state need to be updated.
+                                    *self.current_block.borrow_mut() = Some(after_block);
+                                    self.block = Some(after_block);
+
+                                    result.to_rvalue()
+                                }
+                                sym::ctlz_nonzero => {
+                                    self.count_leading_zeroes(width, args[0].immediate())
+                                },
+                                sym::cttz_nonzero => {
+                                    self.count_trailing_zeroes(width, args[0].immediate())
+                                }
+                                sym::ctpop => self.pop_count(args[0].immediate()),
+                                sym::bswap => {
+                                    if width == 8 {
+                                        args[0].immediate() // byte-swapping a u8/i8 is just a no-op
+                                    }
+                                    else {
+                                        // TODO(antoyo): check if it's faster to use string literals and a
+                                        // match instead of format!.
+                                        let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
+                                        let mut arg = args[0].immediate();
+                                        // FIXME(antoyo): this cast should not be necessary. Remove
+                                        // when having proper sized integer types.
+                                        let param_type = bswap.get_param(0).to_rvalue().get_type();
+                                        if param_type != arg.get_type() {
+                                            arg = self.bitcast(arg, param_type);
+                                        }
+                                        self.cx.context.new_call(None, bswap, &[arg])
+                                    }
+                                },
+                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+                                sym::rotate_left | sym::rotate_right => {
+                                    // TODO(antoyo): implement using algorithm from:
+                                    // https://blog.regehr.org/archives/1063
+                                    // for other platforms.
+                                    let is_left = name == sym::rotate_left;
+                                    let val = args[0].immediate();
+                                    let raw_shift = args[1].immediate();
+                                    if is_left {
+                                        self.rotate_left(val, raw_shift, width)
+                                    }
+                                    else {
+                                        self.rotate_right(val, raw_shift, width)
+                                    }
+                                },
+                                sym::saturating_add => {
+                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
+                                },
+                                sym::saturating_sub => {
+                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
+                                },
+                                _ => bug!(),
+                            },
+                            None => {
+                                span_invalid_monomorphization_error(
+                                    tcx.sess,
+                                    span,
+                                    &format!(
+                                        "invalid monomorphization of `{}` intrinsic: \
+                                      expected basic integer type, found `{}`",
+                                      name, ty
+                                    ),
+                                );
+                                return;
+                            }
+                        }
+                    }
+
+                sym::raw_eq => {
+                    use rustc_target::abi::Abi::*;
+                    let tp_ty = substs.type_at(0);
+                    let layout = self.layout_of(tp_ty).layout;
+                    let _use_integer_compare = match layout.abi {
+                        Scalar(_) | ScalarPair(_, _) => true,
+                        Uninhabited | Vector { .. } => false,
+                        Aggregate { .. } => {
+                            // For rusty ABIs, small aggregates are actually passed
+                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+                            // so we re-use that same threshold here.
+                            layout.size <= self.data_layout().pointer_size * 2
+                        }
+                    };
+
+                    let a = args[0].immediate();
+                    let b = args[1].immediate();
+                    if layout.size.bytes() == 0 {
+                        self.const_bool(true)
+                    }
+                    /*else if use_integer_compare {
+                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+                        let ptr_ty = self.type_ptr_to(integer_ty);
+                        let a_ptr = self.bitcast(a, ptr_ty);
+                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+                        let b_ptr = self.bitcast(b, ptr_ty);
+                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                    }*/
+                    else {
+                        let void_ptr_type = self.context.new_type::<*const ()>();
+                        let a_ptr = self.bitcast(a, void_ptr_type);
+                        let b_ptr = self.bitcast(b, void_ptr_type);
+                        let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
+                        let builtin = self.context.get_builtin_function("memcmp");
+                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+                    }
+                }
+
+                sym::black_box => {
+                    args[0].val.store(self, result);
+
+                    let block = self.llbb();
+                    let extended_asm = block.add_extended_asm(None, "");
+                    extended_asm.add_input_operand(None, "r", result.llval);
+                    extended_asm.add_clobber("memory");
+                    extended_asm.set_volatile_flag(true);
+
+                    // We have copied the value to `result` already.
+                    return;
+                }
+
+                _ if name_str.starts_with("simd_") => {
+                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                        Ok(llval) => llval,
+                        Err(()) => return,
+                    }
+                }
+
+                _ => bug!("unknown intrinsic '{}'", name),
+            };
+
+        if !fn_abi.ret.is_ignore() {
+            if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+                let ptr = self.pointercast(result.llval, ptr_llty);
+                self.store(llval, ptr, result.align);
+            }
+            else {
+                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+                    .val
+                    .store(self, result);
+            }
+        }
+    }
+
+    fn abort(&mut self) {
+        let func = self.context.get_builtin_function("abort");
+        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+        self.call(self.type_void(), func, &[], None);
+    }
+
+    fn assume(&mut self, value: Self::Value) {
+        // TODO(antoyo): switch to assume when it exists.
+        // Or use something like this:
+        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+        self.expect(value, true);
+    }
+
+    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
+        // TODO(antoyo)
+        cond
+    }
+
+    fn sideeffect(&mut self) {
+        // TODO(antoyo)
+    }
+
+    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+
+    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+    }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+        arg_abi.store_fn_arg(self, idx, dst)
+    }
+
+    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+        arg_abi.store(self, val, dst)
+    }
+
+    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        arg_abi.memory_ty(self)
+    }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the GCC type for a place of the original Rust type of
+    /// this argument/return, i.e., the result of `type_of::type_of`.
+    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        self.layout.gcc_type(cx, true)
+    }
+
+    /// Stores a direct/indirect value described by this ArgAbi into a
+    /// place for the original Rust type of this argument/return.
+    /// Can be used for both storing formal arguments into Rust variables
+    /// or results of call/invoke instructions into their destinations.
+    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+        if self.is_ignore() {
+            return;
+        }
+        if self.is_sized_indirect() {
+            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+        }
+        else if self.is_unsized_indirect() {
+            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+        }
+        else if let PassMode::Cast(cast) = self.mode {
+            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+            let can_store_through_cast_ptr = false;
+            if can_store_through_cast_ptr {
+                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+                bx.store(val, cast_dst, self.layout.align.abi);
+            }
+            else {
+                // The actual return type is a struct, but the ABI
+                // adaptation code has cast it into some scalar type.  The
+                // code that follows is the only reliable way I have
+                // found to do a transform like i64 -> {i32,i32}.
+                // Basically we dump the data onto the stack then memcpy it.
+                //
+                // Other approaches I tried:
+                // - Casting rust ret pointer to the foreign type and using Store
+                //   is (a) unsafe if size of foreign type > size of rust type and
+                //   (b) runs afoul of strict aliasing rules, yielding invalid
+                //   assembly under -O (specifically, the store gets removed).
+                // - Truncating foreign type to correct integral type and then
+                //   bitcasting to the struct type yields invalid cast errors.
+
+                // We thus allocate some scratch space...
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
+
+                // ... where we first store the value...
+                bx.store(val, llscratch, scratch_align);
+
+                // ... and then memcpy it to the intended destination.
+                bx.memcpy(
+                    dst.llval,
+                    self.layout.align.abi,
+                    llscratch,
+                    scratch_align,
+                    bx.const_usize(self.layout.size.bytes()),
+                    MemFlags::empty(),
+                );
+
+                bx.lifetime_end(llscratch, scratch_size);
+            }
+        }
+        else {
+            OperandValue::Immediate(val).store(bx, dst);
+        }
+    }
+
+    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+        let mut next = || {
+            let val = bx.current_func().get_param(*idx as i32);
+            *idx += 1;
+            val.to_rvalue()
+        };
+        match self.mode {
+            PassMode::Ignore => {}
+            PassMode::Pair(..) => {
+                OperandValue::Pair(next(), next()).store(bx, dst);
+            }
+            PassMode::Indirect { extra_attrs: Some(_), .. } => {
+                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+            }
+            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+                let next_arg = next();
+                self.store(bx, next_arg.to_rvalue(), dst);
+            }
+        }
+    }
+}
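+
+// The "dump to stack, then memcpy" transform in `store` above, as a
+// standalone sketch: reinterpreting an i64 as {i32, i32} by going through
+// memory, with byte copies standing in for the emitted alloca + memcpy.
+#[cfg(test)]
+mod cast_store_sketch {
+    fn i64_to_pair(value: i64) -> (i32, i32) {
+        let scratch = value.to_ne_bytes(); // store the value into scratch space...
+        let mut first = [0u8; 4];
+        let mut second = [0u8; 4];
+        first.copy_from_slice(&scratch[..4]); // ...then memcpy it to the parts.
+        second.copy_from_slice(&scratch[4..]);
+        (i32::from_ne_bytes(first), i32::from_ne_bytes(second))
+    }
+
+    #[test]
+    fn splits_through_memory() {
+        let (first, second) = i64_to_pair(0x1122_3344_5566_7788);
+        if cfg!(target_endian = "little") {
+            assert_eq!(first, 0x5566_7788);
+            assert_eq!(second, 0x1122_3344);
+        }
+    }
+}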
+
+fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+    match ty.kind() {
+        ty::Int(t) => Some((
+            match t {
+                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
+                rustc_middle::ty::IntTy::I8 => 8,
+                rustc_middle::ty::IntTy::I16 => 16,
+                rustc_middle::ty::IntTy::I32 => 32,
+                rustc_middle::ty::IntTy::I64 => 64,
+                rustc_middle::ty::IntTy::I128 => 128,
+            },
+            true,
+        )),
+        ty::Uint(t) => Some((
+            match t {
+                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
+                rustc_middle::ty::UintTy::U8 => 8,
+                rustc_middle::ty::UintTy::U16 => 16,
+                rustc_middle::ty::UintTy::U32 => 32,
+                rustc_middle::ty::UintTy::U64 => 64,
+                rustc_middle::ty::UintTy::U128 => 128,
+            },
+            false,
+        )),
+        _ => None,
+    }
+}
+
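+// The bit_reverse helper below builds the classic mask-and-shift reversal
+// out of gccjit and/or/shift operations. A standalone sketch of the
+// width == 8 case, with the same three steps as plain Rust (illustration
+// only, not backend code):
+#[cfg(test)]
+mod bit_reverse_sketch {
+    fn bit_reverse_u8(v: u8) -> u8 {
+        let v = (v & 0xF0) >> 4 | (v & 0x0F) << 4; // swap nibbles
+        let v = (v & 0xCC) >> 2 | (v & 0x33) << 2; // swap bit pairs
+        (v & 0xAA) >> 1 | (v & 0x55) << 1 // swap adjacent bits
+    }
+
+    #[test]
+    fn reverses() {
+        assert_eq!(bit_reverse_u8(0b1000_0000), 0b0000_0001);
+        assert_eq!(bit_reverse_u8(0b1101_0010), 0b0100_1011);
+    }
+}
+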
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
+        let result_type = value.get_type();
+        let typ = result_type.to_unsigned(self.cx);
+
+        let value =
+            if result_type.is_signed(self.cx) {
+                self.context.new_bitcast(None, value, typ)
+            }
+            else {
+                value
+            };
+
+        let context = &self.cx.context;
+        let result =
+            match width {
+                8 => {
+                    // First step.
+                    let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
+                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
+                    let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
+                    let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
+                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
+                    let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
+                    let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
+                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
+                    let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
+                    let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
+                    let step3 = self.or(left, right);
+
+                    step3
+                },
+                16 => {
+                    // First step.
+                    let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
+                    let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
+                    let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
+                    let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
+                    let step3 = self.or(left, right);
+
+                    // Fourth step.
+                    let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
+                    let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
+                    let step4 = self.or(left, right);
+
+                    step4
+                },
+                32 => {
+                    // TODO(antoyo): Refactor with other implementations.
+                    // First step.
+                    let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
+                    let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
+                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
+                    let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+                    let step3 = self.or(left, right);
+
+                    // Fourth step.
+                    let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
+                    let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
+                    let step4 = self.or(left, right);
+
+                    // Fifth step.
+                    let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
+                    let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
+                    let step5 = self.or(left, right);
+
+                    step5
+                },
+                64 => {
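+                    // Swap the two 32-bit halves first, then finish the reversal with a
+                    // more compact sequence of masked shifts and xor-based delta swaps.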
+                    // First step.
+                    let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
+                    let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
+                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
+                    let left = self.xor(step2, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
+                    let left = self.or(temp, left);
+                    let step3 = self.xor(left, step2);
+
+                    // Fourth step.
+                    let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
+                    let left = self.xor(step3, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
+                    let left = self.or(temp, left);
+                    let step4 = self.xor(left, step3);
+
+                    // Fifth step.
+                    let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+                    let left = self.xor(step4, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+                    let left = self.or(temp, left);
+                    let step5 = self.xor(left, step4);
+
+                    step5
+                },
+                128 => {
+                    // TODO(antoyo): find a more efficient implementation?
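+                    // Split into two u64 halves, reverse each, and swap them.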
+                    let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+                    let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
+                    let low = self.context.new_cast(None, value, self.u64_type);
+
+                    let reversed_high = self.bit_reverse(64, high);
+                    let reversed_low = self.bit_reverse(64, low);
+
+                    let new_low = self.context.new_cast(None, reversed_high, typ);
+                    let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
+
+                    new_low | new_high
+                },
+                _ => {
+                    panic!("cannot bit reverse with width = {}", width);
+                },
+            };
+
+        self.context.new_bitcast(None, result, result_type)
+    }
+
+    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use width?
+        let arg_type = arg.get_type();
+        let count_leading_zeroes =
+            if arg_type.is_uint(&self.cx) {
+                "__builtin_clz"
+            }
+            else if arg_type.is_ulong(&self.cx) {
+                "__builtin_clzl"
+            }
+            else if arg_type.is_ulonglong(&self.cx) {
+                "__builtin_clzll"
+            }
+            else if width == 128 {
+                // Algorithm from: https://stackoverflow.com/a/28433850/389119
+                let array_type = self.context.new_array_type(None, arg_type, 3);
+                let result = self.current_func()
+                    .new_local(None, array_type, "count_loading_zeroes_results");
+
+                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+                let low = self.context.new_cast(None, arg, self.u64_type);
+
+                let zero = self.context.new_rvalue_zero(self.usize_type);
+                let one = self.context.new_rvalue_one(self.usize_type);
+                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+                let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+                let first_elem = self.context.new_array_access(None, result, zero);
+                let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
+                self.llbb()
+                    .add_assignment(None, first_elem, first_value);
+
+                let second_elem = self.context.new_array_access(None, result, one);
+                let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
+                self.llbb()
+                    .add_assignment(None, second_elem, second_value);
+
+                let third_elem = self.context.new_array_access(None, result, two);
+                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+                self.llbb()
+                    .add_assignment(None, third_elem, third_value);
+
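+                // Branchless selection of the right entry: LogicalNegate yields 0 or 1,
+                // so the index is 0 when high != 0, 1 when only low != 0, and 2 when
+                // both halves are zero.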
+                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_low_and_not_high = not_low & not_high;
+                let index = not_high + not_low_and_not_high;
+
+                let res = self.context.new_array_access(None, result, index);
+
+                return self.context.new_cast(None, res, arg_type);
+            }
+            else {
+                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
+                let arg = self.context.new_cast(None, arg, self.uint_type);
+                let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
+                let diff = self.context.new_rvalue_from_long(self.int_type, diff);
+                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(None, res, arg_type);
+            };
+        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+        self.context.new_cast(None, res, arg_type)
+    }
+
+    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        let result_type = arg.get_type();
+        let arg =
+            if result_type.is_signed(self.cx) {
+                let new_type = result_type.to_unsigned(self.cx);
+                self.context.new_bitcast(None, arg, new_type)
+            }
+            else {
+                arg
+            };
+        let arg_type = arg.get_type();
+        let (count_trailing_zeroes, expected_type) =
+            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
+                ("__builtin_ctz", self.cx.uint_type)
+            }
+            else if arg_type.is_ulong(&self.cx) {
+                ("__builtin_ctzl", self.cx.ulong_type)
+            }
+            else if arg_type.is_ulonglong(&self.cx) {
+                ("__builtin_ctzll", self.cx.ulonglong_type)
+            }
+            else if arg_type.is_u128(&self.cx) {
+                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+                let array_type = self.context.new_array_type(None, arg_type, 3);
+                let result = self.current_func()
+                    .new_local(None, array_type, "count_loading_zeroes_results");
+
+                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+                let low = self.context.new_cast(None, arg, self.u64_type);
+
+                let zero = self.context.new_rvalue_zero(self.usize_type);
+                let one = self.context.new_rvalue_one(self.usize_type);
+                let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+                let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+                let first_elem = self.context.new_array_access(None, result, zero);
+                let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
+                self.llbb()
+                    .add_assignment(None, first_elem, first_value);
+
+                let second_elem = self.context.new_array_access(None, result, one);
+                let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
+                self.llbb()
+                    .add_assignment(None, second_elem, second_value);
+
+                let third_elem = self.context.new_array_access(None, result, two);
+                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+                self.llbb()
+                    .add_assignment(None, third_elem, third_value);
+
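+                // Branchless selection, mirroring count_leading_zeroes: the index is 0
+                // when low != 0, 1 when only high != 0, and 2 when both halves are zero.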
+                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low_and_not_high = not_low & not_high;
+                let index = not_low + not_low_and_not_high;
+
+                let res = self.context.new_array_access(None, result, index);
+
+                return self.context.new_bitcast(None, res, result_type);
+            }
+            else {
+                unimplemented!("count_trailing_zeroes for {:?}", arg_type);
+            };
+        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+        let arg =
+            if arg_type != expected_type {
+                self.context.new_cast(None, arg, expected_type)
+            }
+            else {
+                arg
+            };
+        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
+        self.context.new_bitcast(None, res, result_type)
+    }
+
+    fn int_width(&self, typ: Type<'gcc>) -> i64 {
+        self.cx.int_width(typ) as i64
+    }
+
+    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+        // TODO(antoyo): use the optimized version with fewer operations.
+        let result_type = value.get_type();
+        let value_type = result_type.to_unsigned(self.cx);
+
+        let value =
+            if result_type.is_signed(self.cx) {
+                self.context.new_bitcast(None, value, value_type)
+            }
+            else {
+                value
+            };
+
+        if value_type.is_u128(&self.cx) {
+            // TODO(antoyo): handle the 128-bit case in the normal algorithm below to get a more
+            // efficient implementation (one that does not require a call to __popcountdi2).
+            let popcount = self.context.get_builtin_function("__builtin_popcountll");
+            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
+            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
+            let high = self.context.new_call(None, popcount, &[high]);
+            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
+            let low = self.context.new_call(None, popcount, &[low]);
+            let res = high + low;
+            return self.context.new_bitcast(None, res, result_type);
+        }
+
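+        // SWAR popcount: sum the bits in pairs, then nibbles, then bytes, and so on,
+        // returning early once the sums cover the value's full width; e.g.
+        // pop_count(0b1011) == 3.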
+        // First step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
+        let right = shifted & mask;
+        let value = left + right;
+
+        // Second step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
+        let right = shifted & mask;
+        let value = left + right;
+
+        // Third step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
+        let right = shifted & mask;
+        let value = left + right;
+
+        if value_type.is_u8(&self.cx) {
+            return self.context.new_bitcast(None, value, result_type);
+        }
+
+        // Fourth step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
+        let right = shifted & mask;
+        let value = left + right;
+
+        if value_type.is_u16(&self.cx) {
+            return self.context.new_bitcast(None, value, result_type);
+        }
+
+        // Fifth step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
+        let right = shifted & mask;
+        let value = left + right;
+
+        if value_type.is_u32(&self.cx) {
+            return self.context.new_bitcast(None, value, result_type);
+        }
+
+        // Sixth step.
+        let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
+        let left = value & mask;
+        let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
+        let right = shifted & mask;
+        let value = left + right;
+
+        self.context.new_bitcast(None, value, result_type)
+    }
+
+    // Algorithm from: https://blog.regehr.org/archives/1063
+    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
+        let shift = shift % max;
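+        // Masking the negated shift with width - 1 keeps the right-shift amount in
+        // [0, width), so a shift of zero never triggers an undefined full-width shift.
+        // E.g. rotating 0b1000_0001u8 left by 1 yields 0b0000_0011.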
+        let lhs = self.shl(value, shift);
+        let result_and =
+            self.and(
+                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
+                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+            );
+        let rhs = self.lshr(value, result_and);
+        self.or(lhs, rhs)
+    }
+
+    // Algorithm from: https://blog.regehr.org/archives/1063
+    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
+        let shift = shift % max;
+        let lhs = self.lshr(value, shift);
+        let result_and =
+            self.and(
+                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
+                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+            );
+        let rhs = self.shl(value, result_and);
+        self.or(lhs, rhs)
+    }
+
+    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+        let func = self.current_func.borrow().expect("func");
+
+        if signed {
+            // Algorithm from: https://stackoverflow.com/a/56531252/389119
+            let after_block = func.new_block("after");
+            let func_name =
+                match width {
+                    8 => "__builtin_add_overflow",
+                    16 => "__builtin_add_overflow",
+                    32 => "__builtin_sadd_overflow",
+                    64 => "__builtin_saddll_overflow",
+                    128 => "__builtin_add_overflow",
+                    _ => unreachable!(),
+                };
+            let overflow_func = self.context.get_builtin_function(func_name);
+            let result_type = lhs.get_type();
+            let res = func.new_local(None, result_type, "saturating_sum");
+            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
+
+            let then_block = func.new_block("then");
+
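+            // On overflow, clamp to the bound matching lhs's sign: lhs's sign bit (0 or 1)
+            // added to INT_MAX wraps to INT_MAX for non-negative lhs and to INT_MIN
+            // otherwise.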
+            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
+            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
+            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
+                self.context.new_rvalue_from_int(unsigned_type, 0)
+            );
+            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
+            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
+            then_block.end_with_jump(None, after_block);
+
+            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current blocks in the state need to be updated.
+            *self.current_block.borrow_mut() = Some(after_block);
+            self.block = Some(after_block);
+
+            res.to_rvalue()
+        }
+        else {
+            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
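+            // If the addition wrapped, res < lhs, so the negated comparison is all
+            // ones and the OR saturates the result to the maximum value; e.g.
+            // 250u8 + 10 saturates to 255.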
+            let res = lhs + rhs;
+            let res_type = res.get_type();
+            let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
+            let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
+            res | value
+        }
+    }
+
+    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+        if signed {
+            // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
+            let func_name =
+                match width {
+                    8 => "__builtin_sub_overflow",
+                    16 => "__builtin_sub_overflow",
+                    32 => "__builtin_ssub_overflow",
+                    64 => "__builtin_ssubll_overflow",
+                    128 => "__builtin_sub_overflow",
+                    _ => unreachable!(),
+                };
+            let overflow_func = self.context.get_builtin_function(func_name);
+            let result_type = lhs.get_type();
+            let func = self.current_func.borrow().expect("func");
+            let res = func.new_local(None, result_type, "saturating_diff");
+            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
+
+            let then_block = func.new_block("then");
+            let after_block = func.new_block("after");
+
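+            // Same clamping scheme as in saturating_add above: overflow snaps the result
+            // to INT_MAX when lhs is non-negative and to INT_MIN otherwise.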
+            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
+            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
+            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
+                self.context.new_rvalue_from_int(unsigned_type, 0)
+            );
+            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
+            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
+            then_block.end_with_jump(None, after_block);
+
+            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current blocks in the state need to be updated.
+            *self.current_block.borrow_mut() = Some(after_block);
+            self.block = Some(after_block);
+
+            res.to_rvalue()
+        }
+        else {
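+            // If the subtraction wrapped, res > lhs, so the negated comparison is zero
+            // and the AND clamps the result to 0; otherwise the all-ones mask keeps res.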
+            let res = lhs - rhs;
+            let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
+            let comparison = self.context.new_cast(None, comparison, lhs.get_type());
+            let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
+            self.and(res, unary_op)
+        }
+    }
+}
+
+fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+    if bx.sess().panic_strategy() == PanicStrategy::Abort {
+        bx.call(bx.type_void(), try_func, &[data], None);
+        // Return 0 unconditionally from the intrinsic call;
+        // we can never unwind.
+        let ret_align = bx.tcx.data_layout.i32_align.abi;
+        bx.store(bx.const_i32(0), dest, ret_align);
+    }
+    else if wants_msvc_seh(bx.sess()) {
+        unimplemented!();
+    }
+    else {
+        unimplemented!();
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
new file mode 100644 (file)
index 0000000..26a4221
--- /dev/null
@@ -0,0 +1,167 @@
+use gccjit::{RValue, Type};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+
+use crate::builder::Builder;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
+    // macros for error handling:
+    macro_rules! emit_error {
+        ($msg: tt) => {
+            emit_error!($msg, )
+        };
+        ($msg: tt, $($fmt: tt)*) => {
+            span_invalid_monomorphization_error(
+                bx.sess(), span,
+                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+                         name, $($fmt)*));
+        }
+    }
+
+    macro_rules! return_error {
+        ($($fmt: tt)*) => {
+            {
+                emit_error!($($fmt)*);
+                return Err(());
+            }
+        }
+    }
+
+    macro_rules! require {
+        ($cond: expr, $($fmt: tt)*) => {
+            if !$cond {
+                return_error!($($fmt)*);
+            }
+        };
+    }
+
+    macro_rules! require_simd {
+        ($ty: expr, $position: expr) => {
+            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+        };
+    }
+
+    let tcx = bx.tcx();
+    let sig =
+        tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+    let arg_tys = sig.inputs();
+    let name_str = &*name.as_str();
+
+    // every intrinsic below takes a SIMD vector as its first argument
+    require_simd!(arg_tys[0], "input");
+    let in_ty = arg_tys[0];
+
+    let comparison = match name {
+        sym::simd_eq => Some(hir::BinOpKind::Eq),
+        sym::simd_ne => Some(hir::BinOpKind::Ne),
+        sym::simd_lt => Some(hir::BinOpKind::Lt),
+        sym::simd_le => Some(hir::BinOpKind::Le),
+        sym::simd_gt => Some(hir::BinOpKind::Gt),
+        sym::simd_ge => Some(hir::BinOpKind::Ge),
+        _ => None,
+    };
+
+    let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+    if let Some(cmp_op) = comparison {
+        require_simd!(ret_ty, "return");
+
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            in_len == out_len,
+            "expected return type with length {} (same as input type `{}`), \
+             found `{}` with length {}",
+            in_len,
+            in_ty,
+            ret_ty,
+            out_len
+        );
+        require!(
+            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+            "expected return type with integer elements, found `{}` with non-integer `{}`",
+            ret_ty,
+            out_ty
+        );
+
+        return Ok(compare_simd_types(
+            bx,
+            args[0].immediate(),
+            args[1].immediate(),
+            in_elem,
+            llret_ty,
+            cmp_op,
+        ));
+    }
+
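+    // The `simd_shuffle` intrinsic name carries the number of output lanes as a
+    // suffix, which is parsed off here.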
+    if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
+        let n: u64 = stripped.parse().unwrap_or_else(|_| {
+            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+        });
+
+        require_simd!(ret_ty, "return");
+
+        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        require!(
+            out_len == n,
+            "expected return type of length {}, found `{}` with length {}",
+            n,
+            ret_ty,
+            out_len
+        );
+        require!(
+            in_elem == out_ty,
+            "expected return element type `{}` (element of input `{}`), \
+             found `{}` with element type `{}`",
+            in_elem,
+            in_ty,
+            ret_ty,
+            out_ty
+        );
+
+        let vector = args[2].immediate();
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            vector,
+        ));
+    }
+
+    macro_rules! arith_binary {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+            $(if name == sym::$name {
+                match in_elem.kind() {
+                    $($(ty::$p(_))|* => {
+                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+                    })*
+                    _ => {},
+                }
+                require!(false,
+                         "unsupported operation on `{}` with element `{}`",
+                         in_ty,
+                         in_elem)
+            })*
+        }
+    }
+
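+    // Dispatch each simd_* arithmetic intrinsic to the builder operation matching
+    // the element kind (unsigned, signed, or float).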
+    arith_binary! {
+        simd_add: Uint, Int => add, Float => fadd;
+        simd_sub: Uint, Int => sub, Float => fsub;
+        simd_mul: Uint, Int => mul, Float => fmul;
+        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+        simd_rem: Uint => urem, Int => srem, Float => frem;
+        simd_shl: Uint, Int => shl;
+        simd_shr: Uint => lshr, Int => ashr;
+        simd_and: Uint, Int => and;
+        simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
+        simd_xor: Uint, Int => xor;
+    }
+
+    unimplemented!("simd {}", name);
+}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
new file mode 100644 (file)
index 0000000..f3c02e2
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * TODO(antoyo): support #[inline] attributes.
+ * TODO(antoyo): support LTO.
+ *
+ * TODO(antoyo): remove the patches.
+ */
+
+#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
+#![allow(broken_intra_doc_links)]
+#![recursion_limit="256"]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_hir;
+extern crate rustc_metadata;
+extern crate rustc_middle;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_symbol_mangling;
+extern crate rustc_target;
+extern crate snap;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+mod abi;
+mod allocator;
+mod archive;
+mod asm;
+mod back;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod mono_item;
+mod type_;
+mod type_of;
+
+use std::any::Any;
+use std::sync::Arc;
+
+use gccjit::{Context, OptimizationLevel};
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::target_features::supported_target_features;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorReported, Handler};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{Lto, OptLevel, OutputFilenames};
+use rustc_session::Session;
+use rustc_span::Symbol;
+use rustc_span::fatal_error::FatalError;
+
+pub struct PrintOnPanic<F: Fn() -> String>(pub F);
+
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+    fn drop(&mut self) {
+        if ::std::thread::panicking() {
+            println!("{}", (self.0)());
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct GccCodegenBackend;
+
+impl CodegenBackend for GccCodegenBackend {
+    fn init(&self, sess: &Session) {
+        if sess.lto() != Lto::No {
+            sess.warn("LTO is not supported. You may get a linker error.");
+        }
+    }
+
+    fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
+        let target_cpu = target_cpu(tcx.sess);
+        let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
+
+        rustc_symbol_mangling::test::report_symbol_names(tcx);
+
+        Box::new(res)
+    }
+
+    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+        let (codegen_results, work_products) = ongoing_codegen
+            .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
+            .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
+            .join(sess);
+
+        Ok((codegen_results, work_products))
+    }
+
+    fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> {
+        use rustc_codegen_ssa::back::link::link_binary;
+
+        link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+            sess,
+            &codegen_results,
+            outputs,
+        )
+    }
+
+    fn target_features(&self, sess: &Session) -> Vec<Symbol> {
+        target_features(sess)
+    }
+}
+
+impl ExtraBackendMethods for GccCodegenBackend {
+    fn new_metadata<'tcx>(&self, _tcx: TyCtxt<'tcx>, _mod_name: &str) -> Self::Module {
+        GccContext {
+            context: Context::default(),
+        }
+    }
+
+    fn write_compressed_metadata<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut Self::Module) {
+        base::write_compressed_metadata(tcx, metadata, gcc_module)
+    }
+
+    fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
+        unsafe { allocator::codegen(tcx, mods, module_name, kind, has_alloc_error_handler) }
+    }
+
+    fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
+        base::compile_codegen_unit(tcx, cgu_name)
+    }
+
+    fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
+        // TODO(antoyo): set opt level.
+        Arc::new(|_| {
+            Ok(())
+        })
+    }
+
+    fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
+        unimplemented!();
+    }
+
+    fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
+        None
+        // TODO(antoyo)
+    }
+}
+
+pub struct ModuleBuffer;
+
+impl ModuleBufferMethods for ModuleBuffer {
+    fn data(&self) -> &[u8] {
+        unimplemented!();
+    }
+}
+
+pub struct ThinBuffer;
+
+impl ThinBufferMethods for ThinBuffer {
+    fn data(&self) -> &[u8] {
+        unimplemented!();
+    }
+}
+
+pub struct GccContext {
+    context: Context<'static>,
+}
+
+unsafe impl Send for GccContext {}
+// FIXME(antoyo): this shouldn't be Sync. Parallel compilation is currently disabled with
+// "-Zno-parallel-llvm"; try disabling parallelism here instead.
+unsafe impl Sync for GccContext {}
+
+impl WriteBackendMethods for GccCodegenBackend {
+    type Module = GccContext;
+    type TargetMachine = ();
+    type ModuleBuffer = ModuleBuffer;
+    type Context = ();
+    type ThinData = ();
+    type ThinBuffer = ThinBuffer;
+
+    fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+        // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
+        // NOTE: implemented elsewhere.
+        // TODO: clarify what "implemented elsewhere" refers to.
+        let module =
+            match modules.remove(0) {
+                FatLTOInput::InMemory(module) => module,
+                FatLTOInput::Serialized { .. } => {
+                    unimplemented!();
+                }
+            };
+        Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
+    }
+
+    fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        unimplemented!();
+    }
+
+    fn print_pass_timings(&self) {
+        unimplemented!();
+    }
+
+    unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
+        module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+        Ok(())
+    }
+
+    unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: &mut ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        unimplemented!();
+    }
+
+    unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+        back::write::codegen(cgcx, diag_handler, module, config)
+    }
+
+    fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+        unimplemented!();
+    }
+
+    fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+        unimplemented!();
+    }
+
+    fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
+        // TODO(antoyo)
+        Ok(())
+    }
+
+    fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::write::link(cgcx, diag_handler, modules)
+    }
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_gccjit.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+    Box::new(GccCodegenBackend)
+}
+
+fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
+    match optlevel {
+        None => OptimizationLevel::None,
+        Some(level) => {
+            match level {
+                OptLevel::No => OptimizationLevel::None,
+                OptLevel::Less => OptimizationLevel::Limited,
+                OptLevel::Default => OptimizationLevel::Standard,
+                OptLevel::Aggressive => OptimizationLevel::Aggressive,
+                OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
+            }
+        },
+    }
+}
+
+fn handle_native(name: &str) -> &str {
+    if name != "native" {
+        return name;
+    }
+
+    unimplemented!();
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+    let name = sess.opts.cg.target_cpu.as_ref().unwrap_or(&sess.target.cpu);
+    handle_native(name)
+}
+
+pub fn target_features(sess: &Session) -> Vec<Symbol> {
+    supported_target_features(sess)
+        .iter()
+        .filter_map(
+            |&(feature, gate)| {
+                if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None }
+            },
+        )
+        .filter(|_feature| {
+            // TODO(antoyo): implement a way to get enabled feature in libgccjit.
+            false
+        })
+        .map(|feature| Symbol::intern(feature))
+        .collect()
+}
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
new file mode 100644 (file)
index 0000000..e21d40b
--- /dev/null
@@ -0,0 +1,38 @@
+use rustc_codegen_ssa::traits::PreDefineMethods;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_span::def_id::DefId;
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+        let attrs = self.tcx.codegen_fn_attrs(def_id);
+        let instance = Instance::mono(self.tcx, def_id);
+        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+        let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+        let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
+
+        // TODO(antoyo): set linkage and visibility.
+        self.instances.borrow_mut().insert(instance, global);
+    }
+
+    fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+        assert!(!instance.substs.needs_infer());
+
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+        self.linkage.set(base::linkage_to_gcc(linkage));
+        let _decl = self.declare_fn(symbol_name, &fn_abi);
+        //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+
+        // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
+        // TODO(antoyo): set unique comdat.
+        // TODO(antoyo): use inline attribute from there in linkage.set() above.
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
new file mode 100644 (file)
index 0000000..3545e1b
--- /dev/null
@@ -0,0 +1,282 @@
+use std::convert::TryInto;
+
+use gccjit::{RValue, Struct, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+        // gcc only supports 1, 2, 4, 8 or 16-byte integers.
+        // FIXME(antoyo): it is misleading to use the next power of two here, as rustc_codegen_ssa
+        // sometimes uses 96-bit integers and the following code will return an integer of a
+        // different size.
+        let bytes = (num_bits / 8).next_power_of_two() as i32;
+        match bytes {
+            1 => self.i8_type,
+            2 => self.i16_type,
+            4 => self.i32_type,
+            8 => self.i64_type,
+            16 => self.i128_type,
+            _ => panic!("unexpected num_bits: {}", num_bits),
+        }
+    }
+
+    pub fn type_void(&self) -> Type<'gcc> {
+        self.context.new_type::<()>()
+    }
+
+    pub fn type_size_t(&self) -> Type<'gcc> {
+        self.context.new_type::<usize>()
+    }
+
+    pub fn type_u8(&self) -> Type<'gcc> {
+        self.u8_type
+    }
+
+    pub fn type_u16(&self) -> Type<'gcc> {
+        self.u16_type
+    }
+
+    pub fn type_u32(&self) -> Type<'gcc> {
+        self.u32_type
+    }
+
+    pub fn type_u64(&self) -> Type<'gcc> {
+        self.u64_type
+    }
+
+    pub fn type_u128(&self) -> Type<'gcc> {
+        self.u128_type
+    }
+
+    pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+        // FIXME(eddyb) We could find a better approximation if ity.align < align.
+        let ity = Integer::approximate_align(self, align);
+        self.type_from_integer(ity)
+    }
+}
+
+impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn type_i1(&self) -> Type<'gcc> {
+        self.bool_type
+    }
+
+    fn type_i8(&self) -> Type<'gcc> {
+        self.i8_type
+    }
+
+    fn type_i16(&self) -> Type<'gcc> {
+        self.i16_type
+    }
+
+    fn type_i32(&self) -> Type<'gcc> {
+        self.i32_type
+    }
+
+    fn type_i64(&self) -> Type<'gcc> {
+        self.i64_type
+    }
+
+    fn type_i128(&self) -> Type<'gcc> {
+        self.i128_type
+    }
+
+    fn type_isize(&self) -> Type<'gcc> {
+        self.isize_type
+    }
+
+    fn type_f32(&self) -> Type<'gcc> {
+        self.context.new_type::<f32>()
+    }
+
+    fn type_f64(&self) -> Type<'gcc> {
+        self.context.new_type::<f64>()
+    }
+
+    fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
+        self.context.new_function_pointer_type(None, return_type, params, false)
+    }
+
+    fn type_struct(&self, fields: &[Type<'gcc>], _packed: bool) -> Type<'gcc> {
+        let types = fields.to_vec();
+        if let Some(typ) = self.struct_types.borrow().get(fields) {
+            return typ.clone();
+        }
+        let fields: Vec<_> = fields.iter().enumerate()
+            .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
+            .collect();
+        // TODO(antoyo): use packed.
+        let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
+        self.struct_types.borrow_mut().insert(types, typ);
+        typ
+    }
+
+    fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+        if typ.is_integral() {
+            TypeKind::Integer
+        }
+        else if typ.is_vector().is_some() {
+            TypeKind::Vector
+        }
+        else {
+            // TODO(antoyo): support other types.
+            TypeKind::Void
+        }
+    }
+
+    fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+        ty.make_pointer()
+    }
+
+    fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+        // TODO(antoyo): use address_space
+        ty.make_pointer()
+    }
+
+    fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
+        if let Some(typ) = ty.is_array() {
+            typ
+        }
+        else if let Some(vector_type) = ty.is_vector() {
+            vector_type.get_element_type()
+        }
+        else if let Some(typ) = ty.get_pointee() {
+            typ
+        }
+        else {
+            unreachable!()
+        }
+    }
+
+    fn vector_length(&self, _ty: Type<'gcc>) -> usize {
+        unimplemented!();
+    }
+
+    fn float_width(&self, typ: Type<'gcc>) -> usize {
+        let f32 = self.context.new_type::<f32>();
+        let f64 = self.context.new_type::<f64>();
+        if typ == f32 {
+            32
+        }
+        else if typ == f64 {
+            64
+        }
+        else {
+            panic!("Cannot get width of float type {:?}", typ);
+        }
+        // TODO(antoyo): support other sizes.
+    }
+
+    fn int_width(&self, typ: Type<'gcc>) -> u64 {
+        if typ.is_i8(self) || typ.is_u8(self) {
+            8
+        }
+        else if typ.is_i16(self) || typ.is_u16(self) {
+            16
+        }
+        else if typ.is_i32(self) || typ.is_u32(self) {
+            32
+        }
+        else if typ.is_i64(self) || typ.is_u64(self) {
+            64
+        }
+        else if typ.is_i128(self) || typ.is_u128(self) {
+            128
+        }
+        else {
+            panic!("Cannot get width of int type {:?}", typ);
+        }
+    }
+
+    fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
+        value.get_type()
+    }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
+        let unit = Integer::approximate_align(self, align);
+        let size = size.bytes();
+        let unit_size = unit.size().bytes();
+        assert_eq!(size % unit_size, 0);
+        self.type_array(self.type_from_integer(unit), size / unit_size)
+    }
+
+    pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
+        // TODO(antoyo): use packed.
+        let fields: Vec<_> = fields.iter().enumerate()
+            .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
+            .collect();
+        typ.set_fields(None, &fields);
+    }
+
+    pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
+        self.context.new_opaque_struct_type(None, name)
+    }
+
+    pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+        if let Some(struct_type) = ty.is_struct() {
+            if struct_type.get_field_count() == 0 {
+                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+                // size of usize::MAX in test_binary_search, we work around this by setting the size
+                // to zero for ZSTs.
+                // FIXME(antoyo): fix gccjit API.
+                len = 0;
+            }
+        }
+
+        // NOTE: see note above. Some other test uses usize::MAX.
+        if len == u64::MAX {
+            len = 0;
+        }
+
+        let len: i32 = len.try_into().expect("array len");
+
+        self.context.new_array_type(None, ty, len)
+    }
+}
+
+pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
+    let field_count = layout.fields.count();
+
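+    // Walk the fields in increasing-offset order, inserting an explicit padding
+    // filler before each field so that it lands at its target offset; `packed`
+    // records whether any field ended up below its natural alignment.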
+    let mut packed = false;
+    let mut offset = Size::ZERO;
+    let mut prev_effective_align = layout.align.abi;
+    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+    for i in layout.fields.index_by_increasing_offset() {
+        let target_offset = layout.fields.offset(i as usize);
+        let field = layout.field(cx, i);
+        let effective_field_align =
+            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+        packed |= effective_field_align < field.align.abi;
+
+        assert!(target_offset >= offset);
+        let padding = target_offset - offset;
+        let padding_align = prev_effective_align.min(effective_field_align);
+        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+        result.push(cx.type_padding_filler(padding, padding_align));
+
+        result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
+        offset = target_offset + field.size;
+        prev_effective_align = effective_field_align;
+    }
+    if !layout.is_unsized() && field_count > 0 {
+        if offset > layout.size {
+            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+        }
+        let padding = layout.size - offset;
+        let padding_align = prev_effective_align;
+        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+        result.push(cx.type_padding_filler(padding, padding_align));
+        assert_eq!(result.len(), 1 + field_count * 2);
+    }
+
+    (result, packed)
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
new file mode 100644 (file)
index 0000000..9c39c8f
--- /dev/null
@@ -0,0 +1,359 @@
+use std::fmt::Write;
+
+use gccjit::{Struct, Type};
+use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Ty, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+
+use crate::abi::{FnAbiGccExt, GccType};
+use crate::context::CodegenCx;
+use crate::type_::struct_fields;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
+        use Integer::*;
+        match i {
+            I8 => self.type_u8(),
+            I16 => self.type_u16(),
+            I32 => self.type_u32(),
+            I64 => self.type_u64(),
+            I128 => self.type_u128(),
+        }
+    }
+}
+
+pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
+    match layout.abi {
+        Abi::Scalar(_) => bug!("handled elsewhere"),
+        Abi::Vector { ref element, count } => {
+            let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
+            return cx.context.new_vector_type(element, count);
+        },
+        Abi::ScalarPair(..) => {
+            return cx.type_struct(
+                &[
+                    layout.scalar_pair_element_gcc_type(cx, 0, false),
+                    layout.scalar_pair_element_gcc_type(cx, 1, false),
+                ],
+                false,
+            );
+        }
+        Abi::Uninhabited | Abi::Aggregate { .. } => {}
+    }
+
+    let name = match layout.ty.kind() {
+        // FIXME(eddyb) producing readable type names for trait objects can result
+        // in problematically distinct types due to HRTB and subtyping (see #47638).
+        // ty::Dynamic(..) |
+        ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+            if !cx.sess().fewer_names() =>
+        {
+            let mut name = with_no_trimmed_paths(|| layout.ty.to_string());
+            if let (&ty::Adt(def, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                if def.is_enum() && !def.variants.is_empty() {
+                    write!(&mut name, "::{}", def.variants[index].ident).unwrap();
+                }
+            }
+            if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+            }
+            Some(name)
+        }
+        ty::Adt(..) => {
+            // If `Some` is returned then a named struct is created in LLVM. Name collisions are
+            // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
+            // can improve perf.
+            // FIXME(antoyo): I don't think that's true for libgccjit.
+            Some(String::new())
+        }
+        _ => None,
+    };
+
+    match layout.fields {
+        FieldsShape::Primitive | FieldsShape::Union(_) => {
+            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+            let packed = false;
+            match name {
+                None => cx.type_struct(&[fill], packed),
+                Some(ref name) => {
+                    let gcc_type = cx.type_named_struct(name);
+                    cx.set_struct_body(gcc_type, &[fill], packed);
+                    gcc_type.as_type()
+                },
+            }
+        }
+        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
+        FieldsShape::Arbitrary { .. } =>
+            match name {
+                None => {
+                    let (gcc_fields, packed) = struct_fields(cx, layout);
+                    cx.type_struct(&gcc_fields, packed)
+                },
+                Some(ref name) => {
+                    let gcc_type = cx.type_named_struct(name);
+                    *defer = Some((gcc_type, layout));
+                    gcc_type.as_type()
+                },
+            },
+    }
+}
+
+pub trait LayoutGccExt<'tcx> {
+    fn is_gcc_immediate(&self) -> bool;
+    fn is_gcc_scalar_pair(&self) -> bool;
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
+    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+    fn gcc_field_index(&self, index: usize) -> u64;
+    fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
+    fn is_gcc_immediate(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::Vector { .. } => true,
+            Abi::ScalarPair(..) => false,
+            Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+        }
+    }
+
+    fn is_gcc_scalar_pair(&self) -> bool {
+        match self.abi {
+            Abi::ScalarPair(..) => true,
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        }
+    }
+
+    /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+    /// The pointee type of the pointer in `PlaceRef` is always this type.
+    /// For sized types, it is also the right GCC type for an `alloca`
+    /// containing a value of that type, and most immediates (except `bool`).
+    /// Unsized types, however, are represented by a "minimal unit", e.g.
+    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+    /// If the type is an unsized struct, the regular layout is generated,
+    /// with the inner-most trailing unsized field using the "minimal unit"
+    /// of that field's type - this is useful for taking the address of
+    /// that field and ensuring the struct has the right alignment.
+    // TODO(antoyo): do we still need the set_fields parameter?
+    fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
+        if let Abi::Scalar(ref scalar) = self.abi {
+            // Use a different cache for scalars because pointers to DSTs
+            // can be either fat or thin (data pointers of fat pointers).
+            if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
+                return ty;
+            }
+            let ty =
+                match *self.ty.kind() {
+                    ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+                        cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
+                    }
+                    ty::Adt(def, _) if def.is_box() => {
+                        cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
+                    }
+                    ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
+                    _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+                };
+            cx.scalar_types.borrow_mut().insert(self.ty, ty);
+            return ty;
+        }
+
+        // Check the cache.
+        let variant_index =
+            match self.variants {
+                Variants::Single { index } => Some(index),
+                _ => None,
+            };
+        let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
+        if let Some(ty) = cached_type {
+            let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
+            if let Some((struct_type, layout)) = type_to_set_fields {
+                // Since we might be trying to generate a type containing another type which is not
+                // completely generated yet, we deferred setting the fields until now.
+                let (fields, packed) = struct_fields(cx, layout);
+                cx.set_struct_body(struct_type, &fields, packed);
+            }
+            return ty;
+        }
+
+        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+        // Make sure lifetimes are erased, to avoid generating distinct GCC
+        // types for Rust types that only differ in the choice of lifetimes.
+        let normal_ty = cx.tcx.erase_regions(self.ty);
+
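+        // `defer` receives a named struct whose fields could not be set yet;
+        // its body is filled in after the type is inserted into the cache
+        // (see below), which allows recursive types to reference themselves.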
+        let mut defer = None;
+        let ty =
+            if self.ty != normal_ty {
+                let mut layout = cx.layout_of(normal_ty);
+                if let Some(v) = variant_index {
+                    layout = layout.for_variant(cx, v);
+                }
+                layout.gcc_type(cx, true)
+            }
+            else {
+                uncached_gcc_type(cx, *self, &mut defer)
+            };
+
+        cx.types.borrow_mut().insert((self.ty, variant_index), ty);
+
+        if let Some((ty, layout)) = defer {
+            let (fields, packed) = struct_fields(cx, layout);
+            cx.set_struct_body(ty, &fields, packed);
+        }
+
+        ty
+    }
+
+    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        if let Abi::Scalar(ref scalar) = self.abi {
+            if scalar.is_bool() {
+                return cx.type_i1();
+            }
+        }
+        self.gcc_type(cx, true)
+    }
+
+    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
+        match scalar.value {
+            Int(i, true) => cx.type_from_integer(i),
+            Int(i, false) => cx.type_from_unsigned_integer(i),
+            F32 => cx.type_f32(),
+            F64 => cx.type_f64(),
+            Pointer => {
+                // If we know the alignment, pick something better than i8.
+                let pointee =
+                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                        cx.type_pointee_for_align(pointee.align)
+                    }
+                    else {
+                        cx.type_i8()
+                    };
+                cx.type_ptr_to(pointee)
+            }
+        }
+    }
+
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+        // TODO(antoyo): remove llvm hack:
+        // HACK(eddyb) special-case fat pointers until LLVM removes
+        // pointee types, to avoid bitcasting every `OperandRef::deref`.
+        match self.ty.kind() {
+            ty::Ref(..) | ty::RawPtr(_) => {
+                return self.field(cx, index).gcc_type(cx, true);
+            }
+            ty::Adt(def, _) if def.is_box() => {
+                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
+            }
+            _ => {}
+        }
+
+        let (a, b) = match self.abi {
+            Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_gcc_type` would when
+        // dealing with an immediate pair.  This means that `(bool, bool)` is
+        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+        // immediate, just like `bool` is typically `i8` in memory and only `i1`
+        // when immediate.  We need to load/store `bool` as `i8` to avoid
+        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
+        if scalar.is_bool() {
+            return cx.type_i1();
+        }
+
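+        // The second element starts after the first, rounded up to the
+        // second element's ABI alignment.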
+        let offset =
+            if index == 0 {
+                Size::ZERO
+            }
+            else {
+                a.value.size(cx).align_to(b.value.align(cx).abi)
+            };
+        self.scalar_gcc_type_at(cx, scalar, offset)
+    }
+
+    fn gcc_field_index(&self, index: usize) -> u64 {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+            }
+            _ => {}
+        }
+        match self.fields {
+            FieldsShape::Primitive | FieldsShape::Union(_) => {
+                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+            }
+
+            FieldsShape::Array { .. } => index as u64,
+
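+            // `struct_fields` interleaves a padding filler before each real
+            // field, so field `i` (in memory order) lives at GCC field
+            // index `2 * i + 1`.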
+            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+        }
+    }
+
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+            return pointee;
+        }
+
+        let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+        result
+    }
+}
+
+impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.gcc_type(self, true)
+    }
+
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.immediate_gcc_type(self)
+    }
+
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_immediate()
+    }
+
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_scalar_pair()
+    }
+
+    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+        layout.gcc_field_index(index)
+    }
+
+    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+        layout.scalar_pair_element_gcc_type(self, index, immediate)
+    }
+
+    fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+        ty.gcc_type(self)
+    }
+
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        fn_abi.ptr_to_gcc_type(self)
+    }
+
+    fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+        unimplemented!();
+    }
+
+    fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        // FIXME(antoyo): return correct type.
+        self.type_void()
+    }
+}
diff --git a/compiler/rustc_codegen_gcc/test.sh b/compiler/rustc_codegen_gcc/test.sh
new file mode 100755 (executable)
index 0000000..944d0ce
--- /dev/null
@@ -0,0 +1,217 @@
+#!/bin/bash
+
+# TODO(antoyo): rewrite with cargo-make (or `just`, or something similar) so that the sysroot is only rebuilt when needed?
+
+set -e
+
+if [ -f ./gcc_path ]; then
+    export GCC_PATH=$(cat gcc_path)
+else
+    echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
+    exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$1" == "--release" ]]; then
+    export CHANNEL='release'
+    CARGO_INCREMENTAL=1 cargo rustc --release
+    shift
+else
+    echo $LD_LIBRARY_PATH
+    export CHANNEL='debug'
+    cargo rustc
+fi
+
+source config.sh
+
+function clean() {
+    rm -r target/out || true
+    mkdir -p target/out/gccjit
+}
+
+function mini_tests() {
+    echo "[BUILD] mini_core"
+    $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
+
+    echo "[BUILD] example"
+    $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
+
+    echo "[AOT] mini_core_hello_world"
+    $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+}
+
+function build_sysroot() {
+    echo "[BUILD] sysroot"
+    time ./build_sysroot/build_sysroot.sh
+}
+
+function std_tests() {
+    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+    $RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+    echo "[AOT] alloc_system"
+    $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+    echo "[AOT] alloc_example"
+    $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/alloc_example
+
+    echo "[AOT] dst_field_align"
+    # FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+    $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+    echo "[AOT] std_example"
+    $RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE
+
+    echo "[AOT] subslice-patterns-const-eval"
+    $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+    echo "[AOT] track-caller-attribute"
+    $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+    $RUN_WRAPPER ./target/out/track-caller-attribute
+
+    echo "[BUILD] mod_bench"
+    $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+}
+
+# FIXME(antoyo): linker gives multiple definitions error on Linux
+#echo "[BUILD] sysroot in release mode"
+#./build_sysroot/build_sysroot.sh --release
+
+# TODO(antoyo): uncomment when it works.
+#pushd simple-raytracer
+#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+    #echo "[BENCH COMPILE] ebobby/simple-raytracer"
+    #hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "rm -r target/*/debug || true" \
+    #"RUSTFLAGS='' cargo build --target $TARGET_TRIPLE" \
+    #"../cargo.sh build"
+
+    #echo "[BENCH RUN] ebobby/simple-raytracer"
+    #cp ./target/*/debug/main ./raytracer_cg_gccjit
+    #hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_gccjit
+#else
+    #echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+    #echo "[COMPILE] ebobby/simple-raytracer"
+    #../cargo.sh build
+    #echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+#fi
+#popd
+
+function test_libcore() {
+    pushd build_sysroot/sysroot_src/library/core/tests
+    echo "[TEST] libcore"
+    rm -r ./target || true
+    ../../../../../cargo.sh test
+    popd
+}
+
+# TODO(antoyo): uncomment when it works.
+#pushd regex
+#echo "[TEST] rust-lang/regex example shootout-regex-dna"
+#../cargo.sh clean
+## Make sure `[codegen mono items] start` doesn't poison the diff
+#../cargo.sh build --example shootout-regex-dna
+#cat examples/regexdna-input.txt | ../cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
+#diff -u res.txt examples/regexdna-output.txt
+
+#echo "[TEST] rust-lang/regex tests"
+#../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options
+#popd
+
+#echo
+#echo "[BENCH COMPILE] mod_bench"
+
+#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
+#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort"
+
+## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow
+#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
+
+#echo
+#echo "[BENCH RUN] mod_bench"
+#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_*
+
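+# Runs the rustc UI test suite against this backend: checks out the rust
+# sources matching the current rustc, prunes tests that rely on unsupported
+# features (unwinding, threads, LTO, LLVM-specific flags, ...), then runs
+# the remaining UI tests with rustc configured to use libgccjit.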
+function test_rustc() {
+    echo
+    echo "[TEST] rust-lang/rust"
+
+    rust_toolchain=$(cat rust-toolchain)
+
+    git clone https://github.com/rust-lang/rust.git || true
+    cd rust
+    git fetch
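+    # Check out the commit this rustc was built from: `rustc -V` prints
+    # something like `rustc 1.56.0-nightly (abcdef123 2021-08-01)`, so the
+    # third field with the `(` stripped is the commit hash.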
+    git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
+    export RUSTFLAGS=
+
+    rm config.toml || true
+
+    cat > config.toml <<EOF
+[rust]
+codegen-backends = []
+deny-warnings = false
+
+[build]
+cargo = "$(which cargo)"
+local-rebuild = true
+rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
+EOF
+
+    rustc -V | cut -d' ' -f3 | tr -d '('
+    git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test
+
+    for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+      rm $test
+    done
+
+    git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+    rm -r src/test/ui/{abi*,extern/,llvm-asm/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true
+    for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
+      rm $test
+    done
+    git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
+    git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
+    rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Enable back this test if I ever implement the llvm_asm! macro.
+
+    RUSTC_ARGS="-Zpanic-abort-tests -Zsymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
+
+    echo "[TEST] rustc test suite"
+    COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"
+}
+
+function clean_ui_tests() {
+    find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -exec rm -rf {} \;
+}
+
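+# Usage: ./test.sh [--release] [--test-rustc|--test-libcore|--clean-ui-tests]
+# With no mode argument, runs the full pipeline: clean, mini tests, sysroot
+# build, std tests, libcore tests and the rustc test suite.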
+case $1 in
+    "--test-rustc")
+        test_rustc
+        ;;
+
+    "--test-libcore")
+        test_libcore
+        ;;
+
+    "--clean-ui-tests")
+        clean_ui_tests
+        ;;
+
+    *)
+        clean
+        mini_tests
+        build_sysroot
+        std_tests
+        test_libcore
+        test_rustc
+        ;;
+esac
diff --git a/compiler/rustc_codegen_gcc/tests/lib.rs b/compiler/rustc_codegen_gcc/tests/lib.rs
new file mode 100644 (file)
index 0000000..8ee35b3
--- /dev/null
@@ -0,0 +1,50 @@
+use std::{
+    env::{self, current_dir},
+    path::PathBuf,
+    process::Command,
+};
+
+use lang_tester::LangTester;
+use tempfile::TempDir;
+
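+// Test harness: compiles every file in `tests/run` with the GCC backend and
+// checks its exit status and output against the `// Compiler:`/`// Run-time:`
+// header comments, using the lang_tester crate.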
+fn main() {
+    let tempdir = TempDir::new().expect("temp dir");
+    let current_dir = current_dir().expect("current dir");
+    let current_dir = current_dir.to_str().expect("current dir").to_string();
+    let gcc_path = include_str!("../gcc_path");
+    let gcc_path = gcc_path.trim();
+    env::set_var("LD_LIBRARY_PATH", gcc_path);
+    LangTester::new()
+        .test_dir("tests/run")
+        .test_file_filter(|path| path.extension().expect("extension").to_str().expect("to_str") == "rs")
+        .test_extract(|source| {
+            let lines =
+                source.lines()
+                    .skip_while(|l| !l.starts_with("//"))
+                    .take_while(|l| l.starts_with("//"))
+                    .map(|l| &l[2..])
+                    .collect::<Vec<_>>()
+                    .join("\n");
+            Some(lines)
+        })
+        .test_cmds(move |path| {
+            // Test command 1: Compile `x.rs` into `tempdir/x`.
+            let mut exe = PathBuf::new();
+            exe.push(&tempdir);
+            exe.push(path.file_stem().expect("file_stem"));
+            let mut compiler = Command::new("rustc");
+            compiler.args(&[
+                &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir),
+                "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir),
+                "-Zno-parallel-llvm",
+                "-C", "panic=abort",
+                "-C", "link-arg=-lc",
+                "-o", exe.to_str().expect("to_str"),
+                path.to_str().expect("to_str"),
+            ]);
+            // Test command 2: run `tempdir/x`.
+            let runtime = Command::new(exe);
+            vec![("Compiler", compiler), ("Run-time", runtime)]
+        })
+        .run();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort1.rs b/compiler/rustc_codegen_gcc/tests/run/abort1.rs
new file mode 100644 (file)
index 0000000..291af59
--- /dev/null
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+//   status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+/*
+ * Code
+ */
+
+fn test_fail() -> ! {
+    unsafe { intrinsics::abort() };
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    test_fail();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort2.rs b/compiler/rustc_codegen_gcc/tests/run/abort2.rs
new file mode 100644 (file)
index 0000000..3c87c56
--- /dev/null
@@ -0,0 +1,53 @@
+// Compiler:
+//
+// Run-time:
+//   status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+/*
+ * Code
+ */
+
+fn fail() -> i32 {
+    unsafe { intrinsics::abort() };
+    0
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    fail();
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/array.rs b/compiler/rustc_codegen_gcc/tests/run/array.rs
new file mode 100644 (file)
index 0000000..8b621d8
--- /dev/null
@@ -0,0 +1,229 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 42
+//     7
+//     5
+//     10
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn puts(s: *const u8) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for isize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+
+/*
+ * Code
+ */
+
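+// A mutable static, presumably so the indices below aren't constant-folded
+// and the indexing goes through the run-time bounds checks.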
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+    [42, 10, 5]
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    let array = [42, 7, 5];
+    let array2 = make_array();
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE - 1]);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE]);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE + 1]);
+
+        libc::printf(b"%d\n\0" as *const u8 as *const i8, array2[argc as usize] as u32);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/asm.rs b/compiler/rustc_codegen_gcc/tests/run/asm.rs
new file mode 100644 (file)
index 0000000..9c0055b
--- /dev/null
@@ -0,0 +1,153 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+
+#![feature(asm, global_asm)]
+
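+// `add_asm` follows the System V AMD64 calling convention: arguments in
+// rdi/rsi, result in rax.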
+global_asm!("
+    .global add_asm
+add_asm:
+     mov rax, rdi
+     add rax, rsi
+     ret"
+);
+
+extern "C" {
+    fn add_asm(a: i64, b: i64) -> i64;
+}
+
+fn main() {
+    unsafe {
+        asm!("nop");
+    }
+
+    let x: u64;
+    unsafe {
+        asm!("mov $5, {}",
+            out(reg) x,
+            options(att_syntax)
+        );
+    }
+    assert_eq!(x, 5);
+
+    let x: u64;
+    let input: u64 = 42;
+    unsafe {
+        asm!("mov {input}, {output}",
+             "add $1, {output}",
+            input = in(reg) input,
+            output = out(reg) x,
+            options(att_syntax)
+        );
+    }
+    assert_eq!(x, 43);
+
+    let x: u64;
+    unsafe {
+        asm!("mov {}, 6",
+            out(reg) x,
+        );
+    }
+    assert_eq!(x, 6);
+
+    let x: u64;
+    let input: u64 = 42;
+    unsafe {
+        asm!("mov {output}, {input}",
+             "add {output}, 1",
+            input = in(reg) input,
+            output = out(reg) x,
+        );
+    }
+    assert_eq!(x, 43);
+
+    // check inout(reg_class) x
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add {0}, {0}",
+            inout(reg) x
+        );
+    }
+    assert_eq!(x, 84);
+
+    // check inout("reg") x
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add r11, r11",
+            inout("r11") x 
+        );
+    }
+    assert_eq!(x, 84);
+
+    // check a mix of
+    // in("reg")
+    // inout(class) x => y
+    // inout(class) x
+    let x: u64 = 702;
+    let y: u64 = 100;
+    let res: u64;
+    let mut rem: u64 = 0;
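+    // `div r11` divides rdx:rax by r11, leaving the quotient in rax and the
+    // remainder in rdx: 702 / 100 = 7 remainder 2.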
+    unsafe {
+        asm!("div r11",
+            in("r11") y,
+            inout("eax") x => res,
+            inout("edx") rem,
+        );
+    }
+    assert_eq!(res, 7);
+    assert_eq!(rem, 2);
+
+    // check const
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add {}, {}",
+            inout(reg) x,
+            const 1
+        );
+    }
+    assert_eq!(x, 43);
+
+    // check const (ATT syntax)
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add {}, {}",
+            const 1,
+            inout(reg) x,
+            options(att_syntax)
+        );
+    }
+    assert_eq!(x, 43);
+
+    // check sym fn
+    extern "C" fn foo() -> u64 { 42 }
+    let x: u64;
+    unsafe {
+        asm!("call {}", sym foo, lateout("rax") x);
+    }
+    assert_eq!(x, 42);
+
+    // check sym fn (ATT syntax)
+    let x: u64;
+    unsafe {
+        asm!("call {}", sym foo, lateout("rax") x, options(att_syntax));
+    }
+    assert_eq!(x, 42);
+
+    // check sym static
+    static FOO: u64 = 42;
+    let x: u64;
+    unsafe {
+        asm!("mov {1}, qword ptr [rip + {0}]", sym FOO, lateout(reg) x);
+    }
+    assert_eq!(x, 42);
+
+    // check sym static (ATT syntax)
+    let x: u64;
+    unsafe {
+        asm!("movq {0}(%rip), {1}", sym FOO, lateout(reg) x, options(att_syntax));
+    }
+    assert_eq!(x, 42);
+
+    assert_eq!(unsafe { add_asm(40, 2) }, 42);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/assign.rs b/compiler/rustc_codegen_gcc/tests/run/assign.rs
new file mode 100644 (file)
index 0000000..cc86470
--- /dev/null
@@ -0,0 +1,153 @@
+// Compiler:
+//
+// Run-time:
+//   stdout: 2
+//     7 8
+//     10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn puts(s: *const u8) -> i32;
+        pub fn fflush(stream: *mut i32) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+
+        pub static STDOUT: *mut i32;
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        libc::fflush(libc::STDOUT);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+/*
+ * Code
+ */
+
+fn inc_ref(num: &mut isize) -> isize {
+    *num = *num + 5;
+    *num + 1
+}
+
+fn inc(num: isize) -> isize {
+    num + 1
+}
+
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    argc = inc(argc);
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+    }
+
+    let b = inc_ref(&mut argc);
+    unsafe {
+        libc::printf(b"%ld %ld\n\0" as *const u8 as *const i8, argc, b);
+    }
+
+    argc = 10;
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/closure.rs b/compiler/rustc_codegen_gcc/tests/run/closure.rs
new file mode 100644 (file)
index 0000000..7121a5f
--- /dev/null
@@ -0,0 +1,230 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: Arg: 1
+//     Argument: 1
+//     String arg: 1
+//     Int argument: 2
+//     Both args: 11
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics,
+    unboxed_closures)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn puts(s: *const u8) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+    #[lang = "fn_once_output"]
+    type Output;
+
+    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    let string = "Arg: %d\n\0";
+    let mut closure = || {
+        unsafe {
+            libc::printf(string as *const str as *const i8, argc);
+        }
+    };
+    closure();
+
+    let mut closure = || {
+        unsafe {
+            libc::printf("Argument: %d\n\0" as *const str as *const i8, argc);
+        }
+    };
+    closure();
+
+    let mut closure = |string| {
+        unsafe {
+            libc::printf(string as *const str as *const i8, argc);
+        }
+    };
+    closure("String arg: %d\n\0");
+
+    let mut closure = |arg: isize| {
+        unsafe {
+            libc::printf("Int argument: %d\n\0" as *const str as *const i8, arg);
+        }
+    };
+    closure(argc + 1);
+
+    let mut closure = |string, arg: isize| {
+        unsafe {
+            libc::printf(string as *const str as *const i8, arg);
+        }
+    };
+    closure("Both args: %d\n\0", argc + 10);
+
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/condition.rs b/compiler/rustc_codegen_gcc/tests/run/condition.rs
new file mode 100644 (file)
index 0000000..6a2e2d5
--- /dev/null
@@ -0,0 +1,320 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: true
+//     1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for u64 {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for bool {}
+impl Copy for u16 {}
+impl Copy for i16 {}
+impl Copy for char {}
+impl Copy for i8 {}
+impl Copy for u8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn puts(s: *const u8) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for isize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+    fn eq(&self, other: &Rhs) -> bool;
+    fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+    fn eq(&self, other: &u8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u16 {
+    fn eq(&self, other: &u16) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u16) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u32 {
+    fn eq(&self, other: &u32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+
+impl PartialEq for u64 {
+    fn eq(&self, other: &u64) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u64) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for usize {
+    fn eq(&self, other: &usize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &usize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i8 {
+    fn eq(&self, other: &i8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i32 {
+    fn eq(&self, other: &i32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for isize {
+    fn eq(&self, other: &isize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &isize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for char {
+    fn eq(&self, other: &char) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &char) -> bool {
+        (*self) != (*other)
+    }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        if argc == 1 {
+            libc::printf(b"true\n\0" as *const u8 as *const i8);
+        }
+
+        let string =
+            match argc {
+                1 => b"1\n\0",
+                2 => b"2\n\0",
+                3 => b"3\n\0",
+                4 => b"4\n\0",
+                5 => b"5\n\0",
+                _ => b"_\n\0",
+            };
+        libc::printf(string as *const u8 as *const i8);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/empty_main.rs b/compiler/rustc_codegen_gcc/tests/run/empty_main.rs
new file mode 100644 (file)
index 0000000..c02cfd2
--- /dev/null
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit.rs b/compiler/rustc_codegen_gcc/tests/run/exit.rs
new file mode 100644 (file)
index 0000000..956e53d
--- /dev/null
@@ -0,0 +1,49 @@
+// Compiler:
+//
+// Run-time:
+//   status: 2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn exit(status: i32);
+    }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        libc::exit(2);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit_code.rs b/compiler/rustc_codegen_gcc/tests/run/exit_code.rs
new file mode 100644 (file)
index 0000000..eeab352
--- /dev/null
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+//   status: 1
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    1
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs
new file mode 100644 (file)
index 0000000..a226fff
--- /dev/null
@@ -0,0 +1,223 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn puts(s: *const u8) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for isize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+
+/*
+ * Code
+ */
+
+fn i16_as_i8(a: i16) -> i8 {
+    a as i8
+}
+
+fn call_func(func: fn(i16) -> i8, param: i16) -> i8 {
+    func(param)
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        let result = call_func(i16_as_i8, argc as i16) as isize;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, result);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs
new file mode 100644 (file)
index 0000000..7111703
--- /dev/null
@@ -0,0 +1,129 @@
+// Compiler:
+//
+// Run-time:
+//   stdout: Panicking
+//   status: signal
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn puts(s: *const u8) -> i32;
+        pub fn fflush(stream: *mut i32) -> i32;
+
+        pub static STDOUT: *mut i32;
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        libc::fflush(libc::STDOUT);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+/*
+ * Code
+ */
+
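+// `int` starts at isize::MAX (on 64-bit), so adding argc (>= 1) overflows;
+// with overflow checks enabled (the default in debug builds) this calls
+// `panic`, which prints "Panicking" and aborts, matching the expected
+// stdout and signal status above.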
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    let int = 9223372036854775807isize;
+    let int = int + argc;
+    int
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs
new file mode 100644 (file)
index 0000000..e887600
--- /dev/null
@@ -0,0 +1,165 @@
+// Compiler:
+//
+// Run-time:
+//   stdout: 2
+//     7
+//     6
+//     11
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn puts(s: *const u8) -> i32;
+        pub fn fflush(stream: *mut i32) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+
+        pub static STDOUT: *mut i32;
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        libc::fflush(libc::STDOUT);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+    field: isize,
+}
+
+fn test(num: isize) -> Test {
+    Test {
+        field: num + 1,
+    }
+}
+
+fn update_num(num: &mut isize) {
+    *num = *num + 5;
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    let mut test = test(argc);
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+    }
+    update_num(&mut test.field);
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+    }
+
+    update_num(&mut argc);
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+    }
+
+    let refe = &mut argc;
+    *refe = *refe + 5;
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+    }
+
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/operations.rs b/compiler/rustc_codegen_gcc/tests/run/operations.rs
new file mode 100644 (file)
index 0000000..4dc3753
--- /dev/null
@@ -0,0 +1,221 @@
+// Compiler:
+//
+// Run-time:
+//   stdout: 41
+//     39
+//     10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, arbitrary_self_types)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+impl Copy for i32 {}
+
+#[lang = "deref"]
+pub trait Deref {
+    type Target: ?Sized;
+
+    fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn puts(s: *const u8) -> i32;
+        pub fn fflush(stream: *mut i32) -> i32;
+
+        pub static STDOUT: *mut i32;
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        libc::fflush(libc::STDOUT);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for isize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+impl Mul for usize {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+impl Mul for isize {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 + argc);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 - argc);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, 10 * argc);
+    }
+    0
+}
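
operations.rs exists to check that the `add`, `sub` and `mul` lang items correctly route the `+`, `-` and `*` operators to the trait methods the backend must generate calls for. On a stock toolchain those items are supplied by `core`, and user types plug into the same dispatch through `core::ops`; a minimal sketch with a hypothetical `Cents` newtype (not part of the patch):

    use std::ops::{Add, Mul, Sub};

    // Hypothetical newtype, purely for illustration.
    #[derive(Clone, Copy, Debug)]
    struct Cents(i64);

    impl Add for Cents {
        type Output = Cents;
        fn add(self, rhs: Cents) -> Cents { Cents(self.0 + rhs.0) }
    }

    impl Sub for Cents {
        type Output = Cents;
        fn sub(self, rhs: Cents) -> Cents { Cents(self.0 - rhs.0) }
    }

    impl Mul<i64> for Cents {
        type Output = Cents;
        fn mul(self, rhs: i64) -> Cents { Cents(self.0 * rhs) }
    }

    fn main() {
        // Mirrors the 40 + argc / 40 - argc / 10 * argc checks above.
        println!("{:?}", Cents(40) + Cents(1)); // Cents(41)
        println!("{:?}", Cents(40) - Cents(1)); // Cents(39)
        println!("{:?}", Cents(1) * 10);        // Cents(10)
    }
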
diff --git a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs
new file mode 100644 (file)
index 0000000..6ac099e
--- /dev/null
@@ -0,0 +1,222 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn puts(s: *const u8) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\0" as *const str as *const u8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i32 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for isize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for isize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+
+/*
+ * Code
+ */
+
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+    [42, 10, 5]
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        let ptr = ONE as *mut usize;
+        let value = ptr as usize;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, value);
+    }
+    0
+}
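
The whole test reduces to a usize to pointer to usize round trip; the pointer is never dereferenced, so only value preservation through the two casts is checked. The equivalent in ordinary Rust (a sketch, not part of the patch):

    fn main() {
        let one: usize = 1;
        let ptr = one as *mut usize; // integer-to-pointer cast
        let value = ptr as usize;    // pointer-to-integer cast
        println!("{}", value);       // prints 1: the value survives the round trip
    }
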
diff --git a/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs
new file mode 100644 (file)
index 0000000..6fa10dc
--- /dev/null
@@ -0,0 +1,72 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 10
+//     10
+//     42
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+    (
+        a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+        b as u32,
+    )
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    let (a, b, c, d, e, f, g, h, i, j) = int_cast(10, 42);
+    unsafe {
+        libc::printf(b"%d\n\0" as *const u8 as *const i8, c);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, d);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, j);
+    }
+    0
+}
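
`int_cast` packs a battery of integer conversions into one wide tuple return, so the test exercises both `as`-cast lowering and a multi-field aggregate return in a single run. The cast rules themselves are the usual ones (truncate on narrowing, zero- or sign-extend on widening); a standalone sketch, not part of the patch:

    fn main() {
        let a: u16 = 10;
        let b: i16 = 42;
        println!("{}", a as u32);   // 10: zero-extended
        println!("{}", a as usize); // 10
        println!("{}", b as u32);   // 42: sign-extended, then reinterpreted
        let negative: i16 = -1;
        println!("{}", negative as u8); // 255: narrowing keeps the low bits
    }
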
diff --git a/compiler/rustc_codegen_gcc/tests/run/slice.rs b/compiler/rustc_codegen_gcc/tests/run/slice.rs
new file mode 100644 (file)
index 0000000..ad9258e
--- /dev/null
@@ -0,0 +1,128 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 5
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+/*
+ * Code
+ */
+
+static mut TWO: usize = 2;
+
+fn index_slice(s: &[u32]) -> u32 {
+    unsafe {
+        s[TWO]
+    }
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    let array = [42, 7, 5];
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, index_slice(&array));
+    }
+    0
+}
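
slice.rs carries the `Unsize`/`CoerceUnsized` lang items for one reason: they are what allow `&[u32; 3]` to coerce to `&[u32]` at the `index_slice(&array)` call site, which in turn makes the backend build a fat (pointer plus length) slice reference. With `core` present the coercion is implicit; a minimal sketch, not part of the patch:

    fn index_slice(s: &[u32]) -> u32 {
        s[2]
    }

    fn main() {
        let array: [u32; 3] = [42, 7, 5];
        // &[u32; 3] unsizes to &[u32] here; the length travels with the pointer.
        println!("{}", index_slice(&array)); // 5
    }
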
diff --git a/compiler/rustc_codegen_gcc/tests/run/static.rs b/compiler/rustc_codegen_gcc/tests/run/static.rs
new file mode 100644 (file)
index 0000000..ab89f6a
--- /dev/null
@@ -0,0 +1,106 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 10
+//      14
+//      1
+//      12
+//      12
+//      1
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+    field: isize,
+}
+
+struct WithRef {
+    refe: &'static Test,
+}
+
+static mut CONSTANT: isize = 10;
+
+static mut TEST: Test = Test {
+    field: 12,
+};
+
+static mut TEST2: Test = Test {
+    field: 14,
+};
+
+static mut WITH_REF: WithRef = WithRef {
+    refe: unsafe { &TEST },
+};
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+        TEST2.field = argc;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+        WITH_REF.refe = &TEST2;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+    }
+    0
+}
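
Besides plain statics, static.rs checks a static holding a `&'static` reference to another static (`WITH_REF`) and writes through a `static mut` being observed both directly and through that reference. A trimmed sketch of the mutable-static part on a stock toolchain (not part of the patch; values are copied out rather than referenced, so no reference to a `static mut` is ever taken):

    struct Test {
        field: isize,
    }

    static mut TEST2: Test = Test { field: 14 };

    fn main() {
        unsafe {
            let before = TEST2.field; // copy, not a reference
            TEST2.field = 1;          // write through the static
            let after = TEST2.field;
            println!("{} {}", before, after); // 14 1
        }
    }
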
diff --git a/compiler/rustc_codegen_gcc/tests/run/structs.rs b/compiler/rustc_codegen_gcc/tests/run/structs.rs
new file mode 100644 (file)
index 0000000..6c88848
--- /dev/null
@@ -0,0 +1,70 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 1
+//     2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+    field: isize,
+}
+
+struct Two {
+    two: isize,
+}
+
+fn one() -> isize {
+    1
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    let test = Test {
+        field: one(),
+    };
+    let two = Two {
+        two: 2,
+    };
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two);
+    }
+    0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/tuple.rs b/compiler/rustc_codegen_gcc/tests/run/tuple.rs
new file mode 100644 (file)
index 0000000..0b670bf
--- /dev/null
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 3
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+    let test: (isize, isize, isize) = (3, 1, 4);
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
+    }
+    0
+}
index 480b19a5e933685cde31dddb4c6076b1169c2174..053f3e3ee583b9afbe4783715ad4892f80d88569 100644 (file)
@@ -19,6 +19,7 @@ ignore = [
     "library/backtrace",
     "library/stdarch",
     "compiler/rustc_codegen_cranelift",
+    "compiler/rustc_codegen_gcc",
     "src/doc/book",
     "src/doc/edition-guide",
     "src/doc/embedded-book",
index f66f282bea933e0bfbf817f589e74c814eb3fd16..28e7f1fdca7a19519e957f93690c8402abf6697e 100644 (file)
@@ -243,11 +243,16 @@ impl Step for CodegenBackend {
     const DEFAULT: bool = true;
 
     fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
-        run.paths(&["compiler/rustc_codegen_cranelift", "rustc_codegen_cranelift"])
+        run.paths(&[
+            "compiler/rustc_codegen_cranelift",
+            "rustc_codegen_cranelift",
+            "compiler/rustc_codegen_gcc",
+            "rustc_codegen_gcc",
+        ])
     }
 
     fn make_run(run: RunConfig<'_>) {
-        for &backend in &[INTERNER.intern_str("cranelift")] {
+        for &backend in &[INTERNER.intern_str("cranelift"), INTERNER.intern_str("gcc")] {
             run.builder.ensure(CodegenBackend { target: run.target, backend });
         }
     }
index 35809e599266c8111e6758f84307b1371838ea69..a20ea3235ed46348fc07ac468a1c94c9c84b689f 100644 (file)
@@ -57,6 +57,7 @@ fn filter_dirs(path: &Path) -> bool {
     let skip = [
         "tidy-test-file",
         "compiler/rustc_codegen_cranelift",
+        "compiler/rustc_codegen_gcc",
         "src/llvm-project",
         "library/backtrace",
         "library/stdarch",