]> git.lizzy.rs Git - rust.git/commitdiff
Merge commit '03f01bbe901d60b71cf2c5ec766aef5e532ab79d' into update_cg_clif-2020...
authorbjorn3 <bjorn3@users.noreply.github.com>
Tue, 3 Nov 2020 10:00:04 +0000 (11:00 +0100)
committerbjorn3 <bjorn3@users.noreply.github.com>
Tue, 3 Nov 2020 10:00:04 +0000 (11:00 +0100)
53 files changed:
1  2 
compiler/rustc_codegen_cranelift/.github/workflows/bootstrap_rustc.yml
compiler/rustc_codegen_cranelift/.github/workflows/main.yml
compiler/rustc_codegen_cranelift/.gitignore
compiler/rustc_codegen_cranelift/Cargo.lock
compiler/rustc_codegen_cranelift/Readme.md
compiler/rustc_codegen_cranelift/build.sh
compiler/rustc_codegen_cranelift/build_sysroot/build_sysroot.sh
compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
compiler/rustc_codegen_cranelift/clean_all.sh
compiler/rustc_codegen_cranelift/docs/env_vars.md
compiler/rustc_codegen_cranelift/example/mini_core.rs
compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
compiler/rustc_codegen_cranelift/example/std_example.rs
compiler/rustc_codegen_cranelift/rust-toolchain
compiler/rustc_codegen_cranelift/scripts/cargo.sh
compiler/rustc_codegen_cranelift/scripts/config.sh
compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
compiler/rustc_codegen_cranelift/scripts/rustup.sh
compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
compiler/rustc_codegen_cranelift/scripts/tests.sh
compiler/rustc_codegen_cranelift/src/abi/comments.rs
compiler/rustc_codegen_cranelift/src/abi/mod.rs
compiler/rustc_codegen_cranelift/src/allocator.rs
compiler/rustc_codegen_cranelift/src/archive.rs
compiler/rustc_codegen_cranelift/src/atomic_shim.rs
compiler/rustc_codegen_cranelift/src/backend.rs
compiler/rustc_codegen_cranelift/src/base.rs
compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
compiler/rustc_codegen_cranelift/src/cast.rs
compiler/rustc_codegen_cranelift/src/codegen_i128.rs
compiler/rustc_codegen_cranelift/src/common.rs
compiler/rustc_codegen_cranelift/src/constant.rs
compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
compiler/rustc_codegen_cranelift/src/driver/jit.rs
compiler/rustc_codegen_cranelift/src/driver/mod.rs
compiler/rustc_codegen_cranelift/src/inline_asm.rs
compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
compiler/rustc_codegen_cranelift/src/lib.rs
compiler/rustc_codegen_cranelift/src/linkage.rs
compiler/rustc_codegen_cranelift/src/main_shim.rs
compiler/rustc_codegen_cranelift/src/metadata.rs
compiler/rustc_codegen_cranelift/src/num.rs
compiler/rustc_codegen_cranelift/src/optimize/stack2reg.rs
compiler/rustc_codegen_cranelift/src/pretty_clif.rs
compiler/rustc_codegen_cranelift/src/trap.rs
compiler/rustc_codegen_cranelift/src/value_and_place.rs
compiler/rustc_codegen_cranelift/src/vtable.rs
compiler/rustc_codegen_cranelift/test.sh

index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..8c94a0aa5e6ebe7beba14d52e5ed817053529f8e
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,44 @@@
++name: Bootstrap rustc using cg_clif
++
++on:
++  - push
++
++jobs:
++  bootstrap_rustc:
++    runs-on: ubuntu-latest
++
++    steps:
++    - uses: actions/checkout@v2
++
++    - name: Cache cargo installed crates
++      uses: actions/cache@v2
++      with:
++        path: ~/.cargo/bin
++        key: ${{ runner.os }}-cargo-installed-crates
++
++    - name: Cache cargo registry and index
++      uses: actions/cache@v2
++      with:
++        path: |
++            ~/.cargo/registry
++            ~/.cargo/git
++        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
++
++    - name: Cache cargo target dir
++      uses: actions/cache@v2
++      with:
++        path: target
++        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
++
++    - name: Prepare dependencies
++      run: |
++        git config --global user.email "user@example.com"
++        git config --global user.name "User"
++        ./prepare.sh
++
++    - name: Test
++      run: |
++        # Enable backtraces for easier debugging
++        export RUST_BACKTRACE=1
++
++        ./scripts/test_bootstrap.sh
index 841e1a0870ed34da319bbbeb08cee4a36cee98c7,0000000000000000000000000000000000000000..e6d3375fb1bab6240d6d309259aa84657547a559
mode 100644,000000..100644
--- /dev/null
@@@ -1,54 -1,0 +1,63 @@@
-         ./test.sh --release
 +name: CI
 +
 +on:
 +  - push
 +  - pull_request
 +
 +jobs:
 +  build:
 +    runs-on: ${{ matrix.os }}
 +
 +    strategy:
 +      fail-fast: false
 +      matrix:
 +        os: [ubuntu-latest, macos-latest]
 +
 +    steps:
 +    - uses: actions/checkout@v2
 +
 +    - name: Cache cargo installed crates
 +      uses: actions/cache@v2
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
 +      uses: actions/cache@v2
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
 +      uses: actions/cache@v2
 +      with:
 +        path: target
 +        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        ./prepare.sh
 +
 +    - name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        # Reduce amount of benchmark runs as they are slow
 +        export COMPILE_RUNS=2
 +        export RUN_RUNS=2
 +
++        ./test.sh
++
++    - name: Package prebuilt cg_clif
++      run: tar cvfJ cg_clif.tar.xz build
++
++    - name: Upload prebuilt cg_clif
++      uses: actions/upload-artifact@v2
++      with:
++        name: cg_clif-${{ runner.os }}
++        path: cg_clif.tar.xz
index 0da9927b479b45326a4e50ac9500df5d0c60802e,0000000000000000000000000000000000000000..18196bce0094597d583e6e4420c32e0d85022f73
mode 100644,000000..100644
--- /dev/null
@@@ -1,14 -1,0 +1,14 @@@
- /build_sysroot/sysroot
 +target
 +**/*.rs.bk
 +*.rlib
 +*.o
 +perf.data
 +perf.data.old
 +*.events
 +*.string*
++/build
 +/build_sysroot/sysroot_src
 +/rust
 +/rand
 +/regex
 +/simple-raytracer
index 6cfbed0a5e43051d18f85e10e31dea76a946814b,0000000000000000000000000000000000000000..2889fac77f6a4616fd9b976932cd09a15a91c738
mode 100644,000000..100644
--- /dev/null
@@@ -1,425 -1,0 +1,425 @@@
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +[[package]]
 +name = "anyhow"
 +version = "1.0.33"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "a1fd36ffbb1fb7c834eac128ea8d0e310c5aeb635548f9d58861e1308d46e71c"
 +
 +[[package]]
 +name = "ar"
 +version = "0.8.0"
 +source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.0.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 +
 +[[package]]
 +name = "bitflags"
 +version = "1.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 +
 +[[package]]
 +name = "byteorder"
 +version = "1.3.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
 +
 +[[package]]
 +name = "cc"
 +version = "1.0.61"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "0.1.10"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 +
 +[[package]]
 +name = "cranelift-bforest"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "byteorder",
 + "cranelift-bforest",
 + "cranelift-codegen-meta",
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 + "gimli",
 + "log",
 + "regalloc",
 + "smallvec",
 + "target-lexicon",
 + "thiserror",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-meta"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-shared"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +
 +[[package]]
 +name = "cranelift-entity"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +
 +[[package]]
 +name = "cranelift-frontend"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "cranelift-codegen",
 + "log",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-module"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "log",
 + "thiserror",
 +]
 +
 +[[package]]
 +name = "cranelift-native"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "cranelift-codegen",
 + "raw-cpuid",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-object"
 +version = "0.67.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#4fd90dccabb266e983740e1f5daf8bde9266b286"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-module",
 + "log",
 + "object",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-simplejit"
 +version = "0.67.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#44cbdecea03c360ea82e6482f0cf6c614effef21"
 +dependencies = [
 + "cranelift-codegen",
 + "cranelift-entity",
 + "cranelift-module",
 + "cranelift-native",
 + "errno",
 + "libc",
 + "log",
 + "region",
 + "target-lexicon",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "crc32fast"
 +version = "1.2.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "errno"
 +version = "0.2.6"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01"
 +dependencies = [
 + "errno-dragonfly",
 + "libc",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "errno-dragonfly"
 +version = "0.1.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
 +dependencies = [
 + "gcc",
 + "libc",
 +]
 +
 +[[package]]
 +name = "gcc"
 +version = "0.3.55"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
 +
 +[[package]]
 +name = "gimli"
 +version = "0.22.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724"
 +dependencies = [
 + "indexmap",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.9.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
 +
 +[[package]]
 +name = "indexmap"
 +version = "1.6.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2"
 +dependencies = [
 + "autocfg",
 + "hashbrown",
 +]
 +
 +[[package]]
 +name = "libc"
 +version = "0.2.79"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743"
 +
 +[[package]]
 +name = "libloading"
 +version = "0.6.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "3557c9384f7f757f6d139cd3a4c62ef4e850696c16bf27924a5538c8a09717a1"
 +dependencies = [
 + "cfg-if",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "log"
 +version = "0.4.11"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "mach"
 +version = "0.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
 +dependencies = [
 + "libc",
 +]
 +
 +[[package]]
 +name = "object"
 +version = "0.21.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "37fd5004feb2ce328a52b0b3d01dbf4ffff72583493900ed15f22d4111c51693"
 +dependencies = [
 + "crc32fast",
 + "indexmap",
 +]
 +
 +[[package]]
 +name = "proc-macro2"
 +version = "1.0.24"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
 +dependencies = [
 + "unicode-xid",
 +]
 +
 +[[package]]
 +name = "quote"
 +version = "1.0.7"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
 +dependencies = [
 + "proc-macro2",
 +]
 +
 +[[package]]
 +name = "raw-cpuid"
 +version = "7.0.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b4a349ca83373cfa5d6dbb66fd76e58b2cca08da71a5f6400de0a0a6a9bceeaf"
 +dependencies = [
 + "bitflags",
 + "cc",
 + "rustc_version",
 +]
 +
 +[[package]]
 +name = "regalloc"
 +version = "0.0.31"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
 +dependencies = [
 + "log",
 + "rustc-hash",
 + "smallvec",
 +]
 +
 +[[package]]
 +name = "region"
 +version = "2.2.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
 +dependencies = [
 + "bitflags",
 + "libc",
 + "mach",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "rustc-hash"
 +version = "1.1.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
 +
 +[[package]]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +dependencies = [
 + "ar",
 + "cranelift-codegen",
 + "cranelift-frontend",
 + "cranelift-module",
 + "cranelift-object",
 + "cranelift-simplejit",
 + "gimli",
 + "indexmap",
 + "libloading",
 + "object",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "rustc_version"
 +version = "0.2.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
 +dependencies = [
 + "semver",
 +]
 +
 +[[package]]
 +name = "semver"
 +version = "0.9.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 +dependencies = [
 + "semver-parser",
 +]
 +
 +[[package]]
 +name = "semver-parser"
 +version = "0.7.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 +
 +[[package]]
 +name = "smallvec"
 +version = "1.4.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
 +
 +[[package]]
 +name = "syn"
 +version = "1.0.44"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "e03e57e4fcbfe7749842d53e24ccb9aa12b7252dbe5e91d2acad31834c8b8fdd"
 +dependencies = [
 + "proc-macro2",
 + "quote",
 + "unicode-xid",
 +]
 +
 +[[package]]
 +name = "target-lexicon"
 +version = "0.11.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "fe2635952a442a01fd4cb53d98858b5e4bb461b02c0d111f22f31772e3e7a8b2"
 +
 +[[package]]
 +name = "thiserror"
 +version = "1.0.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42"
 +dependencies = [
 + "thiserror-impl",
 +]
 +
 +[[package]]
 +name = "thiserror-impl"
 +version = "1.0.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab"
 +dependencies = [
 + "proc-macro2",
 + "quote",
 + "syn",
 +]
 +
 +[[package]]
 +name = "unicode-xid"
 +version = "0.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
 +
 +[[package]]
 +name = "winapi"
 +version = "0.3.9"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 +dependencies = [
 + "winapi-i686-pc-windows-gnu",
 + "winapi-x86_64-pc-windows-gnu",
 +]
 +
 +[[package]]
 +name = "winapi-i686-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 +
 +[[package]]
 +name = "winapi-x86_64-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
index 680ff877656b0fe5279188328d09439173159d84,0000000000000000000000000000000000000000..f8a5e13ed54c177be18e3dd234369b02b79a4d85
mode 100644,000000..100644
--- /dev/null
@@@ -1,88 -1,0 +1,103 @@@
- The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/master/cranelift). This has the potential to improve compilation times in debug mode. If your project doesn't use any of the things listed under "Not yet supported", it should work fine. If not please open an issue.
 +# WIP Cranelift codegen backend for rust
 +
 +> âš âš âš  Certain kinds of FFI don't work yet. âš âš âš 
 +
- ## Building
++The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/master/cranelift).
++This has the potential to improve compilation times in debug mode.
++If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
++If not please open an issue.
 +
- $ ./test.sh --release
++## Building and testing
 +
 +```bash
 +$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
 +$ cd rustc_codegen_cranelift
 +$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
- Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`prepare.sh` and `test.sh`).
++$ ./build.sh
 +```
 +
++To run the test suite replace the last command with:
++
++```bash
++$ ./test.sh
++```
++
++This will implicitly build cg_clif too. Both `build.sh` and `test.sh` accept a `--debug` argument to
++build in debug mode.
++
++Alternatively you can download a pre built version from [GHA]. It is listed in the artifacts section
++of workflow runs. Unfortunately due to GHA restrictions you need to be logged in to access it.
++
++[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
++
 +## Usage
 +
 +rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
 +
- $ $cg_clif_dir/cargo.sh run
++Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`prepare.sh` and `build.sh` or `test.sh`).
 +
 +### Cargo
 +
 +In the directory with your project (where you can do the usual `cargo build`), run:
 +
 +```bash
- If you compiled cg_clif in debug mode (aka you didn't pass `--release` to `./test.sh`) you should set `CHANNEL="debug"`.
++$ $cg_clif_dir/build/cargo.sh run
 +```
 +
 +This should build and run your project with rustc_codegen_cranelift instead of the usual LLVM backend.
 +
- $ $cg_clif_dir/target/release/cg_clif my_crate.rs
 +### Rustc
 +
 +> You should prefer using the Cargo method.
 +
 +```bash
- $ $cg_clif_dir/cargo.sh jit
++$ $cg_clif_dir/build/cg_clif my_crate.rs
 +```
 +
 +### Jit mode
 +
 +In jit mode cg_clif will immediately execute your code without creating an executable file.
 +
 +> This requires all dependencies to be available as dynamic library.
 +> The jit mode will probably need cargo integration to make this possible.
 +
 +```bash
- $ $cg_clif_dir/target/release/cg_clif --jit my_crate.rs
++$ $cg_clif_dir/build/cargo.sh jit
 +```
 +
 +or
 +
 +```bash
-     echo "$@" | $cg_clif_dir/target/release/cg_clif - --jit
++$ $cg_clif_dir/build/cg_clif --jit my_crate.rs
 +```
 +
 +### Shell
 +
 +These are a few functions that allow you to easily run rust code from the shell using cg_clif as jit.
 +
 +```bash
 +function jit_naked() {
++    echo "$@" | $cg_clif_dir/build/cg_clif - --jit
 +}
 +
 +function jit() {
 +    jit_naked "fn main() { $@ }"
 +}
 +
 +function jit_calc() {
 +    jit 'println!("0x{:x}", ' $@ ');';
 +}
 +```
 +
 +## Env vars
 +
 +[see env_vars.md](docs/env_vars.md)
 +
 +## Not yet supported
 +
 +* Good non-rust abi support ([several problems](https://github.com/bjorn3/rustc_codegen_cranelift/issues/10))
 +* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041)
 +    * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
 +      `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
 +      have to specify specific registers instead.
 +* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..f9a87e68a046a7f7c7960f5a1ee0858685886e17
new file mode 100755 (executable)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,47 @@@
++#!/bin/bash
++set -e
++
++# Settings
++export CHANNEL="release"
++build_sysroot=1
++target_dir='build'
++while [[ $# != 0 ]]; do
++    case $1 in
++        "--debug")
++            export CHANNEL="debug"
++            ;;
++        "--without-sysroot")
++            build_sysroot=0
++            ;;
++        "--target-dir")
++            target_dir=$2
++            shift
++            ;;
++        *)
++            echo "Unknown flag '$1'"
++            echo "Usage: ./build.sh [--debug] [--without-sysroot] [--target-dir DIR]"
++            ;;
++    esac
++    shift
++done
++
++# Build cg_clif
++export RUSTFLAGS="-Zrun_dsymutil=no"
++if [[ "$CHANNEL" == "release" ]]; then
++    cargo build --release
++else
++    cargo build
++fi
++
++rm -rf $target_dir
++mkdir $target_dir
++cp -a target/$CHANNEL/cg_clif{,_build_sysroot} target/$CHANNEL/*rustc_codegen_cranelift* $target_dir/
++cp -a rust-toolchain scripts/config.sh scripts/cargo.sh $target_dir
++
++if [[ "$build_sysroot" == "1" ]]; then
++    echo "[BUILD] sysroot"
++    export CG_CLIF_INCR_CACHE_DISABLED=1
++    dir=$(pwd)
++    cd $target_dir
++    time $dir/build_sysroot/build_sysroot.sh
++fi
index 04c82ca2a512855fee6d3776c07970e63a3ca510,0000000000000000000000000000000000000000..eba15c0dd4308ee2eb8218d3455917554c29eac3
mode 100755,000000..100755
--- /dev/null
@@@ -1,36 -1,0 +1,39 @@@
- cd $(dirname "$0")
 +#!/bin/bash
 +
 +# Requires the CHANNEL env var to be set to `debug` or `release.`
 +
 +set -e
- pushd ../ >/dev/null
- source ./scripts/config.sh
- popd >/dev/null
 +
- # Cleanup for previous run
- #     v Clean target dir except for build scripts and incremental cache
- rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
- rm -r sysroot/ 2>/dev/null || true
++source ./config.sh
 +
- export RUSTC=$(pwd)/../"target/"$CHANNEL"/cg_clif_build_sysroot"
++dir=$(pwd)
 +
 +# Use rustc with cg_clif as hotpluggable backend instead of the custom cg_clif driver so that
 +# build scripts are still compiled using cg_llvm.
- if [[ "$1" == "--release" ]]; then
++export RUSTC=$dir"/cg_clif_build_sysroot"
 +export RUSTFLAGS=$RUSTFLAGS" --clif"
 +
++cd $(dirname "$0")
++
++# Cleanup for previous run
++#     v Clean target dir except for build scripts and incremental cache
++rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
++
++# We expect the target dir in the default location. Guard against the user changing it.
++export CARGO_TARGET_DIR=target
++
 +# Build libs
 +export RUSTFLAGS="$RUSTFLAGS -Zforce-unstable-if-unmarked -Cpanic=abort"
- mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
- cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
++if [[ "$1" != "--debug" ]]; then
 +    sysroot_channel='release'
 +    # FIXME Enable incremental again once rust-lang/rust#74946 is fixed
 +    # FIXME Enable -Zmir-opt-level=2 again once it doesn't ice anymore
 +    CARGO_INCREMENTAL=0 RUSTFLAGS="$RUSTFLAGS" cargo build --target $TARGET_TRIPLE --release
 +else
 +    sysroot_channel='debug'
 +    cargo build --target $TARGET_TRIPLE
 +fi
 +
 +# Copy files to sysroot
++mkdir -p $dir/sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
++cp -a target/$TARGET_TRIPLE/$sysroot_channel/deps/* $dir/sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
index 14aa77478f54dbcd12fe1a975f69b3020cce4fe0,0000000000000000000000000000000000000000..d0fb09ce745d4daf860ed9d70d223880d46dc587
mode 100755,000000..100755
--- /dev/null
@@@ -1,32 -1,0 +1,32 @@@
- cp -r $SRC_DIR/library $DST_DIR/
 +#!/bin/bash
 +set -e
 +cd $(dirname "$0")
 +
 +SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
 +DST_DIR="sysroot_src"
 +
 +if [ ! -e $SRC_DIR ]; then
 +    echo "Please install rust-src component"
 +    exit 1
 +fi
 +
 +rm -rf $DST_DIR
 +mkdir -p $DST_DIR/library
++cp -a $SRC_DIR/library $DST_DIR/
 +
 +pushd $DST_DIR
 +echo "[GIT] init"
 +git init
 +echo "[GIT] add"
 +git add .
 +echo "[GIT] commit"
 +git commit -m "Initial commit" -q
 +for file in $(ls ../../patches/ | grep -v patcha); do
 +echo "[GIT] apply" $file
 +git apply ../../patches/$file
 +git add -A
 +git commit --no-gpg-sign -m "Patch $file"
 +done
 +popd
 +
 +echo "Successfully prepared libcore for building"
index 3003a0ea2d102d779ec08cafe902a33af5d62340,0000000000000000000000000000000000000000..5a69c862d016d616d967d413d42b598cea2d934f
mode 100755,000000..100755
--- /dev/null
@@@ -1,5 -1,0 +1,5 @@@
- rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/} perf.data{,.old}
 +#!/bin/bash --verbose
 +set -e
 +
++rm -rf target/ build/ build_sysroot/{sysroot_src/,target/} perf.data{,.old}
 +rm -rf rand/ regex/ simple-raytracer/
index 07b75622a58ef2dac71189a7e1063bbc7ba5d9b3,0000000000000000000000000000000000000000..f0a0a6ad42ef52af66cc35678e57cb2b415b783d
mode 100644,000000..100644
--- /dev/null
@@@ -1,15 -1,0 +1,12 @@@
-     <dt>CG_CLIF_FUNCTION_SECTIONS</dt>
-     <dd>Use a single section for each function. This will often reduce the executable size at the
-         cost of making linking significantly slower.</dd>
 +# List of env vars recognized by cg_clif
 +
 +<dl>
 +    <dt>CG_CLIF_JIT_ARGS</dt>
 +    <dd>When JIT mode is enable pass these arguments to the program.</dd>
 +    <dt>CG_CLIF_INCR_CACHE_DISABLED</dt>
 +    <dd>Don't cache object files in the incremental cache. Useful during development of cg_clif
 +    to make it possible to use incremental mode for all analyses performed by rustc without caching
 +    object files when their content should have been changed by a change to cg_clif.</dd>
 +    <dt>CG_CLIF_DISPLAY_CG_TIME</dt>
 +    <dd>If "1", display the time it took to perform codegen for a crate</dd>
 +</dl>
index a972beedaa38752f6bb1f476f41e97558bf27e2e,0000000000000000000000000000000000000000..ce07fe83df18f2b09f663113aad431002e98e956
mode 100644,000000..100644
--- /dev/null
@@@ -1,603 -1,0 +1,613 @@@
 +#![feature(
 +    no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
 +    untagged_unions, decl_macro, rustc_attrs, transparent_unions, optin_builtin_traits,
 +    thread_local,
 +)]
 +#![no_core]
 +#![allow(dead_code)]
 +
 +#[lang = "sized"]
 +pub trait Sized {}
 +
 +#[lang = "unsize"]
 +pub trait Unsize<T: ?Sized> {}
 +
 +#[lang = "coerce_unsized"]
 +pub trait CoerceUnsized<T> {}
 +
 +impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
 +impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
 +
 +#[lang = "dispatch_from_dyn"]
 +pub trait DispatchFromDyn<T> {}
 +
 +// &T -> &U
 +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
 +// &mut T -> &mut U
 +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
 +// *const T -> *const U
 +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
 +// *mut T -> *mut U
 +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
 +
 +#[lang = "receiver"]
 +pub trait Receiver {}
 +
 +impl<T: ?Sized> Receiver for &T {}
 +impl<T: ?Sized> Receiver for &mut T {}
 +impl<T: ?Sized> Receiver for Box<T> {}
 +
 +#[lang = "copy"]
 +pub unsafe trait Copy {}
 +
 +unsafe impl Copy for bool {}
 +unsafe impl Copy for u8 {}
 +unsafe impl Copy for u16 {}
 +unsafe impl Copy for u32 {}
 +unsafe impl Copy for u64 {}
++unsafe impl Copy for u128 {}
 +unsafe impl Copy for usize {}
 +unsafe impl Copy for i8 {}
 +unsafe impl Copy for i16 {}
 +unsafe impl Copy for i32 {}
 +unsafe impl Copy for isize {}
 +unsafe impl Copy for f32 {}
 +unsafe impl Copy for char {}
 +unsafe impl<'a, T: ?Sized> Copy for &'a T {}
 +unsafe impl<T: ?Sized> Copy for *const T {}
 +unsafe impl<T: ?Sized> Copy for *mut T {}
 +unsafe impl<T: Copy> Copy for Option<T> {}
 +
 +#[lang = "sync"]
 +pub unsafe trait Sync {}
 +
 +unsafe impl Sync for bool {}
 +unsafe impl Sync for u8 {}
 +unsafe impl Sync for u16 {}
 +unsafe impl Sync for u32 {}
 +unsafe impl Sync for u64 {}
 +unsafe impl Sync for usize {}
 +unsafe impl Sync for i8 {}
 +unsafe impl Sync for i16 {}
 +unsafe impl Sync for i32 {}
 +unsafe impl Sync for isize {}
 +unsafe impl Sync for char {}
 +unsafe impl<'a, T: ?Sized> Sync for &'a T {}
 +unsafe impl Sync for [u8; 16] {}
 +
 +#[lang = "freeze"]
 +unsafe auto trait Freeze {}
 +
 +unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
 +unsafe impl<T: ?Sized> Freeze for *const T {}
 +unsafe impl<T: ?Sized> Freeze for *mut T {}
 +unsafe impl<T: ?Sized> Freeze for &T {}
 +unsafe impl<T: ?Sized> Freeze for &mut T {}
 +
 +#[lang = "structural_peq"]
 +pub trait StructuralPartialEq {}
 +
 +#[lang = "structural_teq"]
 +pub trait StructuralEq {}
 +
 +#[lang = "not"]
 +pub trait Not {
 +    type Output;
 +
 +    fn not(self) -> Self::Output;
 +}
 +
 +impl Not for bool {
 +    type Output = bool;
 +
 +    fn not(self) -> bool {
 +        !self
 +    }
 +}
 +
 +#[lang = "mul"]
 +pub trait Mul<RHS = Self> {
 +    type Output;
 +
 +    #[must_use]
 +    fn mul(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Mul for u8 {
 +    type Output = Self;
 +
 +    fn mul(self, rhs: Self) -> Self::Output {
 +        self * rhs
 +    }
 +}
 +
 +impl Mul for usize {
 +    type Output = Self;
 +
 +    fn mul(self, rhs: Self) -> Self::Output {
 +        self * rhs
 +    }
 +}
 +
 +#[lang = "add"]
 +pub trait Add<RHS = Self> {
 +    type Output;
 +
 +    fn add(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Add for u8 {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +impl Add for i8 {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +impl Add for usize {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +#[lang = "sub"]
 +pub trait Sub<RHS = Self> {
 +    type Output;
 +
 +    fn sub(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Sub for usize {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for u8 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for i8 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for i16 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +#[lang = "rem"]
 +pub trait Rem<RHS = Self> {
 +    type Output;
 +
 +    fn rem(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Rem for usize {
 +    type Output = Self;
 +
 +    fn rem(self, rhs: Self) -> Self {
 +        self % rhs
 +    }
 +}
 +
 +#[lang = "bitor"]
 +pub trait BitOr<RHS = Self> {
 +    type Output;
 +
 +    #[must_use]
 +    fn bitor(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl BitOr for bool {
 +    type Output = bool;
 +
 +    fn bitor(self, rhs: bool) -> bool {
 +        self | rhs
 +    }
 +}
 +
 +impl<'a> BitOr<bool> for &'a bool {
 +    type Output = bool;
 +
 +    fn bitor(self, rhs: bool) -> bool {
 +        *self | rhs
 +    }
 +}
 +
 +#[lang = "eq"]
 +pub trait PartialEq<Rhs: ?Sized = Self> {
 +    fn eq(&self, other: &Rhs) -> bool;
 +    fn ne(&self, other: &Rhs) -> bool;
 +}
 +
 +impl PartialEq for u8 {
 +    fn eq(&self, other: &u8) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u8) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for u16 {
 +    fn eq(&self, other: &u16) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u16) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for u32 {
 +    fn eq(&self, other: &u32) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u32) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +
 +impl PartialEq for u64 {
 +    fn eq(&self, other: &u64) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u64) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
++impl PartialEq for u128 {
++    fn eq(&self, other: &u128) -> bool {
++        (*self) == (*other)
++    }
++    fn ne(&self, other: &u128) -> bool {
++        (*self) != (*other)
++    }
++}
++
 +impl PartialEq for usize {
 +    fn eq(&self, other: &usize) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &usize) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for i8 {
 +    fn eq(&self, other: &i8) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &i8) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for i32 {
 +    fn eq(&self, other: &i32) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &i32) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for isize {
 +    fn eq(&self, other: &isize) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &isize) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for char {
 +    fn eq(&self, other: &char) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &char) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl<T: ?Sized> PartialEq for *const T {
 +    fn eq(&self, other: &*const T) -> bool {
 +        *self == *other
 +    }
 +    fn ne(&self, other: &*const T) -> bool {
 +        *self != *other
 +    }
 +}
 +
 +impl <T: PartialEq> PartialEq for Option<T> {
 +    fn eq(&self, other: &Self) -> bool {
 +        match (self, other) {
 +            (Some(lhs), Some(rhs)) => *lhs == *rhs,
 +            (None, None) => true,
 +            _ => false,
 +        }
 +    }
 +
 +    fn ne(&self, other: &Self) -> bool {
 +        match (self, other) {
 +            (Some(lhs), Some(rhs)) => *lhs != *rhs,
 +            (None, None) => false,
 +            _ => true,
 +        }
 +    }
 +}
 +
 +#[lang = "neg"]
 +pub trait Neg {
 +    type Output;
 +
 +    fn neg(self) -> Self::Output;
 +}
 +
 +impl Neg for i8 {
 +    type Output = i8;
 +
 +    fn neg(self) -> i8 {
 +        -self
 +    }
 +}
 +
 +impl Neg for i16 {
 +    type Output = i16;
 +
 +    fn neg(self) -> i16 {
 +        self
 +    }
 +}
 +
 +impl Neg for isize {
 +    type Output = isize;
 +
 +    fn neg(self) -> isize {
 +        -self
 +    }
 +}
 +
 +impl Neg for f32 {
 +    type Output = f32;
 +
 +    fn neg(self) -> f32 {
 +        -self
 +    }
 +}
 +
 +pub enum Option<T> {
 +    Some(T),
 +    None,
 +}
 +
 +pub use Option::*;
 +
 +#[lang = "phantom_data"]
 +pub struct PhantomData<T: ?Sized>;
 +
 +#[lang = "fn_once"]
 +#[rustc_paren_sugar]
 +pub trait FnOnce<Args> {
 +    #[lang = "fn_once_output"]
 +    type Output;
 +
 +    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
 +}
 +
 +#[lang = "fn_mut"]
 +#[rustc_paren_sugar]
 +pub trait FnMut<Args>: FnOnce<Args> {
 +    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
 +}
 +
 +#[lang = "panic"]
 +#[track_caller]
 +pub fn panic(_msg: &str) -> ! {
 +    unsafe {
 +        libc::puts("Panicking\n\0" as *const str as *const i8);
 +        intrinsics::abort();
 +    }
 +}
 +
 +#[lang = "panic_bounds_check"]
 +#[track_caller]
 +fn panic_bounds_check(index: usize, len: usize) -> ! {
 +    unsafe {
 +        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
 +        intrinsics::abort();
 +    }
 +}
 +
 +#[lang = "eh_personality"]
 +fn eh_personality() -> ! {
 +    loop {}
 +}
 +
 +#[lang = "drop_in_place"]
 +#[allow(unconditional_recursion)]
 +pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
 +    // Code here does not matter - this is replaced by the
 +    // real drop glue by the compiler.
 +    drop_in_place(to_drop);
 +}
 +
 +#[lang = "deref"]
 +pub trait Deref {
 +    type Target: ?Sized;
 +
 +    fn deref(&self) -> &Self::Target;
 +}
 +
 +#[lang = "owned_box"]
 +pub struct Box<T: ?Sized>(*mut T);
 +
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
 +
 +impl<T: ?Sized> Drop for Box<T> {
 +    fn drop(&mut self) {
 +        // drop is currently performed by compiler.
 +    }
 +}
 +
 +impl<T> Deref for Box<T> {
 +    type Target = T;
 +
 +    fn deref(&self) -> &Self::Target {
 +        &**self
 +    }
 +}
 +
 +#[lang = "exchange_malloc"]
 +unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
 +    libc::malloc(size)
 +}
 +
 +#[lang = "box_free"]
 +unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
 +    libc::free(ptr as *mut u8);
 +}
 +
 +#[lang = "drop"]
 +pub trait Drop {
 +    fn drop(&mut self);
 +}
 +
 +#[lang = "manually_drop"]
 +#[repr(transparent)]
 +pub struct ManuallyDrop<T: ?Sized> {
 +    pub value: T,
 +}
 +
 +#[lang = "maybe_uninit"]
 +#[repr(transparent)]
 +pub union MaybeUninit<T> {
 +    pub uninit: (),
 +    pub value: ManuallyDrop<T>,
 +}
 +
 +pub mod intrinsics {
 +    extern "rust-intrinsic" {
 +        pub fn abort() -> !;
 +        pub fn size_of<T>() -> usize;
 +        pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
 +        pub fn min_align_of<T>() -> usize;
 +        pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
 +        pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
 +        pub fn transmute<T, U>(e: T) -> U;
 +        pub fn ctlz_nonzero<T>(x: T) -> T;
 +        pub fn needs_drop<T>() -> bool;
 +        pub fn bitreverse<T>(x: T) -> T;
 +        pub fn bswap<T>(x: T) -> T;
 +        pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
 +    }
 +}
 +
 +pub mod libc {
 +    #[cfg_attr(not(windows), link(name = "c"))]
 +    #[cfg_attr(windows, link(name = "msvcrt"))]
 +    extern "C" {
 +        pub fn puts(s: *const i8) -> i32;
 +        pub fn printf(format: *const i8, ...) -> i32;
 +        pub fn malloc(size: usize) -> *mut u8;
 +        pub fn free(ptr: *mut u8);
 +        pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
 +        pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
 +        pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
 +    }
 +}
 +
 +#[lang = "index"]
 +pub trait Index<Idx: ?Sized> {
 +    type Output: ?Sized;
 +    fn index(&self, index: Idx) -> &Self::Output;
 +}
 +
 +impl<T> Index<usize> for [T; 3] {
 +    type Output = T;
 +
 +    fn index(&self, index: usize) -> &Self::Output {
 +        &self[index]
 +    }
 +}
 +
 +impl<T> Index<usize> for [T] {
 +    type Output = T;
 +
 +    fn index(&self, index: usize) -> &Self::Output {
 +        &self[index]
 +    }
 +}
 +
 +extern {
 +    type VaListImpl;
 +}
 +
 +#[lang = "va_list"]
 +#[repr(transparent)]
 +pub struct VaList<'a>(&'a mut VaListImpl);
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro stringify($($t:tt)*) { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro file() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro line() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro cfg() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro global_asm() { /* compiler built-in */ }
 +
 +pub static A_STATIC: u8 = 42;
 +
 +#[lang = "panic_location"]
 +struct PanicLocation {
 +    file: &'static str,
 +    line: u32,
 +    column: u32,
 +}
 +
 +#[no_mangle]
 +pub fn get_tls() -> u8 {
 +    #[thread_local]
 +    static A: u8 = 42;
 +
 +    A
 +}
index 376056e19383fd1f4ac71043726ecc40e7630e43,0000000000000000000000000000000000000000..4a8375afac3cef46815d38becae87267dcc7461c
mode 100644,000000..100644
--- /dev/null
@@@ -1,448 -1,0 +1,470 @@@
 +#![feature(
 +    no_core, start, lang_items, box_syntax, never_type, linkage,
 +    extern_types, thread_local
 +)]
 +#![no_core]
 +#![allow(dead_code, non_camel_case_types)]
 +
 +extern crate mini_core;
 +
 +use mini_core::*;
 +use mini_core::libc::*;
 +
 +unsafe extern "C" fn my_puts(s: *const i8) {
 +    puts(s);
 +}
 +
 +#[lang = "termination"]
 +trait Termination {
 +    fn report(self) -> i32;
 +}
 +
 +impl Termination for () {
 +    fn report(self) -> i32 {
 +        unsafe {
 +            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
 +            *NUM_REF as i32
 +        }
 +    }
 +}
 +
 +trait SomeTrait {
 +    fn object_safe(&self);
 +}
 +
 +impl SomeTrait for &'static str {
 +    fn object_safe(&self) {
 +        unsafe {
 +            puts(*self as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +struct NoisyDrop {
 +    text: &'static str,
 +    inner: NoisyDropInner,
 +}
 +
 +struct NoisyDropInner;
 +
 +impl Drop for NoisyDrop {
 +    fn drop(&mut self) {
 +        unsafe {
 +            puts(self.text as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +impl Drop for NoisyDropInner {
 +    fn drop(&mut self) {
 +        unsafe {
 +            puts("Inner got dropped!\0" as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +impl SomeTrait for NoisyDrop {
 +    fn object_safe(&self) {}
 +}
 +
 +enum Ordering {
 +    Less = -1,
 +    Equal = 0,
 +    Greater = 1,
 +}
 +
 +#[lang = "start"]
 +fn start<T: Termination + 'static>(
 +    main: fn() -> T,
 +    argc: isize,
 +    argv: *const *const u8,
 +) -> isize {
 +    if argc == 3 {
 +        unsafe { puts(*argv as *const i8); }
 +        unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
 +        unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
 +    }
 +
 +    main().report();
 +    0
 +}
 +
 +static mut NUM: u8 = 6 * 7;
 +static NUM_REF: &'static u8 = unsafe { &NUM };
 +
 +macro_rules! assert {
 +    ($e:expr) => {
 +        if !$e {
 +            panic(stringify!(! $e));
 +        }
 +    };
 +}
 +
 +macro_rules! assert_eq {
 +    ($l:expr, $r: expr) => {
 +        if $l != $r {
 +            panic(stringify!($l != $r));
 +        }
 +    }
 +}
 +
 +struct Unique<T: ?Sized> {
 +    pointer: *const T,
 +    _marker: PhantomData<T>,
 +}
 +
 +impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
 +
 +unsafe fn zeroed<T>() -> T {
 +    let mut uninit = MaybeUninit { uninit: () };
 +    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
 +    uninit.value.value
 +}
 +
 +fn take_f32(_f: f32) {}
 +fn take_unique(_u: Unique<()>) {}
 +
 +fn return_u128_pair() -> (u128, u128) {
 +    (0, 0)
 +}
 +
 +fn call_return_u128_pair() {
 +    return_u128_pair();
 +}
 +
 +fn main() {
 +    take_unique(Unique {
 +        pointer: 0 as *const (),
 +        _marker: PhantomData,
 +    });
 +    take_f32(0.1);
 +
 +    call_return_u128_pair();
 +
 +    let slice = &[0, 1] as &[i32];
 +    let slice_ptr = slice as *const [i32] as *const i32;
 +
 +    assert_eq!(slice_ptr as usize % 4, 0);
 +
 +    //return;
 +
 +    unsafe {
 +        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
 +
 +        let hello: &[u8] = b"Hello\0" as &[u8; 6];
 +        let ptr: *const i8 = hello as *const [u8] as *const i8;
 +        puts(ptr);
 +
 +        let world: Box<&str> = box "World!\0";
 +        puts(*world as *const str as *const i8);
 +        world as Box<dyn SomeTrait>;
 +
 +        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
 +
 +        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
 +        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
 +        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
 +        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
 +
 +        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
 +
 +        let chars = &['C', 'h', 'a', 'r', 's'];
 +        let chars = chars as &[char];
 +        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
 +
 +        let a: &dyn SomeTrait = &"abc\0";
 +        a.object_safe();
 +
 +        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
 +        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
 +
 +        assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
 +        assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
 +
 +        assert!(!intrinsics::needs_drop::<u8>());
 +        assert!(intrinsics::needs_drop::<NoisyDrop>());
 +
 +        Unique {
 +            pointer: 0 as *const &str,
 +            _marker: PhantomData,
 +        } as Unique<dyn SomeTrait>;
 +
 +        struct MyDst<T: ?Sized>(T);
 +
 +        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
 +
 +        struct Foo {
 +            x: u8,
 +            y: !,
 +        }
 +
 +        unsafe fn uninitialized<T>() -> T {
 +            MaybeUninit { uninit: () }.value.value
 +        }
 +
 +        zeroed::<(u8, u8)>();
 +        #[allow(unreachable_code)]
 +        {
 +            if false {
 +                zeroed::<!>();
 +                zeroed::<Foo>();
 +                uninitialized::<Foo>();
 +            }
 +        }
 +    }
 +
 +    let _ = box NoisyDrop {
 +        text: "Boxed outer got dropped!\0",
 +        inner: NoisyDropInner,
 +    } as Box<dyn SomeTrait>;
 +
 +    const FUNC_REF: Option<fn()> = Some(main);
 +    match FUNC_REF {
 +        Some(_) => {},
 +        None => assert!(false),
 +    }
 +
 +    match Ordering::Less {
 +        Ordering::Less => {},
 +        _ => assert!(false),
 +    }
 +
 +    [NoisyDropInner, NoisyDropInner];
 +
 +    let x = &[0u32, 42u32] as &[u32];
 +    match x {
 +        [] => assert_eq!(0u32, 1),
 +        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
 +    }
 +
 +    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
 +
 +    #[cfg(not(jit))]
 +    {
 +        extern {
 +            #[linkage = "extern_weak"]
 +            static ABC: *const u8;
 +        }
 +
 +        {
 +            extern {
 +                #[linkage = "extern_weak"]
 +                static ABC: *const u8;
 +            }
 +        }
 +
 +        unsafe { assert_eq!(ABC as usize, 0); }
 +    }
 +
 +    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
 +
 +    let f = 1000.0;
 +    assert_eq!(f as u8, 255);
 +    let f2 = -1000.0;
 +    assert_eq!(f2 as i8, -128);
 +    assert_eq!(f2 as u8, 0);
 +
 +    static ANOTHER_STATIC: &u8 = &A_STATIC;
 +    assert_eq!(*ANOTHER_STATIC, 42);
 +
 +    check_niche_behavior();
 +
 +    extern "C" {
 +        type ExternType;
 +    }
 +
 +    struct ExternTypeWrapper {
 +        _a: ExternType,
 +    }
 +
 +    let nullptr = 0 as *const ();
 +    let extern_nullptr = nullptr as *const ExternTypeWrapper;
 +    extern_nullptr as *const ();
 +    let slice_ptr = &[] as *const [u8];
 +    slice_ptr as *const u8;
 +
 +    let repeat = [Some(42); 2];
 +    assert_eq!(repeat[0], Some(42));
 +    assert_eq!(repeat[1], Some(42));
 +
++    from_decimal_string();
++
 +    #[cfg(not(jit))]
 +    test_tls();
 +
 +    #[cfg(all(not(jit), target_os = "linux"))]
 +    unsafe {
 +        global_asm_test();
 +    }
 +}
 +
 +#[cfg(all(not(jit), target_os = "linux"))]
 +extern "C" {
 +    fn global_asm_test();
 +}
 +
 +#[cfg(all(not(jit), target_os = "linux"))]
 +global_asm! {
 +    "
 +    .global global_asm_test
 +    global_asm_test:
 +    // comment that would normally be removed by LLVM
 +    ret
 +    "
 +}
 +
 +#[repr(C)]
 +enum c_void {
 +    _1,
 +    _2,
 +}
 +
 +type c_int = i32;
 +type c_ulong = u64;
 +
 +type pthread_t = c_ulong;
 +
 +#[repr(C)]
 +struct pthread_attr_t {
 +    __size: [u64; 7],
 +}
 +
 +#[link(name = "pthread")]
 +extern "C" {
 +    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
 +
 +    fn pthread_create(
 +        native: *mut pthread_t,
 +        attr: *const pthread_attr_t,
 +        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
 +        value: *mut c_void
 +    ) -> c_int;
 +
 +    fn pthread_join(
 +        native: pthread_t,
 +        value: *mut *mut c_void
 +    ) -> c_int;
 +}
 +
 +#[thread_local]
 +#[cfg(not(jit))]
 +static mut TLS: u8 = 42;
 +
 +#[cfg(not(jit))]
 +extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
 +    unsafe { TLS = 0; }
 +    0 as *mut c_void
 +}
 +
 +#[cfg(not(jit))]
 +fn test_tls() {
 +    unsafe {
 +        let mut attr: pthread_attr_t = zeroed();
 +        let mut thread: pthread_t = 0;
 +
 +        assert_eq!(TLS, 42);
 +
 +        if pthread_attr_init(&mut attr) != 0 {
 +            assert!(false);
 +        }
 +
 +        if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
 +            assert!(false);
 +        }
 +
 +        let mut res = 0 as *mut c_void;
 +        pthread_join(thread, &mut res);
 +
 +        // TLS of main thread must not have been changed by the other thread.
 +        assert_eq!(TLS, 42);
 +
 +        puts("TLS works!\n\0" as *const str as *const i8);
 +    }
 +}
 +
 +// Copied ui/issues/issue-61696.rs
 +
 +pub enum Infallible {}
 +
 +// The check that the `bool` field of `V1` is encoding a "niche variant"
 +// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
 +// causing valid `V1` values to be interpreted as other variants.
 +pub enum E1 {
 +    V1 { f: bool },
 +    V2 { f: Infallible },
 +    V3,
 +    V4,
 +}
 +
 +// Computing the discriminant used to be done using the niche type (here `u8`,
 +// from the `bool` field of `V1`), overflowing for variants with large enough
 +// indices (`V3` and `V4`), causing them to be interpreted as other variants.
 +pub enum E2<X> {
 +    V1 { f: bool },
 +
 +    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
 +    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
 +    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
 +    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
 +    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
 +    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
 +    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
 +    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
 +    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
 +    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
 +    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
 +    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
 +    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
 +    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
 +    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
 +    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
 +    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
 +    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
 +    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
 +    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
 +    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
 +    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
 +    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
 +    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
 +    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
 +    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
 +    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
 +    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
 +    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
 +    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
 +    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
 +    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
 +
 +    V3,
 +    V4,
 +}
 +
 +fn check_niche_behavior () {
 +    if let E1::V2 { .. } = (E1::V1 { f: true }) {
 +        intrinsics::abort();
 +    }
 +
 +    if let E2::V1 { .. } = E2::V3::<Infallible> {
 +        intrinsics::abort();
 +    }
 +}
++
++fn from_decimal_string() {
++    loop {
++        let multiplier = 1;
++
++        take_multiplier_ref(&multiplier);
++
++        if multiplier == 1 {
++            break;
++        }
++
++        unreachable();
++    }
++}
++
++fn take_multiplier_ref(_multiplier: &u128) {}
++
++fn unreachable() -> ! {
++    panic("unreachable")
++}
index 079b4299049119e3c87573445bc4e6e224ebfffe,0000000000000000000000000000000000000000..cb512a4aa335e65106967ed7470bd5b45949a0c5
mode 100644,000000..100644
--- /dev/null
@@@ -1,343 -1,0 +1,343 @@@
-     assert_eq!(1u64.checked_mul(u64::max_value()), Some(u64::max_value()));
-     assert_eq!(u64::max_value().checked_mul(u64::max_value()), None);
-     assert_eq!(1i64.checked_mul(i64::max_value()), Some(i64::max_value()));
-     assert_eq!(i64::max_value().checked_mul(i64::max_value()), None);
-     assert_eq!((-1i64).checked_mul(i64::min_value() + 1), Some(i64::max_value()));
-     assert_eq!(1i64.checked_mul(i64::min_value()), Some(i64::min_value()));
-     assert_eq!(i64::min_value().checked_mul(i64::min_value()), None);
 +#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
 +
 +#[cfg(target_arch = "x86_64")]
 +use std::arch::x86_64::*;
 +use std::io::Write;
 +use std::ops::Generator;
 +
 +fn main() {
 +    println!("{:?}", std::env::args().collect::<Vec<_>>());
 +
 +    let mutex = std::sync::Mutex::new(());
 +    let _guard = mutex.lock().unwrap();
 +
 +    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
 +    let stderr = ::std::io::stderr();
 +    let mut stderr = stderr.lock();
 +
 +    std::thread::spawn(move || {
 +        println!("Hello from another thread!");
 +    });
 +
 +    writeln!(stderr, "some {} text", "<unknown>").unwrap();
 +
 +    let _ = std::process::Command::new("true").env("c", "d").spawn();
 +
 +    println!("cargo:rustc-link-lib=z");
 +
 +    static ONCE: std::sync::Once = std::sync::Once::new();
 +    ONCE.call_once(|| {});
 +
 +    let _eq = LoopState::Continue(()) == LoopState::Break(());
 +
 +    // Make sure ByValPair values with differently sized components are correctly passed
 +    map(None::<(u8, Box<Instruction>)>);
 +
 +    println!("{}", 2.3f32.exp());
 +    println!("{}", 2.3f32.exp2());
 +    println!("{}", 2.3f32.abs());
 +    println!("{}", 2.3f32.sqrt());
 +    println!("{}", 2.3f32.floor());
 +    println!("{}", 2.3f32.ceil());
 +    println!("{}", 2.3f32.min(1.0));
 +    println!("{}", 2.3f32.max(1.0));
 +    println!("{}", 2.3f32.powi(2));
 +    println!("{}", 2.3f32.log2());
 +    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
 +    println!("{}", 2.3f32.powf(2.0));
 +
 +    assert_eq!(-128i8, (-128i8).saturating_sub(1));
 +    assert_eq!(127i8, 127i8.saturating_sub(-128));
 +    assert_eq!(-128i8, (-128i8).saturating_add(-128));
 +    assert_eq!(127i8, 127i8.saturating_add(1));
 +
 +    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
 +    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
 +
 +    let _d = 0i128.checked_div(2i128);
 +    let _d = 0u128.checked_div(2u128);
 +    assert_eq!(1u128 + 2, 3);
 +
 +    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
 +
 +    let tmp = 353985398u128;
 +    assert_eq!(tmp * 932490u128, 330087843781020u128);
 +
 +    let tmp = -0x1234_5678_9ABC_DEF0i64;
 +    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
 +
 +    // Check that all u/i128 <-> float casts work correctly.
 +    let houndred_u128 = 100u128;
 +    let houndred_i128 = 100i128;
 +    let houndred_f32 = 100.0f32;
 +    let houndred_f64 = 100.0f64;
 +    assert_eq!(houndred_u128 as f32, 100.0);
 +    assert_eq!(houndred_u128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as u128, 100);
 +    assert_eq!(houndred_f64 as u128, 100);
 +    assert_eq!(houndred_i128 as f32, 100.0);
 +    assert_eq!(houndred_i128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as i128, 100);
 +    assert_eq!(houndred_f64 as i128, 100);
 +
 +    // Test signed 128bit comparing
 +    let max = usize::MAX as i128;
 +    if 100i128 < 0i128 || 100i128 > max {
 +        panic!();
 +    }
 +
 +    test_checked_mul();
 +
 +    let _a = 1u32 << 2u8;
 +
 +    let empty: [i32; 0] = [];
 +    assert!(empty.is_sorted());
 +
 +    println!("{:?}", std::intrinsics::caller_location());
 +
 +    #[cfg(target_arch = "x86_64")]
 +    unsafe {
 +        test_simd();
 +    }
 +
 +    Box::pin(move |mut _task_context| {
 +        yield ();
 +    }).as_mut().resume(0);
 +
 +    #[derive(Copy, Clone)]
 +    enum Nums {
 +        NegOne = -1,
 +    }
 +
 +    let kind = Nums::NegOne;
 +    assert_eq!(-1i128, kind as i128);
 +
 +    let options = [1u128];
 +    match options[0] {
 +        1 => (),
 +        0 => loop {},
 +        v => panic(v),
 +    };
 +}
 +
 +fn panic(_: u128) {
 +    panic!();
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_simd() {
 +    assert!(is_x86_feature_detected!("sse2"));
 +
 +    let x = _mm_setzero_si128();
 +    let y = _mm_set1_epi16(7);
 +    let or = _mm_or_si128(x, y);
 +    let cmp_eq = _mm_cmpeq_epi8(y, y);
 +    let cmp_lt = _mm_cmplt_epi8(y, y);
 +
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
 +
 +    test_mm_slli_si128();
 +    test_mm_movemask_epi8();
 +    test_mm256_movemask_epi8();
 +    test_mm_add_epi8();
 +    test_mm_add_pd();
 +    test_mm_cvtepi8_epi16();
 +    test_mm_cvtsi128_si64();
 +
 +    test_mm_extract_epi8();
 +    test_mm_insert_epi16();
 +
 +    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
 +    assert_eq!(mask1, 1);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_slli_si128() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 1);
 +    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 15);
 +    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 16);
 +    assert_eq_m128i(r, _mm_set1_epi8(0));
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, -1);
 +    assert_eq_m128i(_mm_set1_epi8(0), r);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, -0x80000000);
 +    assert_eq_m128i(r, _mm_set1_epi8(0));
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_movemask_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
 +        0b0101, 0b1111_0000u8 as i8, 0, 0,
 +        0, 0, 0b1111_0000u8 as i8, 0b0101,
 +        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
 +    );
 +    let r = _mm_movemask_epi8(a);
 +    assert_eq!(r, 0b10100100_00100101);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "avx2")]
 +unsafe fn test_mm256_movemask_epi8() {
 +    let a = _mm256_set1_epi8(-1);
 +    let r = _mm256_movemask_epi8(a);
 +    let e = -1;
 +    assert_eq!(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_epi8() {
 +    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    #[rustfmt::skip]
 +    let b = _mm_setr_epi8(
 +        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 +    );
 +    let r = _mm_add_epi8(a, b);
 +    #[rustfmt::skip]
 +    let e = _mm_setr_epi8(
 +        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
 +    );
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_pd() {
 +    let a = _mm_setr_pd(1.0, 2.0);
 +    let b = _mm_setr_pd(5.0, 10.0);
 +    let r = _mm_add_pd(a, b);
 +    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
 +    unsafe {
 +        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
 +    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
 +        panic!("{:?} != {:?}", a, b);
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_cvtsi128_si64() {
 +    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
 +    assert_eq!(r, 5);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_cvtepi8_epi16() {
 +    let a = _mm_set1_epi8(10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(10);
 +    assert_eq_m128i(r, e);
 +    let a = _mm_set1_epi8(-10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(-10);
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_extract_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        -1, 1, 2, 3, 4, 5, 6, 7,
 +        8, 9, 10, 11, 12, 13, 14, 15
 +    );
 +    let r1 = _mm_extract_epi8(a, 0);
 +    let r2 = _mm_extract_epi8(a, 19);
 +    assert_eq!(r1, 0xFF);
 +    assert_eq!(r2, 3);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_insert_epi16() {
 +    let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
 +    let r = _mm_insert_epi16(a, 9, 0);
 +    let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
 +    assert_eq_m128i(r, e);
 +}
 +
 +fn test_checked_mul() {
 +    let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
 +    assert_eq!(u, None);
 +
 +    assert_eq!(1u8.checked_mul(255u8), Some(255u8));
 +    assert_eq!(255u8.checked_mul(255u8), None);
 +    assert_eq!(1i8.checked_mul(127i8), Some(127i8));
 +    assert_eq!(127i8.checked_mul(127i8), None);
 +    assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
 +    assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
 +    assert_eq!((-128i8).checked_mul(-128i8), None);
 +
++    assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
++    assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
++    assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
++    assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
++    assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
++    assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
++    assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
 +}
 +
 +#[derive(PartialEq)]
 +enum LoopState {
 +    Continue(()),
 +    Break(())
 +}
 +
 +pub enum Instruction {
 +    Increment,
 +    Loop,
 +}
 +
 +fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
 +    match a {
 +        None => None,
 +        Some((_, instr)) => Some(instr),
 +    }
 +}
index 87e54719fdc76316af8e8a35a9f8147a3628c186,0000000000000000000000000000000000000000..0ca96be9ae731fdcecf8e10ade0c9f3130b6b8d6
mode 100644,000000..100644
--- /dev/null
@@@ -1,1 -1,0 +1,1 @@@
- nightly-2020-10-26
++nightly-2020-10-31
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..e63daa40f354099a4b3fdbe3612f73dfb7b8ac70
new file mode 100755 (executable)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,16 @@@
++#!/bin/bash
++
++dir=$(dirname "$0")
++source $dir/config.sh
++
++# read nightly compiler from rust-toolchain file
++TOOLCHAIN=$(cat $dir/rust-toolchain)
++
++cmd=$1
++shift || true
++
++if [[ "$cmd" = "jit" ]]; then
++cargo +${TOOLCHAIN} rustc $@ -- --jit
++else
++cargo +${TOOLCHAIN} $cmd $@
++fi
index 530b7f242a0982172d31c90a84174d02dd8441cf,0000000000000000000000000000000000000000..af181f4f724395dab2f756b25ef948ecbeb6a0fb
mode 100644,000000..100644
--- /dev/null
@@@ -1,56 -1,0 +1,57 @@@
- export RUSTC=$(pwd)/"target/"$CHANNEL"/cg_clif"
 +set -e
 +
 +unamestr=`uname`
 +if [[ "$unamestr" == 'Linux' ]]; then
 +   dylib_ext='so'
 +elif [[ "$unamestr" == 'Darwin' ]]; then
 +   dylib_ext='dylib'
 +else
 +   echo "Unsupported os"
 +   exit 1
 +fi
 +
 +HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
 +TARGET_TRIPLE=$HOST_TRIPLE
 +#TARGET_TRIPLE="x86_64-pc-windows-gnu"
 +#TARGET_TRIPLE="aarch64-unknown-linux-gnu"
 +
 +linker=''
 +RUN_WRAPPER=''
 +export JIT_SUPPORTED=1
 +if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
 +   export JIT_SUPPORTED=0
 +   if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
 +      # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
 +      linker='-Clinker=aarch64-linux-gnu-gcc'
 +      RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
 +   elif [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
 +      # We are cross-compiling for Windows. Run tests in wine.
 +      RUN_WRAPPER='wine'
 +   else
 +      echo "Unknown non-native platform"
 +   fi
 +fi
 +
 +if echo "$RUSTC_WRAPPER" | grep sccache; then
 +echo
 +echo -e "\x1b[1;93m=== Warning: Unset RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
 +echo
 +export RUSTC_WRAPPER=
 +fi
 +
- '-Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
++dir=$(cd $(dirname "$BASH_SOURCE"); pwd)
++
++export RUSTC=$dir"/cg_clif"
 +export RUSTFLAGS=$linker
 +export RUSTDOCFLAGS=$linker' -Ztrim-diagnostic-paths=no -Cpanic=abort -Zpanic-abort-tests '\
- export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/"$TARGET_TRIPLE"/lib:\
- $(pwd)/target/"$CHANNEL":$(rustc --print sysroot)/lib"
++'-Zcodegen-backend='$dir'/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$dir'/sysroot'
 +
 +# FIXME remove once the atomic shim is gone
 +if [[ `uname` == 'Darwin' ]]; then
 +   export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
 +fi
 +
++export LD_LIBRARY_PATH="$dir:$(rustc --print sysroot)/lib:$dir/target/out:$dir/sysroot/lib/rustlib/"$TARGET_TRIPLE"/lib"
 +export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
 +
 +export CG_CLIF_DISPLAY_CG_TIME=1
index c70c3ec47f31fac0fe6a4cd07b6f21edd811a824,0000000000000000000000000000000000000000..3327c10089d9b0847fe9fbdccf12c03f91aab412
mode 100755,000000..100755
--- /dev/null
@@@ -1,126 -1,0 +1,125 @@@
- CHANNEL="release"
 +#!/bin/bash
 +#![forbid(unsafe_code)]/* This line is ignored by bash
 +# This block is ignored by rustc
- source scripts/config.sh
 +pushd $(dirname "$0")/../
++source build/config.sh
 +popd
 +PROFILE=$1 OUTPUT=$2 exec $RUSTC $RUSTFLAGS --jit $0
 +#*/
 +
 +//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
 +//! profiles.
 +//!
 +//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
 +//!
 +//! This file is specially crafted to be both a valid bash script and valid rust source file. If
 +//! executed as bash script this will run the rust source using cg_clif in JIT mode.
 +
 +use std::io::Write;
 +
 +fn main() -> Result<(), Box<dyn std::error::Error>> {
 +    let profile_name = std::env::var("PROFILE").unwrap();
 +    let output_name = std::env::var("OUTPUT").unwrap();
 +    if profile_name.is_empty() || output_name.is_empty() {
 +        println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
 +        std::process::exit(1);
 +    }
 +    let profile = std::fs::read_to_string(profile_name)
 +        .map_err(|err| format!("Failed to read profile {}", err))?;
 +    let mut output = std::fs::OpenOptions::new()
 +        .create(true)
 +        .write(true)
 +        .truncate(true)
 +        .open(output_name)?;
 +
 +    for line in profile.lines() {
 +        let mut stack = &line[..line.rfind(" ").unwrap()];
 +        let count = &line[line.rfind(" ").unwrap() + 1..];
 +
 +        // Filter away uninteresting samples
 +        if !stack.contains("rustc_codegen_cranelift") {
 +            continue;
 +        }
 +
 +        if stack.contains("rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items")
 +            || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
 +            || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
 +        {
 +            continue;
 +        }
 +
 +        // Trim start
 +        if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
 +            stack = &stack[index..];
 +        }
 +
 +        if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
 +            stack = &stack[index..];
 +        }
 +
 +        // Trim end
 +        const MALLOC: &str = "malloc";
 +        if let Some(index) = stack.find(MALLOC) {
 +            stack = &stack[..index + MALLOC.len()];
 +        }
 +
 +        const FREE: &str = "free";
 +        if let Some(index) = stack.find(FREE) {
 +            stack = &stack[..index + FREE.len()];
 +        }
 +
 +        const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
 +        if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
 +            stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
 +        }
 +
 +        const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
 +            "rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items";
 +        if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
 +            stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
 +        }
 +
 +        const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
 +        if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
 +            stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
 +        }
 +
 +        const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
 +        if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
 +            stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
 +        }
 +
 +        const ENCODE_METADATA: &str = "rustc_middle::ty::context::TyCtxt::encode_metadata";
 +        if let Some(index) = stack.find(ENCODE_METADATA) {
 +            stack = &stack[..index + ENCODE_METADATA.len()];
 +        }
 +
 +        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
 +        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
 +            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
 +        }
 +
 +        const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
 +        if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
 +            stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
 +        }
 +
 +        const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
 +        if let Some(index) = stack.find(INST_BUILD) {
 +            stack = &stack[..index + INST_BUILD.len()];
 +        }
 +
 +        output.write_all(stack.as_bytes())?;
 +        output.write_all(&*b" ")?;
 +        output.write_all(count.as_bytes())?;
 +        output.write_all(&*b"\n")?;
 +    }
 +
 +    Ok(())
 +}
index 38991d6d47dd04a58e0f9f9f473debaae938b657,0000000000000000000000000000000000000000..541b3c6563bab74816241c2f94d33ec5fb164684
mode 100755,000000..100755
--- /dev/null
@@@ -1,33 -1,0 +1,42 @@@
 +#!/bin/bash
 +
 +set -e
 +
 +case $1 in
 +    "prepare")
 +        TOOLCHAIN=$(date +%Y-%m-%d)
 +
 +        echo "=> Installing new nightly"
 +        rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
 +        echo nightly-${TOOLCHAIN} > rust-toolchain
 +        rustup component add rustfmt || true
 +
 +        echo "=> Uninstalling all old nighlies"
 +        for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
 +            rustup toolchain uninstall $nightly
 +        done
 +
 +        ./clean_all.sh
 +        ./prepare.sh
 +
 +        (cd build_sysroot && cargo update)
 +
 +        ;;
 +    "commit")
 +        git add rust-toolchain build_sysroot/Cargo.lock
 +        git commit -m "Rustup to $(rustc -V)"
 +        ;;
++    "push")
++      cg_clif=$(pwd)
++      pushd ../rust
++      branch=update_cg_clif-$(date +%Y-%m-%d)
++      git checkout -b $branch
++      git subtree pull --prefix=compiler/rustc_codegen_cranelift/ https://github.com/bjorn3/rustc_codegen_cranelift.git master
++      git push -u my $branch
++      popd
++      ;;
 +    *)
 +        echo "Unknown command '$1'"
 +        echo "Usage: ./rustup.sh prepare|commit"
 +        ;;
 +esac
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..7f43f81a6cdcd4aa9f5a539d7c9b50c898f70dce
new file mode 100755 (executable)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,65 @@@
++#!/bin/bash
++set -e
++
++cd $(dirname "$0")/../
++
++./build.sh
++source build/config.sh
++
++echo "[TEST] Bootstrap of rustc"
++git clone https://github.com/rust-lang/rust.git || true
++pushd rust
++git fetch
++git checkout -- .
++git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
++
++git apply - <<EOF
++diff --git a/.gitmodules b/.gitmodules
++index 984113151de..c1e9d960d56 100644
++--- a/.gitmodules
+++++ b/.gitmodules
++@@ -34,10 +34,6 @@
++ [submodule "src/doc/edition-guide"]
++      path = src/doc/edition-guide
++      url = https://github.com/rust-lang/edition-guide.git
++-[submodule "src/llvm-project"]
++-     path = src/llvm-project
++-     url = https://github.com/rust-lang/llvm-project.git
++-     branch = rustc/11.0-2020-10-12
++ [submodule "src/doc/embedded-book"]
++      path = src/doc/embedded-book
++      url = https://github.com/rust-embedded/book.git
++diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
++index 23e689fcae7..5f077b765b6 100644
++--- a/compiler/rustc_data_structures/Cargo.toml
+++++ b/compiler/rustc_data_structures/Cargo.toml
++@@ -32,7 +32,6 @@ tempfile = "3.0.5"
++
++ [dependencies.parking_lot]
++ version = "0.11"
++-features = ["nightly"]
++
++ [target.'cfg(windows)'.dependencies]
++ winapi = { version = "0.3", features = ["fileapi", "psapi"] }
++EOF
++
++cat > config.toml <<EOF
++[llvm]
++ninja = false
++
++[build]
++rustc = "$(pwd)/../build/cg_clif"
++cargo = "$(rustup which cargo)"
++full-bootstrap = true
++local-rebuild = true
++
++[rust]
++codegen-backends = ["cranelift"]
++EOF
++
++rm -r compiler/rustc_codegen_cranelift/{Cargo.*,src}
++cp ../Cargo.* compiler/rustc_codegen_cranelift/
++cp -r ../src compiler/rustc_codegen_cranelift/src
++
++./x.py build --stage 1 library/std
++popd
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..d941b73c81bcc01a4796a69ea0fff796e9a5e337
new file mode 100755 (executable)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,123 @@@
++#!/bin/bash
++
++set -e
++
++source build/config.sh
++export CG_CLIF_INCR_CACHE_DISABLED=1
++MY_RUSTC=$RUSTC" "$RUSTFLAGS" -L crate=target/out --out-dir target/out -Cdebuginfo=2"
++
++function no_sysroot_tests() {
++    echo "[BUILD] mini_core"
++    $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
++
++    echo "[BUILD] example"
++    $MY_RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
++
++    if [[ "$JIT_SUPPORTED" = "1" ]]; then
++        echo "[JIT] mini_core_hello_world"
++        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC --jit example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
++    else
++        echo "[JIT] mini_core_hello_world (skipped)"
++    fi
++
++    echo "[AOT] mini_core_hello_world"
++    $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
++    # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
++
++    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
++    $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
++}
++
++function base_sysroot_tests() {
++    echo "[AOT] alloc_example"
++    $MY_RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/alloc_example
++
++    if [[ "$JIT_SUPPORTED" = "1" ]]; then
++        echo "[JIT] std_example"
++        $MY_RUSTC --jit example/std_example.rs --target $HOST_TRIPLE
++    else
++        echo "[JIT] std_example (skipped)"
++    fi
++
++    echo "[AOT] dst_field_align"
++    # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
++    $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
++
++    echo "[AOT] std_example"
++    $MY_RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/std_example arg
++
++    echo "[AOT] subslice-patterns-const-eval"
++    $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
++
++    echo "[AOT] track-caller-attribute"
++    $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/track-caller-attribute
++
++    echo "[AOT] mod_bench"
++    $MY_RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
++    $RUN_WRAPPER ./target/out/mod_bench
++
++    pushd rand
++    rm -r ./target || true
++    ../build/cargo.sh test --workspace
++    popd
++}
++
++function extended_sysroot_tests() {
++    pushd simple-raytracer
++    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
++        echo "[BENCH COMPILE] ebobby/simple-raytracer"
++        hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "cargo clean" \
++        "RUSTC=rustc RUSTFLAGS='' cargo build" \
++        "../build/cargo.sh build"
++
++        echo "[BENCH RUN] ebobby/simple-raytracer"
++        cp ./target/debug/main ./raytracer_cg_clif
++        hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_clif
++    else
++        echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
++        echo "[COMPILE] ebobby/simple-raytracer"
++        ../cargo.sh build
++        echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
++    fi
++    popd
++
++    pushd build_sysroot/sysroot_src/library/core/tests
++    echo "[TEST] libcore"
++    rm -r ./target || true
++    ../../../../../build/cargo.sh test
++    popd
++
++    pushd regex
++    echo "[TEST] rust-lang/regex example shootout-regex-dna"
++    ../build/cargo.sh clean
++    # Make sure `[codegen mono items] start` doesn't poison the diff
++    ../build/cargo.sh build --example shootout-regex-dna
++    cat examples/regexdna-input.txt | ../build/cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
++    diff -u res.txt examples/regexdna-output.txt
++
++    echo "[TEST] rust-lang/regex tests"
++    ../build/cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
++    popd
++}
++
++case "$1" in
++    "no_sysroot")
++        no_sysroot_tests
++        ;;
++    "base_sysroot")
++        base_sysroot_tests
++        ;;
++    "extended_sysroot")
++        extended_sysroot_tests
++        ;;
++    *)
++        echo "unknown test suite"
++        ;;
++esac
index 7bb00c8d46a4c6c296d04c05f68ca81d5f2e15bb,0000000000000000000000000000000000000000..01073d26e832a98fd68cf6ac7463c6fe17abde98
mode 100644,000000..100644
--- /dev/null
@@@ -1,130 -1,0 +1,130 @@@
-     fx.add_global_comment(format!(
-         "kind  loc.idx   param    pass mode                            ty"
-     ));
 +//! Annotate the clif ir with comments describing how arguments are passed into the current function
 +//! and where all locals are stored.
 +
 +use std::borrow::Cow;
 +
 +use rustc_middle::mir;
 +
 +use cranelift_codegen::entity::EntityRef;
 +
 +use crate::abi::pass_mode::*;
 +use crate::prelude::*;
 +
 +pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
-     fx.add_global_comment(format!(
-         "kind  local ty                              size align (abi,pref)"
-     ));
++    fx.add_global_comment(
++        "kind  loc.idx   param    pass mode                            ty".to_string(),
++    );
 +}
 +
 +pub(super) fn add_arg_comment<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    kind: &str,
 +    local: Option<mir::Local>,
 +    local_field: Option<usize>,
 +    params: EmptySinglePair<Value>,
 +    pass_mode: PassMode,
 +    ty: Ty<'tcx>,
 +) {
 +    let local = if let Some(local) = local {
 +        Cow::Owned(format!("{:?}", local))
 +    } else {
 +        Cow::Borrowed("???")
 +    };
 +    let local_field = if let Some(local_field) = local_field {
 +        Cow::Owned(format!(".{}", local_field))
 +    } else {
 +        Cow::Borrowed("")
 +    };
 +
 +    let params = match params {
 +        Empty => Cow::Borrowed("-"),
 +        Single(param) => Cow::Owned(format!("= {:?}", param)),
 +        Pair(param_a, param_b) => Cow::Owned(format!("= {:?}, {:?}", param_a, param_b)),
 +    };
 +
 +    let pass_mode = format!("{:?}", pass_mode);
 +    fx.add_global_comment(format!(
 +        "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
 +        kind = kind,
 +        local = local,
 +        local_field = local_field,
 +        params = params,
 +        pass_mode = pass_mode,
 +        ty = ty,
 +    ));
 +}
 +
 +pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
 +    fx.add_global_comment(String::new());
++    fx.add_global_comment(
++        "kind  local ty                              size align (abi,pref)".to_string(),
++    );
 +}
 +
 +pub(super) fn add_local_place_comments<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    place: CPlace<'tcx>,
 +    local: Local,
 +) {
 +    let TyAndLayout { ty, layout } = place.layout();
 +    let rustc_target::abi::Layout {
 +        size,
 +        align,
 +        abi: _,
 +        variants: _,
 +        fields: _,
 +        largest_niche: _,
 +    } = layout;
 +
 +    let (kind, extra) = match *place.inner() {
 +        CPlaceInner::Var(place_local, var) => {
 +            assert_eq!(local, place_local);
 +            ("ssa", Cow::Owned(format!(",var={}", var.index())))
 +        }
 +        CPlaceInner::VarPair(place_local, var1, var2) => {
 +            assert_eq!(local, place_local);
 +            (
 +                "ssa",
 +                Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
 +            )
 +        }
 +        CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
 +        CPlaceInner::Addr(ptr, meta) => {
 +            let meta = if let Some(meta) = meta {
 +                Cow::Owned(format!(",meta={}", meta))
 +            } else {
 +                Cow::Borrowed("")
 +            };
 +            match ptr.base_and_offset() {
 +                (crate::pointer::PointerBase::Addr(addr), offset) => (
 +                    "reuse",
 +                    format!("storage={}{}{}", addr, offset, meta).into(),
 +                ),
 +                (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
 +                    "stack",
 +                    format!("storage={}{}{}", stack_slot, offset, meta).into(),
 +                ),
 +                (crate::pointer::PointerBase::Dangling(align), offset) => (
 +                    "zst",
 +                    format!("align={},offset={}", align.bytes(), offset).into(),
 +                ),
 +            }
 +        }
 +    };
 +
 +    fx.add_global_comment(format!(
 +        "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
 +        kind,
 +        format!("{:?}", local),
 +        format!("{:?}", ty),
 +        size.bytes(),
 +        align.abi.bytes(),
 +        align.pref.bytes(),
 +        if extra.is_empty() {
 +            ""
 +        } else {
 +            "              "
 +        },
 +        extra,
 +    ));
 +}
index 801691228431770e98453154e9d80a110a793653,0000000000000000000000000000000000000000..81091728692f3f4d21017fb04a977bcb7066e1ca
mode 100644,000000..100644
--- /dev/null
@@@ -1,766 -1,0 +1,763 @@@
-             .into_iter()
 +//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
 +
 +#[cfg(debug_assertions)]
 +mod comments;
 +mod pass_mode;
 +mod returning;
 +
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_target::spec::abi::Abi;
 +
 +use cranelift_codegen::ir::{AbiParam, ArgumentPurpose};
 +
 +use self::pass_mode::*;
 +use crate::prelude::*;
 +
 +pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
 +
 +// Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301
 +#[rustfmt::skip]
 +pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> {
 +    use rustc_middle::ty::subst::Subst;
 +
 +    // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
 +    let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 +    match *ty.kind() {
 +        ty::FnDef(..) => {
 +            // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
 +            // parameters unused if they show up in the signature, but not in the `mir::Body`
 +            // (i.e. due to being inside a projection that got normalized, see
 +            // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
 +            // track of a polymorphization `ParamEnv` to allow normalizing later.
 +            let mut sig = match *ty.kind() {
 +                ty::FnDef(def_id, substs) => tcx
 +                    .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
 +                    .subst(tcx, substs),
 +                _ => unreachable!(),
 +            };
 +
 +            if let ty::InstanceDef::VtableShim(..) = instance.def {
 +                // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
 +                sig = sig.map_bound(|mut sig| {
 +                    let mut inputs_and_output = sig.inputs_and_output.to_vec();
 +                    inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
 +                    sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
 +                    sig
 +                });
 +            }
 +            sig
 +        }
 +        ty::Closure(def_id, substs) => {
 +            let sig = substs.as_closure().sig();
 +
 +            let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
 +            sig.map_bound(|sig| {
 +                tcx.mk_fn_sig(
 +                    std::iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
 +                    sig.output(),
 +                    sig.c_variadic,
 +                    sig.unsafety,
 +                    sig.abi,
 +                )
 +            })
 +        }
 +        ty::Generator(_, substs, _) => {
 +            let sig = substs.as_generator().poly_sig();
 +
 +            let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
 +            let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
 +
 +            let pin_did = tcx.require_lang_item(rustc_hir::LangItem::Pin, None);
 +            let pin_adt_ref = tcx.adt_def(pin_did);
 +            let pin_substs = tcx.intern_substs(&[env_ty.into()]);
 +            let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
 +
 +            sig.map_bound(|sig| {
 +                let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorState, None);
 +                let state_adt_ref = tcx.adt_def(state_did);
 +                let state_substs =
 +                    tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
 +                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
 +
 +                tcx.mk_fn_sig(
 +                    [env_ty, sig.resume_ty].iter(),
 +                    &ret_ty,
 +                    false,
 +                    rustc_hir::Unsafety::Normal,
 +                    rustc_target::spec::abi::Abi::Rust,
 +                )
 +            })
 +        }
 +        _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
 +    }
 +}
 +
 +fn clif_sig_from_fn_sig<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    triple: &target_lexicon::Triple,
 +    sig: FnSig<'tcx>,
 +    span: Span,
 +    is_vtable_fn: bool,
 +    requires_caller_location: bool,
 +) -> Signature {
 +    let abi = match sig.abi {
 +        Abi::System => Abi::C,
 +        abi => abi,
 +    };
 +    let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
 +        Abi::Rust => (
 +            CallConv::triple_default(triple),
 +            sig.inputs().to_vec(),
 +            sig.output(),
 +        ),
 +        Abi::C | Abi::Unadjusted => (
 +            CallConv::triple_default(triple),
 +            sig.inputs().to_vec(),
 +            sig.output(),
 +        ),
 +        Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
 +        Abi::RustCall => {
 +            assert_eq!(sig.inputs().len(), 2);
 +            let extra_args = match sig.inputs().last().unwrap().kind() {
 +                ty::Tuple(ref tupled_arguments) => tupled_arguments,
 +                _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
 +            };
 +            let mut inputs: Vec<Ty<'tcx>> = vec![sig.inputs()[0]];
 +            inputs.extend(extra_args.types());
 +            (CallConv::triple_default(triple), inputs, sig.output())
 +        }
 +        Abi::System => unreachable!(),
 +        Abi::RustIntrinsic => (
 +            CallConv::triple_default(triple),
 +            sig.inputs().to_vec(),
 +            sig.output(),
 +        ),
 +        _ => unimplemented!("unsupported abi {:?}", sig.abi),
 +    };
 +
 +    let inputs = inputs
 +        .into_iter()
 +        .enumerate()
 +        .map(|(i, ty)| {
 +            let mut layout = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
 +            if i == 0 && is_vtable_fn {
 +                // Virtual calls turn their self param into a thin pointer.
 +                // See https://github.com/rust-lang/rust/blob/37b6a5e5e82497caf5353d9d856e4eb5d14cbe06/src/librustc/ty/layout.rs#L2519-L2572 for more info
 +                layout = tcx
 +                    .layout_of(ParamEnv::reveal_all().and(tcx.mk_mut_ptr(tcx.mk_unit())))
 +                    .unwrap();
 +            }
 +            let pass_mode = get_pass_mode(tcx, layout);
 +            if abi != Abi::Rust && abi != Abi::RustCall && abi != Abi::RustIntrinsic {
 +                match pass_mode {
 +                    PassMode::NoPass | PassMode::ByVal(_) => {}
 +                    PassMode::ByRef { size: Some(size) } => {
 +                        let purpose = ArgumentPurpose::StructArgument(u32::try_from(size.bytes()).expect("struct too big to pass on stack"));
 +                        return EmptySinglePair::Single(AbiParam::special(pointer_ty(tcx), purpose)).into_iter();
 +                    }
 +                    PassMode::ByValPair(_, _) | PassMode::ByRef { size: None } => {
 +                        tcx.sess.span_warn(
 +                            span,
 +                            &format!(
 +                                "Argument of type `{:?}` with pass mode `{:?}` is not yet supported \
 +                                for non-rust abi `{}`. Calling this function may result in a crash.",
 +                                layout.ty,
 +                                pass_mode,
 +                                abi,
 +                            ),
 +                        );
 +                    }
 +                }
 +            }
 +            pass_mode.get_param_ty(tcx).map(AbiParam::new).into_iter()
 +        })
 +        .flatten();
 +
 +    let (mut params, returns): (Vec<_>, Vec<_>) = match get_pass_mode(
 +        tcx,
 +        tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
 +    ) {
 +        PassMode::NoPass => (inputs.collect(), vec![]),
 +        PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
 +        PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
 +            inputs.collect(),
 +            vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
 +        ),
 +        PassMode::ByRef { size: Some(_) } => {
 +            (
 +                Some(pointer_ty(tcx)) // First param is place to put return val
 +                    .into_iter()
 +                    .map(|ty| AbiParam::special(ty, ArgumentPurpose::StructReturn))
 +                    .chain(inputs)
 +                    .collect(),
 +                vec![],
 +            )
 +        }
 +        PassMode::ByRef { size: None } => todo!(),
 +    };
 +
 +    if requires_caller_location {
 +        params.push(AbiParam::new(pointer_ty(tcx)));
 +    }
 +
 +    Signature {
 +        params,
 +        returns,
 +        call_conv,
 +    }
 +}
 +
 +pub(crate) fn get_function_name_and_sig<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    triple: &target_lexicon::Triple,
 +    inst: Instance<'tcx>,
 +    support_vararg: bool,
 +) -> (String, Signature) {
 +    assert!(!inst.substs.needs_infer());
 +    let fn_sig = tcx.normalize_erasing_late_bound_regions(
 +        ParamEnv::reveal_all(),
 +        &fn_sig_for_fn_abi(tcx, inst),
 +    );
 +    if fn_sig.c_variadic && !support_vararg {
 +        tcx.sess.span_fatal(
 +            tcx.def_span(inst.def_id()),
 +            "Variadic function definitions are not yet supported",
 +        );
 +    }
 +    let sig = clif_sig_from_fn_sig(
 +        tcx,
 +        triple,
 +        fn_sig,
 +        tcx.def_span(inst.def_id()),
 +        false,
 +        inst.def.requires_caller_location(tcx),
 +    );
 +    (tcx.symbol_name(inst).name.to_string(), sig)
 +}
 +
 +/// Instance must be monomorphized
 +pub(crate) fn import_function<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    module: &mut impl Module,
 +    inst: Instance<'tcx>,
 +) -> FuncId {
 +    let (name, sig) = get_function_name_and_sig(tcx, module.isa().triple(), inst, true);
 +    module
 +        .declare_function(&name, Linkage::Import, &sig)
 +        .unwrap()
 +}
 +
 +impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
 +    /// Instance must be monomorphized
 +    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
 +        let func_id = import_function(self.tcx, &mut self.cx.module, inst);
 +        let func_ref = self
 +            .cx
 +            .module
 +            .declare_func_in_func(func_id, &mut self.bcx.func);
 +
 +        #[cfg(debug_assertions)]
 +        self.add_comment(func_ref, format!("{:?}", inst));
 +
 +        func_ref
 +    }
 +
 +    pub(crate) fn lib_call(
 +        &mut self,
 +        name: &str,
 +        input_tys: Vec<types::Type>,
 +        output_tys: Vec<types::Type>,
 +        args: &[Value],
 +    ) -> &[Value] {
 +        let sig = Signature {
 +            params: input_tys.iter().cloned().map(AbiParam::new).collect(),
 +            returns: output_tys.iter().cloned().map(AbiParam::new).collect(),
 +            call_conv: CallConv::triple_default(self.triple()),
 +        };
 +        let func_id = self
 +            .cx
 +            .module
 +            .declare_function(&name, Linkage::Import, &sig)
 +            .unwrap();
 +        let func_ref = self
 +            .cx
 +            .module
 +            .declare_func_in_func(func_id, &mut self.bcx.func);
 +        let call_inst = self.bcx.ins().call(func_ref, args);
 +        #[cfg(debug_assertions)]
 +        {
 +            self.add_comment(call_inst, format!("easy_call {}", name));
 +        }
 +        let results = self.bcx.inst_results(call_inst);
 +        assert!(results.len() <= 2, "{}", results.len());
 +        results
 +    }
 +
 +    pub(crate) fn easy_call(
 +        &mut self,
 +        name: &str,
 +        args: &[CValue<'tcx>],
 +        return_ty: Ty<'tcx>,
 +    ) -> CValue<'tcx> {
 +        let (input_tys, args): (Vec<_>, Vec<_>) = args
-         match arg_kind {
-             ArgKind::Normal(Some(val)) => {
-                 if let Some((addr, meta)) = val.try_to_ptr() {
-                     let local_decl = &fx.mir.local_decls[local];
-                     //                       v this ! is important
-                     let internally_mutable = !val.layout().ty.is_freeze(
-                         fx.tcx.at(local_decl.source_info.span),
-                         ParamEnv::reveal_all(),
-                     );
-                     if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
-                         // We wont mutate this argument, so it is fine to borrow the backing storage
-                         // of this argument, to prevent a copy.
-                         let place = if let Some(meta) = meta {
-                             CPlace::for_ptr_with_extra(addr, meta, val.layout())
-                         } else {
-                             CPlace::for_ptr(addr, val.layout())
-                         };
-                         #[cfg(debug_assertions)]
-                         self::comments::add_local_place_comments(fx, place, local);
-                         assert_eq!(fx.local_map.push(place), local);
-                         continue;
-                     }
++            .iter()
 +            .map(|arg| {
 +                (
 +                    self.clif_type(arg.layout().ty).unwrap(),
 +                    arg.load_scalar(self),
 +                )
 +            })
 +            .unzip();
 +        let return_layout = self.layout_of(return_ty);
 +        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
 +            tup.types().map(|ty| self.clif_type(ty).unwrap()).collect()
 +        } else {
 +            vec![self.clif_type(return_ty).unwrap()]
 +        };
 +        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
 +        match *ret_vals {
 +            [] => CValue::by_ref(
 +                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
 +                return_layout,
 +            ),
 +            [val] => CValue::by_val(val, return_layout),
 +            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
 +            _ => unreachable!(),
 +        }
 +    }
 +}
 +
 +/// Make a [`CPlace`] capable of holding value of the specified type.
 +fn make_local_place<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    local: Local,
 +    layout: TyAndLayout<'tcx>,
 +    is_ssa: bool,
 +) -> CPlace<'tcx> {
 +    let place = if is_ssa {
 +        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
 +            CPlace::new_var_pair(fx, local, layout)
 +        } else {
 +            CPlace::new_var(fx, local, layout)
 +        }
 +    } else {
 +        CPlace::new_stack_slot(fx, layout)
 +    };
 +
 +    #[cfg(debug_assertions)]
 +    self::comments::add_local_place_comments(fx, place, local);
 +
 +    place
 +}
 +
 +pub(crate) fn codegen_fn_prelude<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    start_block: Block,
 +) {
 +    let ssa_analyzed = crate::analyze::analyze(fx);
 +
 +    #[cfg(debug_assertions)]
 +    self::comments::add_args_header_comment(fx);
 +
 +    let ret_place = self::returning::codegen_return_param(fx, &ssa_analyzed, start_block);
 +    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
 +
 +    // None means pass_mode == NoPass
 +    enum ArgKind<'tcx> {
 +        Normal(Option<CValue<'tcx>>),
 +        Spread(Vec<Option<CValue<'tcx>>>),
 +    }
 +
 +    let func_params = fx
 +        .mir
 +        .args_iter()
 +        .map(|local| {
 +            let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
 +
 +            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
 +            if Some(local) == fx.mir.spread_arg {
 +                // This argument (e.g. the last argument in the "rust-call" ABI)
 +                // is a tuple that was spread at the ABI level and now we have
 +                // to reconstruct it into a tuple local variable, from multiple
 +                // individual function arguments.
 +
 +                let tupled_arg_tys = match arg_ty.kind() {
 +                    ty::Tuple(ref tys) => tys,
 +                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
 +                };
 +
 +                let mut params = Vec::new();
 +                for (i, arg_ty) in tupled_arg_tys.types().enumerate() {
 +                    let param = cvalue_for_param(fx, start_block, Some(local), Some(i), arg_ty);
 +                    params.push(param);
 +                }
 +
 +                (local, ArgKind::Spread(params), arg_ty)
 +            } else {
 +                let param = cvalue_for_param(fx, start_block, Some(local), None, arg_ty);
 +                (local, ArgKind::Normal(param), arg_ty)
 +            }
 +        })
 +        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
 +
 +    assert!(fx.caller_location.is_none());
 +    if fx.instance.def.requires_caller_location(fx.tcx) {
 +        // Store caller location for `#[track_caller]`.
 +        fx.caller_location = Some(
 +            cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
 +        );
 +    }
 +
 +    fx.bcx.switch_to_block(start_block);
 +    fx.bcx.ins().nop();
 +
 +    #[cfg(debug_assertions)]
 +    self::comments::add_locals_header_comment(fx);
 +
 +    for (local, arg_kind, ty) in func_params {
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        // While this is normally an optimization to prevent an unnecessary copy when an argument is
 +        // not mutated by the current function, this is necessary to support unsized arguments.
-             _ => {}
++        if let ArgKind::Normal(Some(val)) = arg_kind {
++            if let Some((addr, meta)) = val.try_to_ptr() {
++                let local_decl = &fx.mir.local_decls[local];
++                //                       v this ! is important
++                let internally_mutable = !val.layout().ty.is_freeze(
++                    fx.tcx.at(local_decl.source_info.span),
++                    ParamEnv::reveal_all(),
++                );
++                if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
++                    // We wont mutate this argument, so it is fine to borrow the backing storage
++                    // of this argument, to prevent a copy.
++
++                    let place = if let Some(meta) = meta {
++                        CPlace::for_ptr_with_extra(addr, meta, val.layout())
++                    } else {
++                        CPlace::for_ptr(addr, val.layout())
++                    };
++
++                    #[cfg(debug_assertions)]
++                    self::comments::add_local_place_comments(fx, place, local);
++
++                    assert_eq!(fx.local_map.push(place), local);
++                    continue;
 +                }
 +            }
-     let destination = destination.map(|(place, bb)| (trans_place(fx, place), bb));
 +        }
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +
 +        match arg_kind {
 +            ArgKind::Normal(param) => {
 +                if let Some(param) = param {
 +                    place.write_cvalue(fx, param);
 +                }
 +            }
 +            ArgKind::Spread(params) => {
 +                for (i, param) in params.into_iter().enumerate() {
 +                    if let Some(param) = param {
 +                        place
 +                            .place_field(fx, mir::Field::new(i))
 +                            .write_cvalue(fx, param);
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    for local in fx.mir.vars_and_temps_iter() {
 +        let ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +    }
 +
 +    fx.bcx
 +        .ins()
 +        .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
 +}
 +
 +pub(crate) fn codegen_terminator_call<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    span: Span,
 +    current_block: Block,
 +    func: &Operand<'tcx>,
 +    args: &[Operand<'tcx>],
 +    destination: Option<(Place<'tcx>, BasicBlock)>,
 +) {
 +    let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.tcx));
 +    let fn_sig = fx
 +        .tcx
 +        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.tcx));
 +
-         let self_arg = trans_operand(fx, &args[0]);
-         let pack_arg = trans_operand(fx, &args[1]);
++    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
 +
 +    // Handle special calls like instrinsics and empty drop glue.
 +    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
 +        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
 +            .unwrap()
 +            .unwrap()
 +            .polymorphize(fx.tcx);
 +
 +        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
 +            crate::intrinsics::codegen_llvm_intrinsic_call(
 +                fx,
 +                &fx.tcx.symbol_name(instance).name,
 +                substs,
 +                args,
 +                destination,
 +            );
 +            return;
 +        }
 +
 +        match instance.def {
 +            InstanceDef::Intrinsic(_) => {
 +                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
 +                return;
 +            }
 +            InstanceDef::DropGlue(_, None) => {
 +                // empty drop glue - a nop.
 +                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
 +                let ret_block = fx.get_block(dest);
 +                fx.bcx.ins().jump(ret_block, &[]);
 +                return;
 +            }
 +            _ => Some(instance),
 +        }
 +    } else {
 +        None
 +    };
 +
 +    let is_cold = instance
 +        .map(|inst| {
 +            fx.tcx
 +                .codegen_fn_attrs(inst.def_id())
 +                .flags
 +                .contains(CodegenFnAttrFlags::COLD)
 +        })
 +        .unwrap_or(false);
 +    if is_cold {
 +        fx.cold_blocks.insert(current_block);
 +    }
 +
 +    // Unpack arguments tuple for closures
 +    let args = if fn_sig.abi == Abi::RustCall {
 +        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
-         args.into_iter()
-             .map(|arg| trans_operand(fx, arg))
++        let self_arg = codegen_operand(fx, &args[0]);
++        let pack_arg = codegen_operand(fx, &args[1]);
 +
 +        let tupled_arguments = match pack_arg.layout().ty.kind() {
 +            ty::Tuple(ref tupled_arguments) => tupled_arguments,
 +            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
 +        };
 +
 +        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
 +        args.push(self_arg);
 +        for i in 0..tupled_arguments.len() {
 +            args.push(pack_arg.value_field(fx, mir::Field::new(i)));
 +        }
 +        args
 +    } else {
-             let func = trans_operand(fx, func).load_scalar(fx);
++        args.iter()
++            .map(|arg| codegen_operand(fx, arg))
 +            .collect::<Vec<_>>()
 +    };
 +
 +    //   | indirect call target
 +    //   |         | the first argument to be passed
 +    //   v         v          v virtual calls are special cased below
 +    let (func_ref, first_arg, is_virtual_call) = match instance {
 +        // Trait object call
 +        Some(Instance {
 +            def: InstanceDef::Virtual(_, idx),
 +            ..
 +        }) => {
 +            #[cfg(debug_assertions)]
 +            {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(
 +                    nop_inst,
 +                    format!(
 +                        "virtual call; self arg pass mode: {:?}",
 +                        get_pass_mode(fx.tcx, args[0].layout())
 +                    ),
 +                );
 +            }
 +            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
 +            (Some(method), Single(ptr), true)
 +        }
 +
 +        // Normal call
 +        Some(_) => (
 +            None,
 +            args.get(0)
 +                .map(|arg| adjust_arg_for_abi(fx, *arg))
 +                .unwrap_or(Empty),
 +            false,
 +        ),
 +
 +        // Indirect call
 +        None => {
 +            #[cfg(debug_assertions)]
 +            {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(nop_inst, "indirect call");
 +            }
++            let func = codegen_operand(fx, func).load_scalar(fx);
 +            (
 +                Some(func),
 +                args.get(0)
 +                    .map(|arg| adjust_arg_for_abi(fx, *arg))
 +                    .unwrap_or(Empty),
 +                false,
 +            )
 +        }
 +    };
 +
 +    let ret_place = destination.map(|(place, _)| place);
 +    let (call_inst, call_args) =
 +        self::returning::codegen_with_call_return_arg(fx, fn_sig, ret_place, |fx, return_ptr| {
 +            let mut call_args: Vec<Value> = return_ptr
 +                .into_iter()
 +                .chain(first_arg.into_iter())
 +                .chain(
 +                    args.into_iter()
 +                        .skip(1)
 +                        .map(|arg| adjust_arg_for_abi(fx, arg).into_iter())
 +                        .flatten(),
 +                )
 +                .collect::<Vec<_>>();
 +
 +            if instance
 +                .map(|inst| inst.def.requires_caller_location(fx.tcx))
 +                .unwrap_or(false)
 +            {
 +                // Pass the caller location for `#[track_caller]`.
 +                let caller_location = fx.get_caller_location(span);
 +                call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
 +            }
 +
 +            let call_inst = if let Some(func_ref) = func_ref {
 +                let sig = clif_sig_from_fn_sig(
 +                    fx.tcx,
 +                    fx.triple(),
 +                    fn_sig,
 +                    span,
 +                    is_virtual_call,
 +                    false, // calls through function pointers never pass the caller location
 +                );
 +                let sig = fx.bcx.import_signature(sig);
 +                fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
 +            } else {
 +                let func_ref =
 +                    fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
 +                fx.bcx.ins().call(func_ref, &call_args)
 +            };
 +
 +            (call_inst, call_args)
 +        });
 +
 +    // FIXME find a cleaner way to support varargs
 +    if fn_sig.c_variadic {
 +        if fn_sig.abi != Abi::C {
 +            fx.tcx.sess.span_fatal(
 +                span,
 +                &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
 +            );
 +        }
 +        let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
 +        let abi_params = call_args
 +            .into_iter()
 +            .map(|arg| {
 +                let ty = fx.bcx.func.dfg.value_type(arg);
 +                if !ty.is_int() {
 +                    // FIXME set %al to upperbound on float args once floats are supported
 +                    fx.tcx
 +                        .sess
 +                        .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
 +                }
 +                AbiParam::new(ty)
 +            })
 +            .collect::<Vec<AbiParam>>();
 +        fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
 +    }
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging function returned");
 +    }
 +}
 +
 +pub(crate) fn codegen_drop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    span: Span,
 +    drop_place: CPlace<'tcx>,
 +) {
 +    let ty = drop_place.layout().ty;
 +    let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
 +
 +    if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
 +        // we don't actually need to drop anything
 +    } else {
 +        let drop_fn_ty = drop_fn.ty(fx.tcx, ParamEnv::reveal_all());
 +        let fn_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +            ParamEnv::reveal_all(),
 +            &drop_fn_ty.fn_sig(fx.tcx),
 +        );
 +        assert_eq!(fn_sig.output(), fx.tcx.mk_unit());
 +
 +        match ty.kind() {
 +            ty::Dynamic(..) => {
 +                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
 +                let ptr = ptr.get_addr(fx);
 +                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
 +
 +                let sig = clif_sig_from_fn_sig(
 +                    fx.tcx,
 +                    fx.triple(),
 +                    fn_sig,
 +                    span,
 +                    true,
 +                    false, // `drop_in_place` is never `#[track_caller]`
 +                );
 +                let sig = fx.bcx.import_signature(sig);
 +                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
 +            }
 +            _ => {
 +                assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _)));
 +
 +                let arg_value = drop_place.place_ref(
 +                    fx,
 +                    fx.layout_of(fx.tcx.mk_ref(
 +                        &ty::RegionKind::ReErased,
 +                        TypeAndMut {
 +                            ty,
 +                            mutbl: crate::rustc_hir::Mutability::Mut,
 +                        },
 +                    )),
 +                );
 +                let arg_value = adjust_arg_for_abi(fx, arg_value);
 +
 +                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
 +
 +                if drop_fn.def.requires_caller_location(fx.tcx) {
 +                    // Pass the caller location for `#[track_caller]`.
 +                    let caller_location = fx.get_caller_location(span);
 +                    call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
 +                }
 +
 +                let func_ref = fx.get_function_ref(drop_fn);
 +                fx.bcx.ins().call(func_ref, &call_args);
 +            }
 +        }
 +    }
 +}
index 0735ad6f83299a1a6eb2721455351475d3b03820,0000000000000000000000000000000000000000..6c5916550ff639f52a99c14bd8ce34c0321f4da0
mode 100644,000000..100644
--- /dev/null
@@@ -1,153 -1,0 +1,153 @@@
-     ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
 +//! Allocator shim
 +// Adapted from rustc
 +
 +use crate::prelude::*;
 +
 +use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 +use rustc_span::symbol::sym;
 +
 +/// Returns whether an allocator shim was created
 +pub(crate) fn codegen(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext<'_>,
 +) -> bool {
 +    let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
 +        use rustc_middle::middle::dependency_format::Linkage;
 +        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
 +    });
 +    if any_dynamic_crate {
 +        false
 +    } else if let Some(kind) = tcx.allocator_kind() {
 +        codegen_inner(module, unwind_context, kind);
 +        true
 +    } else {
 +        false
 +    }
 +}
 +
 +fn codegen_inner(
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext<'_>,
 +    kind: AllocatorKind,
 +) {
 +    let usize_ty = module.target_config().pointer_type();
 +
 +    for method in ALLOCATOR_METHODS {
 +        let mut arg_tys = Vec::with_capacity(method.inputs.len());
 +        for ty in method.inputs.iter() {
 +            match *ty {
 +                AllocatorTy::Layout => {
 +                    arg_tys.push(usize_ty); // size
 +                    arg_tys.push(usize_ty); // align
 +                }
 +                AllocatorTy::Ptr => arg_tys.push(usize_ty),
 +                AllocatorTy::Usize => arg_tys.push(usize_ty),
 +
 +                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
 +            }
 +        }
 +        let output = match method.output {
 +            AllocatorTy::ResultPtr => Some(usize_ty),
 +            AllocatorTy::Unit => None,
 +
 +            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
 +                panic!("invalid allocator output")
 +            }
 +        };
 +
 +        let sig = Signature {
 +            call_conv: CallConv::triple_default(module.isa().triple()),
 +            params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
 +            returns: output.into_iter().map(AbiParam::new).collect(),
 +        };
 +
 +        let caller_name = format!("__rust_{}", method.name);
 +        let callee_name = kind.fn_name(method.name);
 +        //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
 +
 +        let func_id = module
 +            .declare_function(&caller_name, Linkage::Export, &sig)
 +            .unwrap();
 +
 +        let callee_func_id = module
 +            .declare_function(&callee_name, Linkage::Import, &sig)
 +            .unwrap();
 +
 +        let mut ctx = Context::new();
 +        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let args = arg_tys
 +                .into_iter()
 +                .map(|ty| bcx.append_block_param(block, ty))
 +                .collect::<Vec<Value>>();
 +
 +            let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +            let call_inst = bcx.ins().call(callee_func_ref, &args);
 +            let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
 +
 +            bcx.ins().return_(&results);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        module
 +            .define_function(
 +                func_id,
 +                &mut ctx,
 +                &mut cranelift_codegen::binemit::NullTrapSink {},
 +            )
 +            .unwrap();
 +        unwind_context.add_function(func_id, &ctx, module.isa());
 +    }
 +
 +    let sig = Signature {
 +        call_conv: CallConv::triple_default(module.isa().triple()),
 +        params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
 +        returns: vec![],
 +    };
 +
 +    let callee_name = kind.fn_name(sym::oom);
 +    //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
 +
 +    let func_id = module
 +        .declare_function("__rust_alloc_error_handler", Linkage::Export, &sig)
 +        .unwrap();
 +
 +    let callee_func_id = module
 +        .declare_function(&callee_name, Linkage::Import, &sig)
 +        .unwrap();
 +
 +    let mut ctx = Context::new();
-             .into_iter()
++    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
 +    {
 +        let mut func_ctx = FunctionBuilderContext::new();
 +        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +        let block = bcx.create_block();
 +        bcx.switch_to_block(block);
 +        let args = (&[usize_ty, usize_ty])
++            .iter()
 +            .map(|&ty| bcx.append_block_param(block, ty))
 +            .collect::<Vec<Value>>();
 +
 +        let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +        bcx.ins().call(callee_func_ref, &args);
 +
 +        bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +        bcx.seal_all_blocks();
 +        bcx.finalize();
 +    }
 +    module
 +        .define_function(
 +            func_id,
 +            &mut ctx,
 +            &mut cranelift_codegen::binemit::NullTrapSink {},
 +        )
 +        .unwrap();
 +    unwind_context.add_function(func_id, &ctx, module.isa());
 +}
index 6382f8df3446b149218d0bef1f40f26614550ad4,0000000000000000000000000000000000000000..9a970efbcfd0b6c653e936a7de28aed4426d129a
mode 100644,000000..100644
--- /dev/null
@@@ -1,309 -1,0 +1,309 @@@
-             return false;
 +//! Creation of ar archives like for the lib and staticlib crate type
 +
 +use std::collections::BTreeMap;
 +use std::fs::File;
 +use std::path::{Path, PathBuf};
 +
 +use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
 +use rustc_codegen_ssa::METADATA_FILENAME;
 +use rustc_session::Session;
 +
 +use object::{Object, SymbolKind};
 +
 +#[derive(Debug)]
 +enum ArchiveEntry {
 +    FromArchive {
 +        archive_index: usize,
 +        entry_index: usize,
 +    },
 +    File(PathBuf),
 +}
 +
 +pub(crate) struct ArArchiveBuilder<'a> {
 +    sess: &'a Session,
 +    dst: PathBuf,
 +    lib_search_paths: Vec<PathBuf>,
 +    use_gnu_style_archive: bool,
 +    no_builtin_ranlib: bool,
 +
 +    src_archives: Vec<(PathBuf, ar::Archive<File>)>,
 +    // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
 +    // the end of an archive for linkers to not get confused.
 +    entries: Vec<(String, ArchiveEntry)>,
 +    update_symbols: bool,
 +}
 +
 +impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
 +    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
 +        use rustc_codegen_ssa::back::link::archive_search_paths;
 +
 +        let (src_archives, entries) = if let Some(input) = input {
 +            let mut archive = ar::Archive::new(File::open(input).unwrap());
 +            let mut entries = Vec::new();
 +
 +            let mut i = 0;
 +            while let Some(entry) = archive.next_entry() {
 +                let entry = entry.unwrap();
 +                entries.push((
 +                    String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
 +                    ArchiveEntry::FromArchive {
 +                        archive_index: 0,
 +                        entry_index: i,
 +                    },
 +                ));
 +                i += 1;
 +            }
 +
 +            (vec![(input.to_owned(), archive)], entries)
 +        } else {
 +            (vec![], Vec::new())
 +        };
 +
 +        ArArchiveBuilder {
 +            sess,
 +            dst: output.to_path_buf(),
 +            lib_search_paths: archive_search_paths(sess),
 +            use_gnu_style_archive: sess.target.options.archive_format == "gnu",
 +            // FIXME fix builtin ranlib on macOS
 +            no_builtin_ranlib: sess.target.options.is_like_osx,
 +
 +            src_archives,
 +            entries,
 +            update_symbols: false,
 +        }
 +    }
 +
 +    fn src_files(&mut self) -> Vec<String> {
 +        self.entries.iter().map(|(name, _)| name.clone()).collect()
 +    }
 +
 +    fn remove_file(&mut self, name: &str) {
 +        let index = self
 +            .entries
 +            .iter()
 +            .position(|(entry_name, _)| entry_name == name)
 +            .expect("Tried to remove file not existing in src archive");
 +        self.entries.remove(index);
 +    }
 +
 +    fn add_file(&mut self, file: &Path) {
 +        self.entries.push((
 +            file.file_name().unwrap().to_str().unwrap().to_string(),
 +            ArchiveEntry::File(file.to_owned()),
 +        ));
 +    }
 +
 +    fn add_native_library(&mut self, name: rustc_span::symbol::Symbol) {
 +        let location = find_library(name, &self.lib_search_paths, self.sess);
 +        self.add_archive(location.clone(), |_| false)
 +            .unwrap_or_else(|e| {
 +                panic!(
 +                    "failed to add native library {}: {}",
 +                    location.to_string_lossy(),
 +                    e
 +                );
 +            });
 +    }
 +
 +    fn add_rlib(
 +        &mut self,
 +        rlib: &Path,
 +        name: &str,
 +        lto: bool,
 +        skip_objects: bool,
 +    ) -> std::io::Result<()> {
 +        let obj_start = name.to_owned();
 +
 +        self.add_archive(rlib.to_owned(), move |fname: &str| {
 +            // Ignore metadata files, no matter the name.
 +            if fname == METADATA_FILENAME {
 +                return true;
 +            }
 +
 +            // Don't include Rust objects if LTO is enabled
 +            if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
 +                return true;
 +            }
 +
 +            // Otherwise if this is *not* a rust object and we're skipping
 +            // objects then skip this file
 +            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
 +                return true;
 +            }
 +
 +            // ok, don't skip this
++            false
 +        })
 +    }
 +
 +    fn update_symbols(&mut self) {
 +        self.update_symbols = true;
 +    }
 +
 +    fn build(mut self) {
 +        enum BuilderKind {
 +            Bsd(ar::Builder<File>),
 +            Gnu(ar::GnuBuilder<File>),
 +        }
 +
 +        let sess = self.sess;
 +
 +        let mut symbol_table = BTreeMap::new();
 +
 +        let mut entries = Vec::new();
 +
 +        for (entry_name, entry) in self.entries {
 +            // FIXME only read the symbol table of the object files to avoid having to keep all
 +            // object files in memory at once, or read them twice.
 +            let data = match entry {
 +                ArchiveEntry::FromArchive {
 +                    archive_index,
 +                    entry_index,
 +                } => {
 +                    // FIXME read symbols from symtab
 +                    use std::io::Read;
 +                    let (ref _src_archive_path, ref mut src_archive) =
 +                        self.src_archives[archive_index];
 +                    let mut entry = src_archive.jump_to_entry(entry_index).unwrap();
 +                    let mut data = Vec::new();
 +                    entry.read_to_end(&mut data).unwrap();
 +                    data
 +                }
 +                ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
 +                    sess.fatal(&format!(
 +                        "error while reading object file during archive building: {}",
 +                        err
 +                    ));
 +                }),
 +            };
 +
 +            if !self.no_builtin_ranlib {
 +                match object::File::parse(&data) {
 +                    Ok(object) => {
 +                        symbol_table.insert(
 +                            entry_name.as_bytes().to_vec(),
 +                            object
 +                                .symbols()
 +                                .filter_map(|(_index, symbol)| {
 +                                    if symbol.is_undefined()
 +                                        || symbol.is_local()
 +                                        || symbol.kind() != SymbolKind::Data
 +                                            && symbol.kind() != SymbolKind::Text
 +                                            && symbol.kind() != SymbolKind::Tls
 +                                    {
 +                                        None
 +                                    } else {
 +                                        symbol.name().map(|name| name.as_bytes().to_vec())
 +                                    }
 +                                })
 +                                .collect::<Vec<_>>(),
 +                        );
 +                    }
 +                    Err(err) => {
 +                        let err = err.to_string();
 +                        if err == "Unknown file magic" {
 +                            // Not an object file; skip it.
 +                        } else {
 +                            sess.fatal(&format!(
 +                                "error parsing `{}` during archive creation: {}",
 +                                entry_name, err
 +                            ));
 +                        }
 +                    }
 +                }
 +            }
 +
 +            entries.push((entry_name, data));
 +        }
 +
 +        let mut builder = if self.use_gnu_style_archive {
 +            BuilderKind::Gnu(
 +                ar::GnuBuilder::new(
 +                    File::create(&self.dst).unwrap_or_else(|err| {
 +                        sess.fatal(&format!(
 +                            "error opening destination during archive building: {}",
 +                            err
 +                        ));
 +                    }),
 +                    entries
 +                        .iter()
 +                        .map(|(name, _)| name.as_bytes().to_vec())
 +                        .collect(),
 +                    ar::GnuSymbolTableFormat::Size32,
 +                    symbol_table,
 +                )
 +                .unwrap(),
 +            )
 +        } else {
 +            BuilderKind::Bsd(
 +                ar::Builder::new(
 +                    File::create(&self.dst).unwrap_or_else(|err| {
 +                        sess.fatal(&format!(
 +                            "error opening destination during archive building: {}",
 +                            err
 +                        ));
 +                    }),
 +                    symbol_table,
 +                )
 +                .unwrap(),
 +            )
 +        };
 +
 +        // Add all files
 +        for (entry_name, data) in entries.into_iter() {
 +            let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
 +            match builder {
 +                BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
 +                BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
 +            }
 +        }
 +
 +        // Finalize archive
 +        std::mem::drop(builder);
 +
 +        if self.no_builtin_ranlib {
 +            let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
 +
 +            // Run ranlib to be able to link the archive
 +            let status = std::process::Command::new(ranlib)
 +                .arg(self.dst)
 +                .status()
 +                .expect("Couldn't run ranlib");
 +
 +            if !status.success() {
 +                self.sess
 +                    .fatal(&format!("Ranlib exited with code {:?}", status.code()));
 +            }
 +        }
 +    }
 +}
 +
 +impl<'a> ArArchiveBuilder<'a> {
 +    fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
 +    where
 +        F: FnMut(&str) -> bool + 'static,
 +    {
 +        let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
 +        let archive_index = self.src_archives.len();
 +
 +        let mut i = 0;
 +        while let Some(entry) = archive.next_entry() {
 +            let entry = entry?;
 +            let file_name = String::from_utf8(entry.header().identifier().to_vec())
 +                .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
 +            if !skip(&file_name) {
 +                self.entries.push((
 +                    file_name,
 +                    ArchiveEntry::FromArchive {
 +                        archive_index,
 +                        entry_index: i,
 +                    },
 +                ));
 +            }
 +            i += 1;
 +        }
 +
 +        self.src_archives.push((archive_path, archive));
 +        Ok(())
 +    }
 +}
index 92281fdacc941ea719593352fec253f87217ecb5,0000000000000000000000000000000000000000..2f0157c257b98cb74ad02b18ab1e02b2ed5d629a
mode 100644,000000..100644
--- /dev/null
@@@ -1,186 -1,0 +1,186 @@@
- pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
 +//! Atomic intrinsics are implemented using a global lock for now, as Cranelift doesn't support
 +//! atomic operations yet.
 +
 +// FIXME implement atomic instructions in Cranelift.
 +
 +use crate::prelude::*;
 +
 +#[cfg(all(feature = "jit", unix))]
 +#[no_mangle]
++static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
 +    libc::PTHREAD_MUTEX_INITIALIZER;
 +
 +pub(crate) fn init_global_lock(
 +    module: &mut impl Module,
 +    bcx: &mut FunctionBuilder<'_>,
 +    use_jit: bool,
 +) {
 +    if use_jit {
 +        // When using JIT, dylibs won't find the __cg_clif_global_atomic_mutex data object defined here,
 +        // so instead we define it in the cg_clif dylib.
 +
 +        return;
 +    }
 +
 +    let mut data_ctx = DataContext::new();
 +    data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
 +    data_ctx.set_align(16);
 +    let atomic_mutex = module
 +        .declare_data(
 +            "__cg_clif_global_atomic_mutex",
 +            Linkage::Export,
 +            true,
 +            false,
 +        )
 +        .unwrap();
 +    module.define_data(atomic_mutex, &data_ctx).unwrap();
 +
 +    let pthread_mutex_init = module
 +        .declare_function(
 +            "pthread_mutex_init",
 +            Linkage::Import,
 +            &cranelift_codegen::ir::Signature {
 +                call_conv: module.target_config().default_call_conv,
 +                params: vec![
 +                    AbiParam::new(
 +                        module.target_config().pointer_type(), /* *mut pthread_mutex_t */
 +                    ),
 +                    AbiParam::new(
 +                        module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
 +                    ),
 +                ],
 +                returns: vec![AbiParam::new(types::I32 /* c_int */)],
 +            },
 +        )
 +        .unwrap();
 +
 +    let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
 +
 +    let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
 +    let atomic_mutex = bcx
 +        .ins()
 +        .global_value(module.target_config().pointer_type(), atomic_mutex);
 +
 +    let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
 +
 +    bcx.ins().call(pthread_mutex_init, &[atomic_mutex, nullptr]);
 +}
 +
 +pub(crate) fn init_global_lock_constructor(
 +    module: &mut impl Module,
 +    constructor_name: &str,
 +) -> FuncId {
 +    let sig = Signature::new(CallConv::SystemV);
 +    let init_func_id = module
 +        .declare_function(constructor_name, Linkage::Export, &sig)
 +        .unwrap();
 +
 +    let mut ctx = Context::new();
 +    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
 +    {
 +        let mut func_ctx = FunctionBuilderContext::new();
 +        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +        let block = bcx.create_block();
 +        bcx.switch_to_block(block);
 +
 +        crate::atomic_shim::init_global_lock(module, &mut bcx, false);
 +
 +        bcx.ins().return_(&[]);
 +        bcx.seal_all_blocks();
 +        bcx.finalize();
 +    }
 +    module
 +        .define_function(
 +            init_func_id,
 +            &mut ctx,
 +            &mut cranelift_codegen::binemit::NullTrapSink {},
 +        )
 +        .unwrap();
 +
 +    init_func_id
 +}
 +
 +pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
 +    let atomic_mutex = fx
 +        .cx
 +        .module
 +        .declare_data(
 +            "__cg_clif_global_atomic_mutex",
 +            Linkage::Import,
 +            true,
 +            false,
 +        )
 +        .unwrap();
 +
 +    let pthread_mutex_lock = fx
 +        .cx
 +        .module
 +        .declare_function(
 +            "pthread_mutex_lock",
 +            Linkage::Import,
 +            &cranelift_codegen::ir::Signature {
 +                call_conv: fx.cx.module.target_config().default_call_conv,
 +                params: vec![AbiParam::new(
 +                    fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
 +                )],
 +                returns: vec![AbiParam::new(types::I32 /* c_int */)],
 +            },
 +        )
 +        .unwrap();
 +
 +    let pthread_mutex_lock = fx
 +        .cx
 +        .module
 +        .declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
 +
 +    let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
 +    let atomic_mutex = fx
 +        .bcx
 +        .ins()
 +        .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
 +
 +    fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
 +}
 +
 +pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Module>) {
 +    let atomic_mutex = fx
 +        .cx
 +        .module
 +        .declare_data(
 +            "__cg_clif_global_atomic_mutex",
 +            Linkage::Import,
 +            true,
 +            false,
 +        )
 +        .unwrap();
 +
 +    let pthread_mutex_unlock = fx
 +        .cx
 +        .module
 +        .declare_function(
 +            "pthread_mutex_unlock",
 +            Linkage::Import,
 +            &cranelift_codegen::ir::Signature {
 +                call_conv: fx.cx.module.target_config().default_call_conv,
 +                params: vec![AbiParam::new(
 +                    fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
 +                )],
 +                returns: vec![AbiParam::new(types::I32 /* c_int */)],
 +            },
 +        )
 +        .unwrap();
 +
 +    let pthread_mutex_unlock = fx
 +        .cx
 +        .module
 +        .declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
 +
 +    let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
 +    let atomic_mutex = fx
 +        .bcx
 +        .ins()
 +        .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
 +
 +    fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
 +}
index 8b900fd0dd0c8b58b245e21aa579cf9b32f49e14,0000000000000000000000000000000000000000..9e32259716f5105508d59d8bc0edbc01a5b30f7c
mode 100644,000000..100644
--- /dev/null
@@@ -1,206 -1,0 +1,206 @@@
-             name.clone(),
 +//! Abstraction around the object writing crate
 +
 +use std::convert::{TryFrom, TryInto};
 +
 +use rustc_data_structures::fx::FxHashMap;
 +use rustc_session::Session;
 +
 +use cranelift_module::FuncId;
 +
 +use object::write::*;
 +use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
 +
 +use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
 +
 +use gimli::SectionId;
 +
 +use crate::debuginfo::{DebugReloc, DebugRelocName};
 +
 +pub(crate) trait WriteMetadata {
 +    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool);
 +}
 +
 +impl WriteMetadata for object::write::Object {
 +    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
 +        let segment = self
 +            .segment_name(object::write::StandardSegment::Data)
 +            .to_vec();
 +        let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
 +        let offset = self.append_section_data(section_id, &data, 1);
 +        // For MachO and probably PE this is necessary to prevent the linker from throwing away the
 +        // .rustc section. For ELF this isn't necessary, but it also doesn't harm.
 +        self.add_symbol(object::write::Symbol {
 +            name: symbol_name.into_bytes(),
 +            value: offset,
 +            size: data.len() as u64,
 +            kind: object::SymbolKind::Data,
 +            scope: object::SymbolScope::Dynamic,
 +            weak: false,
 +            section: SymbolSection::Section(section_id),
 +            flags: SymbolFlags::None,
 +        });
 +    }
 +}
 +
 +pub(crate) trait WriteDebugInfo {
 +    type SectionId: Copy;
 +
 +    fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
 +    fn add_debug_reloc(
 +        &mut self,
 +        section_map: &FxHashMap<SectionId, Self::SectionId>,
 +        from: &Self::SectionId,
 +        reloc: &DebugReloc,
 +    );
 +}
 +
 +impl WriteDebugInfo for ObjectProduct {
 +    type SectionId = (object::write::SectionId, object::write::SymbolId);
 +
 +    fn add_debug_section(
 +        &mut self,
 +        id: SectionId,
 +        data: Vec<u8>,
 +    ) -> (object::write::SectionId, object::write::SymbolId) {
 +        let name = if self.object.format() == object::BinaryFormat::MachO {
 +            id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
 +        } else {
 +            id.name().to_string()
 +        }
 +        .into_bytes();
 +
 +        let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
 +        // FIXME use SHT_X86_64_UNWIND for .eh_frame
 +        let section_id = self.object.add_section(
 +            segment,
-     if std::env::var("CG_CLIF_FUNCTION_SECTIONS").is_ok() {
-         builder.per_function_section(true);
-     }
-     let module = ObjectModule::new(builder);
-     module
++            name,
 +            if id == SectionId::EhFrame {
 +                SectionKind::ReadOnlyData
 +            } else {
 +                SectionKind::Debug
 +            },
 +        );
 +        self.object
 +            .section_mut(section_id)
 +            .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
 +        let symbol_id = self.object.section_symbol(section_id);
 +        (section_id, symbol_id)
 +    }
 +
 +    fn add_debug_reloc(
 +        &mut self,
 +        section_map: &FxHashMap<SectionId, Self::SectionId>,
 +        from: &Self::SectionId,
 +        reloc: &DebugReloc,
 +    ) {
 +        let (symbol, symbol_offset) = match reloc.name {
 +            DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
 +            DebugRelocName::Symbol(id) => {
 +                let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
 +                self.object
 +                    .symbol_section_and_offset(symbol_id)
 +                    .expect("Debug reloc for undef sym???")
 +            }
 +        };
 +        self.object
 +            .add_relocation(
 +                from.0,
 +                Relocation {
 +                    offset: u64::from(reloc.offset),
 +                    symbol,
 +                    kind: reloc.kind,
 +                    encoding: RelocationEncoding::Generic,
 +                    size: reloc.size * 8,
 +                    addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
 +                },
 +            )
 +            .unwrap();
 +    }
 +}
 +
 +// FIXME remove once atomic instructions are implemented in Cranelift.
 +pub(crate) trait AddConstructor {
 +    fn add_constructor(&mut self, func_id: FuncId);
 +}
 +
 +impl AddConstructor for ObjectProduct {
 +    fn add_constructor(&mut self, func_id: FuncId) {
 +        let symbol = self.function_symbol(func_id);
 +        let segment = self
 +            .object
 +            .segment_name(object::write::StandardSegment::Data);
 +        let init_array_section =
 +            self.object
 +                .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
 +        let address_size = self
 +            .object
 +            .architecture()
 +            .address_size()
 +            .expect("address_size must be known")
 +            .bytes();
 +        self.object.append_section_data(
 +            init_array_section,
 +            &std::iter::repeat(0)
 +                .take(address_size.into())
 +                .collect::<Vec<u8>>(),
 +            8,
 +        );
 +        self.object
 +            .add_relocation(
 +                init_array_section,
 +                object::write::Relocation {
 +                    offset: 0,
 +                    size: address_size * 8,
 +                    kind: RelocationKind::Absolute,
 +                    encoding: RelocationEncoding::Generic,
 +                    symbol,
 +                    addend: 0,
 +                },
 +            )
 +            .unwrap();
 +    }
 +}
 +
 +pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
 +    let triple = crate::build_isa(sess, true).triple().clone();
 +
 +    let binary_format = match triple.binary_format {
 +        target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
 +        target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
 +        target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
 +        binary_format => sess.fatal(&format!("binary format {} is unsupported", binary_format)),
 +    };
 +    let architecture = match triple.architecture {
 +        target_lexicon::Architecture::X86_32(_) => object::Architecture::I386,
 +        target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
 +        target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
 +        target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
 +        architecture => sess.fatal(&format!(
 +            "target architecture {:?} is unsupported",
 +            architecture,
 +        )),
 +    };
 +    let endian = match triple.endianness().unwrap() {
 +        target_lexicon::Endianness::Little => object::Endianness::Little,
 +        target_lexicon::Endianness::Big => object::Endianness::Big,
 +    };
 +
 +    let mut metadata_object = object::write::Object::new(binary_format, architecture, endian);
 +    metadata_object.add_file_symbol(name.as_bytes().to_vec());
 +    f(&mut metadata_object);
 +    metadata_object.write().unwrap()
 +}
 +
 +pub(crate) fn make_module(sess: &Session, name: String) -> ObjectModule {
 +    let mut builder = ObjectBuilder::new(
 +        crate::build_isa(sess, true),
 +        name + ".o",
 +        cranelift_module::default_libcall_names(),
 +    )
 +    .unwrap();
++    // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
++    // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
++    // can easily double the amount of time necessary to perform linking.
++    builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
++    ObjectModule::new(builder)
 +}
index fa9b8853d39e9eec25bef7090c838d3dc918c8f0,0000000000000000000000000000000000000000..5474e5960f100a03f9626a8ff3e504db380adf41
mode 100644,000000..100644
--- /dev/null
@@@ -1,1020 -1,0 +1,1020 @@@
- pub(crate) fn trans_fn<'tcx>(
 +//! Codegen of a single function
 +
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::adjustment::PointerCast;
 +
 +use crate::prelude::*;
 +
-             trans_stmt(fx, block, stmt);
++pub(crate) fn codegen_fn<'tcx>(
 +    cx: &mut crate::CodegenCx<'tcx, impl Module>,
 +    instance: Instance<'tcx>,
 +    linkage: Linkage,
 +) {
 +    let tcx = cx.tcx;
 +
 +    let mir = tcx.instance_mir(instance.def);
 +
 +    // Declare function
 +    let (name, sig) = get_function_name_and_sig(tcx, cx.module.isa().triple(), instance, false);
 +    let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap();
 +
 +    cx.cached_context.clear();
 +
 +    // Make the FunctionBuilder
 +    let mut func_ctx = FunctionBuilderContext::new();
 +    let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
 +    func.name = ExternalName::user(0, func_id.as_u32());
 +    func.signature = sig;
 +    func.collect_debug_info();
 +
 +    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
 +
 +    // Predefine blocks
 +    let start_block = bcx.create_block();
 +    let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
 +        .map(|_| bcx.create_block())
 +        .collect();
 +
 +    // Make FunctionCx
 +    let pointer_type = cx.module.target_config().pointer_type();
 +    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
 +
 +    let mut fx = FunctionCx {
 +        cx,
 +        tcx,
 +        pointer_type,
 +
 +        instance,
 +        mir,
 +
 +        bcx,
 +        block_map,
 +        local_map: IndexVec::with_capacity(mir.local_decls.len()),
 +        caller_location: None, // set by `codegen_fn_prelude`
 +        cold_blocks: EntitySet::new(),
 +
 +        clif_comments,
 +        source_info_set: indexmap::IndexSet::new(),
 +        next_ssa_var: 0,
 +
 +        inline_asm_index: 0,
 +    };
 +
 +    let arg_uninhabited = fx.mir.args_iter().any(|arg| {
 +        fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
 +            .abi
 +            .is_uninhabited()
 +    });
 +
 +    if arg_uninhabited {
 +        fx.bcx
 +            .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
 +    } else {
 +        tcx.sess.time("codegen clif ir", || {
 +            tcx.sess.time("codegen prelude", || {
 +                crate::abi::codegen_fn_prelude(&mut fx, start_block)
 +            });
 +            codegen_fn_content(&mut fx);
 +        });
 +    }
 +
 +    // Recover all necessary data from fx, before accessing func will prevent future access to it.
 +    let instance = fx.instance;
 +    let mut clif_comments = fx.clif_comments;
 +    let source_info_set = fx.source_info_set;
 +    let local_map = fx.local_map;
 +    let cold_blocks = fx.cold_blocks;
 +
 +    // Store function in context
 +    let context = &mut cx.cached_context;
 +    context.func = func;
 +
 +    crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
 +
 +    // Verify function
 +    verify_func(tcx, &clif_comments, &context.func);
 +
 +    // Perform rust specific optimizations
 +    tcx.sess.time("optimize clif ir", || {
 +        crate::optimize::optimize_function(
 +            tcx,
 +            instance,
 +            context,
 +            &cold_blocks,
 +            &mut clif_comments,
 +        );
 +    });
 +
 +    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
 +    // instruction, which doesn't have an encoding.
 +    context.compute_cfg();
 +    context.compute_domtree();
 +    context.eliminate_unreachable_code(cx.module.isa()).unwrap();
 +    context.dce(cx.module.isa()).unwrap();
 +
 +    // Define function
 +    let module = &mut cx.module;
 +    tcx.sess.time("define function", || {
 +        module
 +            .define_function(
 +                func_id,
 +                context,
 +                &mut cranelift_codegen::binemit::NullTrapSink {},
 +            )
 +            .unwrap()
 +    });
 +
 +    // Write optimized function to file for debugging
 +    crate::pretty_clif::write_clif_file(
 +        tcx,
 +        "opt",
 +        Some(cx.module.isa()),
 +        instance,
 +        &context,
 +        &clif_comments,
 +    );
 +
 +    // Define debuginfo for function
 +    let isa = cx.module.isa();
 +    let debug_context = &mut cx.debug_context;
 +    let unwind_context = &mut cx.unwind_context;
 +    tcx.sess.time("generate debug info", || {
 +        if let Some(debug_context) = debug_context {
 +            debug_context.define_function(
 +                instance,
 +                func_id,
 +                &name,
 +                isa,
 +                context,
 +                &source_info_set,
 +                local_map,
 +            );
 +        }
 +        unwind_context.add_function(func_id, &context, isa);
 +    });
 +
 +    // Clear context to make it usable for the next function
 +    context.clear();
 +}
 +
 +pub(crate) fn verify_func(
 +    tcx: TyCtxt<'_>,
 +    writer: &crate::pretty_clif::CommentWriter,
 +    func: &Function,
 +) {
 +    tcx.sess.time("verify clif ir", || {
 +        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
 +        match cranelift_codegen::verify_function(&func, &flags) {
 +            Ok(_) => {}
 +            Err(err) => {
 +                tcx.sess.err(&format!("{:?}", err));
 +                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
 +                    &func,
 +                    None,
 +                    Some(Box::new(writer)),
 +                    err,
 +                );
 +                tcx.sess
 +                    .fatal(&format!("cranelift verify error:\n{}", pretty_error));
 +            }
 +        }
 +    });
 +}
 +
 +fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Module>) {
 +    crate::constant::check_constants(fx);
 +
 +    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
 +        let block = fx.get_block(bb);
 +        fx.bcx.switch_to_block(block);
 +
 +        if bb_data.is_cleanup {
 +            // Unwinding after panicking is not supported
 +            continue;
 +
 +            // FIXME once unwinding is supported uncomment next lines
 +            // // Unwinding is unlikely to happen, so mark cleanup block's as cold.
 +            // fx.cold_blocks.insert(block);
 +        }
 +
 +        fx.bcx.ins().nop();
 +        for stmt in &bb_data.statements {
 +            fx.set_debug_loc(stmt.source_info);
-                 let cond = trans_operand(fx, cond).load_scalar(fx);
++            codegen_stmt(fx, block, stmt);
 +        }
 +
 +        #[cfg(debug_assertions)]
 +        {
 +            let mut terminator_head = "\n".to_string();
 +            bb_data
 +                .terminator()
 +                .kind
 +                .fmt_head(&mut terminator_head)
 +                .unwrap();
 +            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
 +            fx.add_comment(inst, terminator_head);
 +        }
 +
 +        fx.set_debug_loc(bb_data.terminator().source_info);
 +
 +        match &bb_data.terminator().kind {
 +            TerminatorKind::Goto { target } => {
 +                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
 +                    let mut can_immediately_return = true;
 +                    for stmt in &fx.mir[*target].statements {
 +                        if let StatementKind::StorageDead(_) = stmt.kind {
 +                        } else {
 +                            // FIXME Can sometimes happen, see rust-lang/rust#70531
 +                            can_immediately_return = false;
 +                            break;
 +                        }
 +                    }
 +
 +                    if can_immediately_return {
 +                        crate::abi::codegen_return(fx);
 +                        continue;
 +                    }
 +                }
 +
 +                let block = fx.get_block(*target);
 +                fx.bcx.ins().jump(block, &[]);
 +            }
 +            TerminatorKind::Return => {
 +                crate::abi::codegen_return(fx);
 +            }
 +            TerminatorKind::Assert {
 +                cond,
 +                expected,
 +                msg,
 +                target,
 +                cleanup: _,
 +            } => {
 +                if !fx.tcx.sess.overflow_checks() {
 +                    if let mir::AssertKind::OverflowNeg(_) = *msg {
 +                        let target = fx.get_block(*target);
 +                        fx.bcx.ins().jump(target, &[]);
 +                        continue;
 +                    }
 +                }
-                         let len = trans_operand(fx, len).load_scalar(fx);
-                         let index = trans_operand(fx, index).load_scalar(fx);
++                let cond = codegen_operand(fx, cond).load_scalar(fx);
 +
 +                let target = fx.get_block(*target);
 +                let failure = fx.bcx.create_block();
 +                fx.cold_blocks.insert(failure);
 +
 +                if *expected {
 +                    fx.bcx.ins().brz(cond, failure, &[]);
 +                } else {
 +                    fx.bcx.ins().brnz(cond, failure, &[]);
 +                };
 +                fx.bcx.ins().jump(target, &[]);
 +
 +                fx.bcx.switch_to_block(failure);
 +                fx.bcx.ins().nop();
 +
 +                match msg {
 +                    AssertKind::BoundsCheck { ref len, ref index } => {
-                 let discr = trans_operand(fx, discr).load_scalar(fx);
++                        let len = codegen_operand(fx, len).load_scalar(fx);
++                        let index = codegen_operand(fx, index).load_scalar(fx);
 +                        let location = fx
 +                            .get_caller_location(bb_data.terminator().source_info.span)
 +                            .load_scalar(fx);
 +
 +                        codegen_panic_inner(
 +                            fx,
 +                            rustc_hir::LangItem::PanicBoundsCheck,
 +                            &[index, len, location],
 +                            bb_data.terminator().source_info.span,
 +                        );
 +                    }
 +                    _ => {
 +                        let msg_str = msg.description();
 +                        codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
 +                    }
 +                }
 +            }
 +
 +            TerminatorKind::SwitchInt {
 +                discr,
 +                switch_ty,
 +                targets,
 +            } => {
-                 bug!("shouldn't exist at trans {:?}", bb_data.terminator());
++                let discr = codegen_operand(fx, discr).load_scalar(fx);
 +
 +                if switch_ty.kind() == fx.tcx.types.bool.kind() {
 +                    assert_eq!(targets.iter().count(), 1);
 +                    let (then_value, then_block) = targets.iter().next().unwrap();
 +                    let then_block = fx.get_block(then_block);
 +                    let else_block = fx.get_block(targets.otherwise());
 +                    let test_zero = match then_value {
 +                        0 => true,
 +                        1 => false,
 +                        _ => unreachable!("{:?}", targets),
 +                    };
 +
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    let (discr, is_inverted) =
 +                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
 +                    let test_zero = if is_inverted { !test_zero } else { test_zero };
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    let discr =
 +                        crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
 +                    if test_zero {
 +                        fx.bcx.ins().brz(discr, then_block, &[]);
 +                        fx.bcx.ins().jump(else_block, &[]);
 +                    } else {
 +                        fx.bcx.ins().brnz(discr, then_block, &[]);
 +                        fx.bcx.ins().jump(else_block, &[]);
 +                    }
 +                } else {
 +                    let mut switch = ::cranelift_frontend::Switch::new();
 +                    for (value, block) in targets.iter() {
 +                        let block = fx.get_block(block);
 +                        switch.set_entry(value, block);
 +                    }
 +                    let otherwise_block = fx.get_block(targets.otherwise());
 +                    switch.emit(&mut fx.bcx, discr, otherwise_block);
 +                }
 +            }
 +            TerminatorKind::Call {
 +                func,
 +                args,
 +                destination,
 +                fn_span,
 +                cleanup: _,
 +                from_hir_call: _,
 +            } => {
 +                fx.tcx.sess.time("codegen call", || {
 +                    crate::abi::codegen_terminator_call(
 +                        fx,
 +                        *fn_span,
 +                        block,
 +                        func,
 +                        args,
 +                        *destination,
 +                    )
 +                });
 +            }
 +            TerminatorKind::InlineAsm {
 +                template,
 +                operands,
 +                options,
 +                destination,
 +                line_spans: _,
 +            } => {
 +                crate::inline_asm::codegen_inline_asm(
 +                    fx,
 +                    bb_data.terminator().source_info.span,
 +                    template,
 +                    operands,
 +                    *options,
 +                );
 +
 +                match *destination {
 +                    Some(destination) => {
 +                        let destination_block = fx.get_block(destination);
 +                        fx.bcx.ins().jump(destination_block, &[]);
 +                    }
 +                    None => {
 +                        crate::trap::trap_unreachable(
 +                            fx,
 +                            "[corruption] Returned from noreturn inline asm",
 +                        );
 +                    }
 +                }
 +            }
 +            TerminatorKind::Resume | TerminatorKind::Abort => {
 +                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
 +            }
 +            TerminatorKind::Unreachable => {
 +                trap_unreachable(fx, "[corruption] Hit unreachable code.");
 +            }
 +            TerminatorKind::Yield { .. }
 +            | TerminatorKind::FalseEdge { .. }
 +            | TerminatorKind::FalseUnwind { .. }
 +            | TerminatorKind::DropAndReplace { .. }
 +            | TerminatorKind::GeneratorDrop => {
-                 let drop_place = trans_place(fx, *place);
++                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
 +            }
 +            TerminatorKind::Drop {
 +                place,
 +                target,
 +                unwind: _,
 +            } => {
- fn trans_stmt<'tcx>(
++                let drop_place = codegen_place(fx, *place);
 +                crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
 +
 +                let target_block = fx.get_block(*target);
 +                fx.bcx.ins().jump(target_block, &[]);
 +            }
 +        };
 +    }
 +
 +    fx.bcx.seal_all_blocks();
 +    fx.bcx.finalize();
 +}
 +
-             let place = trans_place(fx, **place);
++fn codegen_stmt<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    #[allow(unused_variables)] cur_block: Block,
 +    stmt: &Statement<'tcx>,
 +) {
 +    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
 +
 +    fx.set_debug_loc(stmt.source_info);
 +
 +    #[cfg(false_debug_assertions)]
 +    match &stmt.kind {
 +        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
 +        _ => {
 +            let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
 +            fx.add_comment(inst, format!("{:?}", stmt));
 +        }
 +    }
 +
 +    match &stmt.kind {
 +        StatementKind::SetDiscriminant {
 +            place,
 +            variant_index,
 +        } => {
-             let lval = trans_place(fx, to_place_and_rval.0);
++            let place = codegen_place(fx, **place);
 +            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
 +        }
 +        StatementKind::Assign(to_place_and_rval) => {
-                     let val = trans_operand(fx, operand);
++            let lval = codegen_place(fx, to_place_and_rval.0);
 +            let dest_layout = lval.layout();
 +            match &to_place_and_rval.1 {
 +                Rvalue::Use(operand) => {
-                     let place = trans_place(fx, *place);
++                    let val = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
-                     let lhs = trans_operand(fx, lhs);
-                     let rhs = trans_operand(fx, rhs);
++                    let place = codegen_place(fx, *place);
 +                    let ref_ = place.place_ref(fx, lval.layout());
 +                    lval.write_cvalue(fx, ref_);
 +                }
 +                Rvalue::ThreadLocalRef(def_id) => {
 +                    let val = crate::constant::codegen_tls_ref(fx, *def_id, lval.layout());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::BinaryOp(bin_op, lhs, rhs) => {
-                     let lhs = trans_operand(fx, lhs);
-                     let rhs = trans_operand(fx, rhs);
++                    let lhs = codegen_operand(fx, lhs);
++                    let rhs = codegen_operand(fx, rhs);
 +
 +                    let res = crate::num::codegen_binop(fx, *bin_op, lhs, rhs);
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::CheckedBinaryOp(bin_op, lhs, rhs) => {
-                             crate::num::trans_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx);
++                    let lhs = codegen_operand(fx, lhs);
++                    let rhs = codegen_operand(fx, rhs);
 +
 +                    let res = if !fx.tcx.sess.overflow_checks() {
 +                        let val =
-                         crate::num::trans_checked_int_binop(fx, *bin_op, lhs, rhs)
++                            crate::num::codegen_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx);
 +                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
 +                        CValue::by_val_pair(val, is_overflow, lval.layout())
 +                    } else {
-                     let operand = trans_operand(fx, operand);
++                        crate::num::codegen_checked_int_binop(fx, *bin_op, lhs, rhs)
 +                    };
 +
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::UnaryOp(un_op, operand) => {
-                                 crate::num::trans_int_binop(fx, BinOp::Sub, zero, operand)
++                    let operand = codegen_operand(fx, operand);
 +                    let layout = operand.layout();
 +                    let val = operand.load_scalar(fx);
 +                    let res = match un_op {
 +                        UnOp::Not => match layout.ty.kind() {
 +                            ty::Bool => {
 +                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
 +                                CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
 +                            }
 +                            ty::Uint(_) | ty::Int(_) => {
 +                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
 +                            }
 +                            _ => unreachable!("un op Not for {:?}", layout.ty),
 +                        },
 +                        UnOp::Neg => match layout.ty.kind() {
 +                            ty::Int(IntTy::I128) => {
 +                                // FIXME remove this case once ineg.i128 works
 +                                let zero = CValue::const_val(fx, layout, 0);
-                     let operand = trans_operand(fx, operand);
++                                crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
 +                            }
 +                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
 +                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
 +                            _ => unreachable!("un op Neg for {:?}", layout.ty),
 +                        },
 +                    };
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, to_ty) => {
 +                    let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.tcx));
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    match *from_ty.kind() {
 +                        ty::FnDef(def_id, substs) => {
 +                            let func_ref = fx.get_function_ref(
 +                                Instance::resolve_for_fn_ptr(
 +                                    fx.tcx,
 +                                    ParamEnv::reveal_all(),
 +                                    def_id,
 +                                    substs,
 +                                )
 +                                .unwrap()
 +                                .polymorphize(fx.tcx),
 +                            );
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
 +                        }
 +                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
 +                    }
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), operand, to_ty)
 +                | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), operand, to_ty)
 +                | Rvalue::Cast(CastKind::Pointer(PointerCast::ArrayToPointer), operand, to_ty) => {
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
-                     let operand = trans_operand(fx, operand);
++                    let operand = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
 +                }
 +                Rvalue::Cast(CastKind::Misc, operand, to_ty) => {
-                     let operand = trans_operand(fx, operand);
++                    let operand = codegen_operand(fx, operand);
 +                    let from_ty = operand.layout().ty;
 +                    let to_ty = fx.monomorphize(to_ty);
 +
 +                    fn is_fat_ptr<'tcx>(
 +                        fx: &FunctionCx<'_, 'tcx, impl Module>,
 +                        ty: Ty<'tcx>,
 +                    ) -> bool {
 +                        ty.builtin_deref(true)
 +                            .map(
 +                                |ty::TypeAndMut {
 +                                     ty: pointee_ty,
 +                                     mutbl: _,
 +                                 }| {
 +                                    has_ptr_meta(fx.tcx, pointee_ty)
 +                                },
 +                            )
 +                            .unwrap_or(false)
 +                    }
 +
 +                    if is_fat_ptr(fx, from_ty) {
 +                        if is_fat_ptr(fx, to_ty) {
 +                            // fat-ptr -> fat-ptr
 +                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
 +                        } else {
 +                            // fat-ptr -> thin-ptr
 +                            let (ptr, _extra) = operand.load_scalar_pair(fx);
 +                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
 +                        }
 +                    } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
 +                        // enum -> discriminant value
 +                        assert!(adt_def.is_enum());
 +                        match to_ty.kind() {
 +                            ty::Uint(_) | ty::Int(_) => {}
 +                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
 +                        }
 +
 +                        use rustc_target::abi::{Int, TagEncoding, Variants};
 +
 +                        match &operand.layout().variants {
 +                            Variants::Single { index } => {
 +                                let discr = operand
 +                                    .layout()
 +                                    .ty
 +                                    .discriminant_for_variant(fx.tcx, *index)
 +                                    .unwrap();
 +                                let discr = if discr.ty.is_signed() {
 +                                    rustc_middle::mir::interpret::sign_extend(
 +                                        discr.val,
 +                                        fx.layout_of(discr.ty).size,
 +                                    )
 +                                } else {
 +                                    discr.val
 +                                };
 +
 +                                let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
 +                                lval.write_cvalue(fx, discr);
 +                            }
 +                            Variants::Multiple {
 +                                tag,
 +                                tag_field,
 +                                tag_encoding: TagEncoding::Direct,
 +                                variants: _,
 +                            } => {
 +                                let cast_to = fx.clif_type(dest_layout.ty).unwrap();
 +
 +                                // Read the tag/niche-encoded discriminant from memory.
 +                                let encoded_discr =
 +                                    operand.value_field(fx, mir::Field::new(*tag_field));
 +                                let encoded_discr = encoded_discr.load_scalar(fx);
 +
 +                                // Decode the discriminant (specifically if it's niche-encoded).
 +                                let signed = match tag.value {
 +                                    Int(_, signed) => signed,
 +                                    _ => false,
 +                                };
 +                                let val = clif_intcast(fx, encoded_discr, cast_to, signed);
 +                                let val = CValue::by_val(val, dest_layout);
 +                                lval.write_cvalue(fx, val);
 +                            }
 +                            Variants::Multiple { .. } => unreachable!(),
 +                        }
 +                    } else {
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +                        let from = operand.load_scalar(fx);
 +
 +                        let res = clif_int_or_float_cast(
 +                            fx,
 +                            from,
 +                            type_sign(from_ty),
 +                            to_clif_ty,
 +                            type_sign(to_ty),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
 +                    operand,
 +                    _to_ty,
 +                ) => {
-                     let operand = trans_operand(fx, operand);
++                    let operand = codegen_operand(fx, operand);
 +                    match *operand.layout().ty.kind() {
 +                        ty::Closure(def_id, substs) => {
 +                            let instance = Instance::resolve_closure(
 +                                fx.tcx,
 +                                def_id,
 +                                substs,
 +                                ty::ClosureKind::FnOnce,
 +                            )
 +                            .polymorphize(fx.tcx);
 +                            let func_ref = fx.get_function_ref(instance);
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
 +                        }
 +                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
 +                    }
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, _to_ty) => {
-                     let place = trans_place(fx, *place);
++                    let operand = codegen_operand(fx, operand);
 +                    operand.unsize_value(fx, lval);
 +                }
 +                Rvalue::Discriminant(place) => {
-                     let operand = trans_operand(fx, operand);
++                    let place = codegen_place(fx, *place);
 +                    let value = place.to_cvalue(fx);
 +                    let discr =
 +                        crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
 +                    lval.write_cvalue(fx, discr);
 +                }
 +                Rvalue::Repeat(operand, times) => {
-                     let place = trans_place(fx, *place);
++                    let operand = codegen_operand(fx, operand);
 +                    let times = fx
 +                        .monomorphize(times)
 +                        .eval(fx.tcx, ParamEnv::reveal_all())
 +                        .val
 +                        .try_to_bits(fx.tcx.data_layout.pointer_size)
 +                        .unwrap();
 +                    if fx.clif_type(operand.layout().ty) == Some(types::I8) {
 +                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
 +                        // FIXME use emit_small_memset where possible
 +                        let addr = lval.to_ptr().get_addr(fx);
 +                        let val = operand.load_scalar(fx);
 +                        fx.bcx
 +                            .call_memset(fx.cx.module.target_config(), addr, val, times);
 +                    } else {
 +                        let loop_block = fx.bcx.create_block();
 +                        let loop_block2 = fx.bcx.create_block();
 +                        let done_block = fx.bcx.create_block();
 +                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
 +                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                        fx.bcx.ins().jump(loop_block, &[zero]);
 +
 +                        fx.bcx.switch_to_block(loop_block);
 +                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
 +                        fx.bcx.ins().brnz(done, done_block, &[]);
 +                        fx.bcx.ins().jump(loop_block2, &[]);
 +
 +                        fx.bcx.switch_to_block(loop_block2);
 +                        let to = lval.place_index(fx, index);
 +                        to.write_cvalue(fx, operand);
 +                        let index = fx.bcx.ins().iadd_imm(index, 1);
 +                        fx.bcx.ins().jump(loop_block, &[index]);
 +
 +                        fx.bcx.switch_to_block(done_block);
 +                        fx.bcx.ins().nop();
 +                    }
 +                }
 +                Rvalue::Len(place) => {
-                         for (i, operand) in operands.into_iter().enumerate() {
-                             let operand = trans_operand(fx, operand);
++                    let place = codegen_place(fx, *place);
 +                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +                    let len = codegen_array_len(fx, place);
 +                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
 +                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
 +                    let content_ty = fx.monomorphize(content_ty);
 +                    let layout = fx.layout_of(content_ty);
 +                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
 +                    let llalign = fx
 +                        .bcx
 +                        .ins()
 +                        .iconst(usize_type, layout.align.abi.bytes() as i64);
 +                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
 +
 +                    // Allocate space:
 +                    let def_id = match fx
 +                        .tcx
 +                        .lang_items()
 +                        .require(rustc_hir::LangItem::ExchangeMalloc)
 +                    {
 +                        Ok(id) => id,
 +                        Err(s) => {
 +                            fx.tcx
 +                                .sess
 +                                .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
 +                        }
 +                    };
 +                    let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +                    let func_ref = fx.get_function_ref(instance);
 +                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
 +                    let ptr = fx.bcx.inst_results(call)[0];
 +                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
 +                    assert!(lval
 +                        .layout()
 +                        .ty
 +                        .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
 +                    let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
 +                    let val =
 +                        CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Aggregate(kind, operands) => match **kind {
 +                    AggregateKind::Array(_ty) => {
-                     _ => unreachable!("shouldn't exist at trans {:?}", to_place_and_rval.1),
++                        for (i, operand) in operands.iter().enumerate() {
++                            let operand = codegen_operand(fx, operand);
 +                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
 +                            let to = lval.place_index(fx, index);
 +                            to.write_cvalue(fx, operand);
 +                        }
 +                    }
-                     let leaf = trans_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
-                     let subleaf = trans_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
++                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
 +                },
 +            }
 +        }
 +        StatementKind::StorageLive(_)
 +        | StatementKind::StorageDead(_)
 +        | StatementKind::Nop
 +        | StatementKind::FakeRead(..)
 +        | StatementKind::Retag { .. }
 +        | StatementKind::AscribeUserType(..) => {}
 +
 +        StatementKind::LlvmInlineAsm(asm) => {
 +            use rustc_span::symbol::Symbol;
 +            let LlvmInlineAsm {
 +                asm,
 +                outputs,
 +                inputs,
 +            } = &**asm;
 +            let rustc_hir::LlvmInlineAsmInner {
 +                asm: asm_code,         // Name
 +                outputs: output_names, // Vec<LlvmInlineAsmOutput>
 +                inputs: input_names,   // Vec<Name>
 +                clobbers,              // Vec<Name>
 +                volatile,              // bool
 +                alignstack,            // bool
 +                dialect: _,
 +                asm_str_style: _,
 +            } = asm;
 +            match asm_code.as_str().trim() {
 +                "" => {
 +                    // Black box
 +                }
 +                "mov %rbx, %rsi\n                  cpuid\n                  xchg %rbx, %rsi" => {
 +                    assert_eq!(
 +                        input_names,
 +                        &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
 +                    );
 +                    assert_eq!(output_names.len(), 4);
 +                    for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
 +                        .iter()
 +                        .enumerate()
 +                    {
 +                        assert_eq!(&output_names[i].constraint.as_str(), c);
 +                        assert!(!output_names[i].is_rw);
 +                        assert!(!output_names[i].is_indirect);
 +                    }
 +
 +                    assert_eq!(clobbers, &[]);
 +
 +                    assert!(!volatile);
 +                    assert!(!alignstack);
 +
 +                    assert_eq!(inputs.len(), 2);
-                     trans_place(fx, outputs[0])
++                    let leaf = codegen_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
++                    let subleaf = codegen_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
 +
 +                    let (eax, ebx, ecx, edx) =
 +                        crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
 +
 +                    assert_eq!(outputs.len(), 4);
-                     trans_place(fx, outputs[1])
++                    codegen_place(fx, outputs[0])
 +                        .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
-                     trans_place(fx, outputs[2])
++                    codegen_place(fx, outputs[1])
 +                        .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
-                     trans_place(fx, outputs[3])
++                    codegen_place(fx, outputs[2])
 +                        .write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
- pub(crate) fn trans_place<'tcx>(
++                    codegen_place(fx, outputs[3])
 +                        .write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
 +                }
 +                "xgetbv" => {
 +                    assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
 +
 +                    assert_eq!(output_names.len(), 2);
 +                    for (i, c) in (&["={eax}", "={edx}"]).iter().enumerate() {
 +                        assert_eq!(&output_names[i].constraint.as_str(), c);
 +                        assert!(!output_names[i].is_rw);
 +                        assert!(!output_names[i].is_indirect);
 +                    }
 +
 +                    assert_eq!(clobbers, &[]);
 +
 +                    assert!(!volatile);
 +                    assert!(!alignstack);
 +
 +                    crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
 +                }
 +                // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
 +                _ if fx
 +                    .tcx
 +                    .symbol_name(fx.instance)
 +                    .name
 +                    .starts_with("___chkstk") =>
 +                {
 +                    crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
 +                }
 +                _ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
 +                    crate::trap::trap_unimplemented(fx, "Alloca is not supported");
 +                }
 +                // Used in sys::windows::abort_internal
 +                "int $$0x29" => {
 +                    crate::trap::trap_unimplemented(fx, "Windows abort");
 +                }
 +                _ => fx
 +                    .tcx
 +                    .sess
 +                    .span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
 +            }
 +        }
 +        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
 +    }
 +}
 +
 +fn codegen_array_len<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    place: CPlace<'tcx>,
 +) -> Value {
 +    match *place.layout().ty.kind() {
 +        ty::Array(_elem_ty, len) => {
 +            let len = fx
 +                .monomorphize(&len)
 +                .eval(fx.tcx, ParamEnv::reveal_all())
 +                .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
 +            fx.bcx.ins().iconst(fx.pointer_type, len)
 +        }
 +        ty::Slice(_elem_ty) => place
 +            .to_ptr_maybe_unsized()
 +            .1
 +            .expect("Length metadata for slice place"),
 +        _ => bug!("Rvalue::Len({:?})", place),
 +    }
 +}
 +
-                             fx.layout_of(fx.tcx.mk_array(elem_ty, u64::from(to) - u64::from(from))),
++pub(crate) fn codegen_place<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    place: Place<'tcx>,
 +) -> CPlace<'tcx> {
 +    let mut cplace = fx.get_local_place(place.local);
 +
 +    for elem in place.projection {
 +        match elem {
 +            PlaceElem::Deref => {
 +                cplace = cplace.place_deref(fx);
 +            }
 +            PlaceElem::Field(field, _ty) => {
 +                cplace = cplace.place_field(fx, field);
 +            }
 +            PlaceElem::Index(local) => {
 +                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::ConstantIndex {
 +                offset,
 +                min_length: _,
 +                from_end,
 +            } => {
 +                let offset: u64 = offset;
 +                let index = if !from_end {
 +                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
 +                } else {
 +                    let len = codegen_array_len(fx, cplace);
 +                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
 +                };
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::Subslice { from, to, from_end } => {
 +                // These indices are generated by slice patterns.
 +                // slice[from:-to] in Python terms.
 +
 +                let from: u64 = from;
 +                let to: u64 = to;
 +
 +                match cplace.layout().ty.kind() {
 +                    ty::Array(elem_ty, _len) => {
 +                        assert!(!from_end, "array subslices are never `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let ptr = cplace.to_ptr();
 +                        cplace = CPlace::for_ptr(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
- pub(crate) fn trans_operand<'tcx>(
++                            fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
 +                        );
 +                    }
 +                    ty::Slice(elem_ty) => {
 +                        assert!(from_end, "slice subslices should be `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
 +                        let len = len.unwrap();
 +                        cplace = CPlace::for_ptr_with_extra(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
 +                            cplace.layout(),
 +                        );
 +                    }
 +                    _ => unreachable!(),
 +                }
 +            }
 +            PlaceElem::Downcast(_adt_def, variant) => {
 +                cplace = cplace.downcast_variant(fx, variant);
 +            }
 +        }
 +    }
 +
 +    cplace
 +}
 +
-             let cplace = trans_place(fx, *place);
++pub(crate) fn codegen_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    operand: &Operand<'tcx>,
 +) -> CValue<'tcx> {
 +    match operand {
 +        Operand::Move(place) | Operand::Copy(place) => {
-         Operand::Constant(const_) => crate::constant::trans_constant(fx, const_),
++            let cplace = codegen_place(fx, *place);
 +            cplace.to_cvalue(fx)
 +        }
++        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
 +    }
 +}
 +
 +pub(crate) fn codegen_panic<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    msg_str: &str,
 +    span: Span,
 +) {
 +    let location = fx.get_caller_location(span).load_scalar(fx);
 +
 +    let msg_ptr = fx.anonymous_str("assert", msg_str);
 +    let msg_len = fx
 +        .bcx
 +        .ins()
 +        .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
 +    let args = [msg_ptr, msg_len, location];
 +
 +    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
 +}
 +
 +pub(crate) fn codegen_panic_inner<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    lang_item: rustc_hir::LangItem,
 +    args: &[Value],
 +    span: Span,
 +) {
 +    let def_id = fx
 +        .tcx
 +        .lang_items()
 +        .require(lang_item)
 +        .unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
 +
 +    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +    let symbol_name = fx.tcx.symbol_name(instance).name;
 +
 +    fx.lib_call(
 +        &*symbol_name,
 +        vec![fx.pointer_type, fx.pointer_type, fx.pointer_type],
 +        vec![],
 +        args,
 +    );
 +
 +    crate::trap::trap_unreachable(fx, "panic lang item returned");
 +}
index 590c9ef0ce191e6c333a064cfff912f1f5b6ad51,0000000000000000000000000000000000000000..71ef4d2267368467d97492a6e1919298a3762ae8
mode 100644,000000..100644
--- /dev/null
@@@ -1,88 -1,0 +1,82 @@@
-         // FIXME workaround for an ICE
-         config.opts.debugging_opts.trim_diagnostic_paths = false;
 +#![feature(rustc_private)]
 +
 +extern crate rustc_data_structures;
 +extern crate rustc_driver;
 +extern crate rustc_interface;
 +extern crate rustc_session;
 +extern crate rustc_target;
 +
 +use rustc_data_structures::profiling::print_time_passes_entry;
 +use rustc_interface::interface;
 +use rustc_session::config::ErrorOutputType;
 +use rustc_session::early_error;
 +use rustc_target::spec::PanicStrategy;
 +
 +#[derive(Default)]
 +pub struct CraneliftPassesCallbacks {
 +    time_passes: bool,
 +}
 +
 +impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
 +    fn config(&mut self, config: &mut interface::Config) {
 +        // If a --prints=... option has been given, we don't print the "total"
 +        // time because it will mess up the --prints output. See #64339.
 +        self.time_passes = config.opts.prints.is_empty()
 +            && (config.opts.debugging_opts.time_passes || config.opts.debugging_opts.time);
 +
-             std::env::current_exe()
-                 .unwrap()
-                 .parent()
-                 .unwrap()
-                 .parent()
-                 .unwrap()
-                 .parent()
-                 .unwrap()
-                 .join("build_sysroot")
-                 .join("sysroot"),
 +        config.opts.cg.panic = Some(PanicStrategy::Abort);
 +        config.opts.debugging_opts.panic_abort_tests = true;
 +        config.opts.maybe_sysroot = Some(
++            config.opts.maybe_sysroot.clone().unwrap_or(
++                std::env::current_exe()
++                    .unwrap()
++                    .parent()
++                    .unwrap()
++                    .join("sysroot"),
++            ),
 +        );
 +    }
 +}
 +
 +fn main() {
 +    let start = std::time::Instant::now();
 +    rustc_driver::init_rustc_env_logger();
 +    let mut callbacks = CraneliftPassesCallbacks::default();
 +    rustc_driver::install_ice_hook();
 +    let exit_code = rustc_driver::catch_with_exit_code(|| {
 +        let mut use_jit = false;
 +
 +        let mut args = std::env::args_os()
 +            .enumerate()
 +            .map(|(i, arg)| {
 +                arg.into_string().unwrap_or_else(|arg| {
 +                    early_error(
 +                        ErrorOutputType::default(),
 +                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
 +                    )
 +                })
 +            })
 +            .filter(|arg| {
 +                if arg == "--jit" {
 +                    use_jit = true;
 +                    false
 +                } else {
 +                    true
 +                }
 +            })
 +            .collect::<Vec<_>>();
 +        if use_jit {
 +            args.push("-Cprefer-dynamic".to_string());
 +        }
 +        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
 +        run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
 +            Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
 +                config: rustc_codegen_cranelift::BackendConfig { use_jit },
 +            })
 +        })));
 +        run_compiler.run()
 +    });
 +    // The extra `\t` is necessary to align this label with the others.
 +    print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed());
 +    std::process::exit(exit_code)
 +}
index c207d98d6c197bdac2c8a7161e33604c9b6c473d,0000000000000000000000000000000000000000..165d33dcfb50919a625fd0d60cdf51052a82ca1f
mode 100644,000000..100644
--- /dev/null
@@@ -1,106 -1,0 +1,103 @@@
-         // FIXME workaround for an ICE
-         config.opts.debugging_opts.trim_diagnostic_paths = false;
 +//! The only difference between this and cg_clif.rs is that this binary defaults to using cg_llvm
 +//! instead of cg_clif and requires `--clif` to use cg_clif and that this binary doesn't have JIT
 +//! support.
 +//! This is necessary as with Cargo `RUSTC` applies to both target crates and host crates. The host
 +//! crates must be built with cg_llvm as we are currently building a sysroot for cg_clif.
 +//! `RUSTFLAGS` however is only applied to target crates, so `--clif` would only be passed to the
 +//! target crates.
 +
 +#![feature(rustc_private)]
 +
 +extern crate rustc_data_structures;
 +extern crate rustc_driver;
 +extern crate rustc_interface;
 +extern crate rustc_session;
 +extern crate rustc_target;
 +
 +use std::path::PathBuf;
 +
 +use rustc_interface::interface;
 +use rustc_session::config::ErrorOutputType;
 +use rustc_session::early_error;
 +use rustc_target::spec::PanicStrategy;
 +
 +fn find_sysroot() -> String {
 +    // Taken from https://github.com/Manishearth/rust-clippy/pull/911.
 +    let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
 +    let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
 +    match (home, toolchain) {
 +        (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
 +        _ => option_env!("RUST_SYSROOT")
 +            .expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
 +            .to_owned(),
 +    }
 +}
 +
 +pub struct CraneliftPassesCallbacks {
 +    use_clif: bool,
 +}
 +
 +impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
 +    fn config(&mut self, config: &mut interface::Config) {
 +        if !self.use_clif {
 +            config.opts.maybe_sysroot = Some(PathBuf::from(find_sysroot()));
 +            return;
 +        }
 +
 +        config.opts.cg.panic = Some(PanicStrategy::Abort);
 +        config.opts.debugging_opts.panic_abort_tests = true;
 +        config.opts.maybe_sysroot = Some(
 +            std::env::current_exe()
 +                .unwrap()
 +                .parent()
 +                .unwrap()
 +                .parent()
 +                .unwrap()
 +                .parent()
 +                .unwrap()
 +                .join("build_sysroot")
 +                .join("sysroot"),
 +        );
 +    }
 +}
 +
 +fn main() {
 +    rustc_driver::init_rustc_env_logger();
 +    rustc_driver::install_ice_hook();
 +    let exit_code = rustc_driver::catch_with_exit_code(|| {
 +        let mut use_clif = false;
 +
 +        let args = std::env::args_os()
 +            .enumerate()
 +            .map(|(i, arg)| {
 +                arg.into_string().unwrap_or_else(|arg| {
 +                    early_error(
 +                        ErrorOutputType::default(),
 +                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
 +                    )
 +                })
 +            })
 +            .filter(|arg| {
 +                if arg == "--clif" {
 +                    use_clif = true;
 +                    false
 +                } else {
 +                    true
 +                }
 +            })
 +            .collect::<Vec<_>>();
 +
 +        let mut callbacks = CraneliftPassesCallbacks { use_clif };
 +
 +        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
 +        if use_clif {
 +            run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
 +                Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
 +                    config: rustc_codegen_cranelift::BackendConfig { use_jit: false },
 +                })
 +            })));
 +        }
 +        run_compiler.run()
 +    });
 +    std::process::exit(exit_code)
 +}
index 122a36b5bf741d25c38e0116889307f350b5f856,0000000000000000000000000000000000000000..57204de1135be435c524b2fbbb3ce69d608da6ff
mode 100644,000000..100644
--- /dev/null
@@@ -1,201 -1,0 +1,199 @@@
-             if to_signed {
-                 fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
-             } else {
-                 fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
-             }
 +//! Various number casting functions
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn clif_intcast(
 +    fx: &mut FunctionCx<'_, '_, impl Module>,
 +    val: Value,
 +    to: Type,
 +    signed: bool,
 +) -> Value {
 +    let from = fx.bcx.func.dfg.value_type(val);
 +    match (from, to) {
 +        // equal
 +        (_, _) if from == to => val,
 +
 +        // extend
 +        (_, types::I128) => {
 +            let lo = if from == types::I64 {
 +                val
 +            } else if signed {
 +                fx.bcx.ins().sextend(types::I64, val)
 +            } else {
 +                fx.bcx.ins().uextend(types::I64, val)
 +            };
 +            let hi = if signed {
 +                fx.bcx.ins().sshr_imm(lo, 63)
 +            } else {
 +                fx.bcx.ins().iconst(types::I64, 0)
 +            };
 +            fx.bcx.ins().iconcat(lo, hi)
 +        }
 +        (_, _) if to.wider_or_equal(from) => {
 +            if signed {
 +                fx.bcx.ins().sextend(to, val)
 +            } else {
 +                fx.bcx.ins().uextend(to, val)
 +            }
 +        }
 +
 +        // reduce
 +        (types::I128, _) => {
 +            let (lsb, _msb) = fx.bcx.ins().isplit(val);
 +            if to == types::I64 {
 +                lsb
 +            } else {
 +                fx.bcx.ins().ireduce(to, lsb)
 +            }
 +        }
 +        (_, _) => fx.bcx.ins().ireduce(to, val),
 +    }
 +}
 +
 +pub(crate) fn clif_int_or_float_cast(
 +    fx: &mut FunctionCx<'_, '_, impl Module>,
 +    from: Value,
 +    from_signed: bool,
 +    to_ty: Type,
 +    to_signed: bool,
 +) -> Value {
 +    let from_ty = fx.bcx.func.dfg.value_type(from);
 +
 +    if from_ty.is_int() && to_ty.is_int() {
 +        // int-like -> int-like
 +        clif_intcast(
 +            fx,
 +            from,
 +            to_ty,
 +            // This is correct as either from_signed == to_signed (=> this is trivially correct)
 +            // Or from_clif_ty == to_clif_ty, which means this is a no-op.
 +            from_signed,
 +        )
 +    } else if from_ty.is_int() && to_ty.is_float() {
 +        if from_ty == types::I128 {
 +            // _______ss__f_
 +            // __float  tisf: i128 -> f32
 +            // __float  tidf: i128 -> f64
 +            // __floatuntisf: u128 -> f32
 +            // __floatuntidf: u128 -> f64
 +
 +            let name = format!(
 +                "__float{sign}ti{flt}f",
 +                sign = if from_signed { "" } else { "un" },
 +                flt = match to_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", to_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = if from_signed {
 +                fx.tcx.types.i128
 +            } else {
 +                fx.tcx.types.u128
 +            };
 +
 +            let to_rust_ty = match to_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            return fx
 +                .easy_call(
 +                    &name,
 +                    &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
 +                    to_rust_ty,
 +                )
 +                .load_scalar(fx);
 +        }
 +
 +        // int-like -> float
 +        if from_signed {
 +            fx.bcx.ins().fcvt_from_sint(to_ty, from)
 +        } else {
 +            fx.bcx.ins().fcvt_from_uint(to_ty, from)
 +        }
 +    } else if from_ty.is_float() && to_ty.is_int() {
 +        if to_ty == types::I128 {
 +            // _____sssf___
 +            // __fix   sfti: f32 -> i128
 +            // __fix   dfti: f64 -> i128
 +            // __fixunssfti: f32 -> u128
 +            // __fixunsdfti: f64 -> u128
 +
 +            let name = format!(
 +                "__fix{sign}{flt}fti",
 +                sign = if to_signed { "" } else { "uns" },
 +                flt = match from_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", to_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = match from_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            let to_rust_ty = if to_signed {
 +                fx.tcx.types.i128
 +            } else {
 +                fx.tcx.types.u128
 +            };
 +
 +            return fx
 +                .easy_call(
 +                    &name,
 +                    &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
 +                    to_rust_ty,
 +                )
 +                .load_scalar(fx);
 +        }
 +
 +        // float -> int-like
 +        if to_ty == types::I8 || to_ty == types::I16 {
 +            // FIXME implement fcvt_to_*int_sat.i8/i16
 +            let val = if to_signed {
 +                fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
 +            } else {
 +                fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
 +            };
 +            let (min, max) = match (to_ty, to_signed) {
 +                (types::I8, false) => (0, i64::from(u8::MAX)),
 +                (types::I16, false) => (0, i64::from(u16::MAX)),
 +                (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
 +                (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
 +                _ => unreachable!(),
 +            };
 +            let min_val = fx.bcx.ins().iconst(types::I32, min);
 +            let max_val = fx.bcx.ins().iconst(types::I32, max);
 +
 +            let val = if to_signed {
 +                let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
 +                let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
 +                fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
 +            } else {
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
 +                fx.bcx.ins().select(has_overflow, max_val, val)
 +            };
 +            fx.bcx.ins().ireduce(to_ty, val)
++        } else if to_signed {
++            fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
 +        } else {
++            fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
 +        }
 +    } else if from_ty.is_float() && to_ty.is_float() {
 +        // float -> float
 +        match (from_ty, to_ty) {
 +            (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
 +            (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
 +            _ => from,
 +        }
 +    } else {
 +        unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
 +    }
 +}
index e998403dea6bbea475d96f2e17f8baf2540c1936,0000000000000000000000000000000000000000..d6a38bdafc9ba36c413b4a302af57884d1ad9a60
mode 100644,000000..100644
--- /dev/null
@@@ -1,165 -1,0 +1,165 @@@
-             return None;
 +//! Replaces 128-bit operators with lang item calls where necessary
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn maybe_codegen<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    checked: bool,
 +    lhs: CValue<'tcx>,
 +    rhs: CValue<'tcx>,
 +) -> Option<CValue<'tcx>> {
 +    if lhs.layout().ty != fx.tcx.types.u128 && lhs.layout().ty != fx.tcx.types.i128 {
 +        return None;
 +    }
 +
 +    let lhs_val = lhs.load_scalar(fx);
 +    let rhs_val = rhs.load_scalar(fx);
 +
 +    let is_signed = type_sign(lhs.layout().ty);
 +
 +    match bin_op {
 +        BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
 +            assert!(!checked);
-         BinOp::Add | BinOp::Sub if !checked => return None,
++            None
 +        }
-             return Some(res);
++        BinOp::Add | BinOp::Sub if !checked => None,
 +        BinOp::Add => {
 +            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +            return Some(if is_signed {
 +                fx.easy_call("__rust_i128_addo", &[lhs, rhs], out_ty)
 +            } else {
 +                fx.easy_call("__rust_u128_addo", &[lhs, rhs], out_ty)
 +            });
 +        }
 +        BinOp::Sub => {
 +            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +            return Some(if is_signed {
 +                fx.easy_call("__rust_i128_subo", &[lhs, rhs], out_ty)
 +            } else {
 +                fx.easy_call("__rust_u128_subo", &[lhs, rhs], out_ty)
 +            });
 +        }
 +        BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
 +        BinOp::Mul => {
 +            let res = if checked {
 +                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +                if is_signed {
 +                    fx.easy_call("__rust_i128_mulo", &[lhs, rhs], out_ty)
 +                } else {
 +                    fx.easy_call("__rust_u128_mulo", &[lhs, rhs], out_ty)
 +                }
 +            } else {
 +                let val_ty = if is_signed {
 +                    fx.tcx.types.i128
 +                } else {
 +                    fx.tcx.types.u128
 +                };
 +                fx.easy_call("__multi3", &[lhs, rhs], val_ty)
 +            };
-             return None;
++            Some(res)
 +        }
 +        BinOp::Div => {
 +            assert!(!checked);
 +            if is_signed {
 +                Some(fx.easy_call("__divti3", &[lhs, rhs], fx.tcx.types.i128))
 +            } else {
 +                Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.tcx.types.u128))
 +            }
 +        }
 +        BinOp::Rem => {
 +            assert!(!checked);
 +            if is_signed {
 +                Some(fx.easy_call("__modti3", &[lhs, rhs], fx.tcx.types.i128))
 +            } else {
 +                Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.tcx.types.u128))
 +            }
 +        }
 +        BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
 +            assert!(!checked);
++            None
 +        }
 +        BinOp::Shl | BinOp::Shr => {
 +            let is_overflow = if checked {
 +                // rhs >= 128
 +
 +                // FIXME support non 128bit rhs
 +                /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
 +                let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
 +                let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
 +                let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
 +                let is_overflow = fx.bcx.ins().bconst(types::B1, false);
 +
 +                Some(fx.bcx.ins().bint(types::I8, is_overflow))
 +            } else {
 +                None
 +            };
 +
 +            // Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit
 +            // integer into its lsb and msb.
 +            // https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
 +            if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
 +                let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
 +                let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
 +                let val = match (bin_op, is_signed) {
 +                    (BinOp::Shr, false) => {
 +                        let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
 +                        Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.u128)))
 +                    }
 +                    (BinOp::Shr, true) => {
 +                        let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
 +                        let all_ones = fx.bcx.ins().iconst(types::I64, u64::MAX as i64);
 +                        let all_sign_bits = fx.bcx.ins().select(sign, all_zeros, all_ones);
 +
 +                        let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
 +                        Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.i128)))
 +                    }
 +                    (BinOp::Shl, _) => {
 +                        let val_ty = if is_signed {
 +                            fx.tcx.types.i128
 +                        } else {
 +                            fx.tcx.types.u128
 +                        };
 +                        let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
 +                        Some(CValue::by_val(val, fx.layout_of(val_ty)))
 +                    }
 +                    _ => None,
 +                };
 +                if let Some(val) = val {
 +                    if let Some(is_overflow) = is_overflow {
 +                        let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +                        let val = val.load_scalar(fx);
 +                        return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
 +                    } else {
 +                        return Some(val);
 +                    }
 +                }
 +            }
 +
 +            let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
 +            let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.tcx.types.u32));
 +            let val = match (bin_op, is_signed) {
 +                (BinOp::Shl, false) => {
 +                    fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
 +                }
 +                (BinOp::Shl, true) => {
 +                    fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
 +                }
 +                (BinOp::Shr, false) => {
 +                    fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
 +                }
 +                (BinOp::Shr, true) => {
 +                    fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
 +                }
 +                (_, _) => unreachable!(),
 +            };
 +            if let Some(is_overflow) = is_overflow {
 +                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +                let val = val.load_scalar(fx);
 +                Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
 +            } else {
 +                Some(val)
 +            }
 +        }
 +    }
 +}
index 13c62add41a3b520bee56d4daac980f0c05a0a97,0000000000000000000000000000000000000000..eda77bf19d3547f4b3e18b5b03d81d478ab6b412
mode 100644,000000..100644
--- /dev/null
@@@ -1,446 -1,0 +1,446 @@@
-         crate::constant::trans_const_value(self, const_loc, self.tcx.caller_location_ty())
 +use rustc_index::vec::IndexVec;
 +use rustc_target::abi::{Integer, Primitive};
 +use rustc_target::spec::{HasTargetSpec, Target};
 +
 +use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
 +    match tcx.data_layout.pointer_size.bits() {
 +        16 => types::I16,
 +        32 => types::I32,
 +        64 => types::I64,
 +        bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
 +    }
 +}
 +
 +pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
 +    match scalar.value {
 +        Primitive::Int(int, _sign) => match int {
 +            Integer::I8 => types::I8,
 +            Integer::I16 => types::I16,
 +            Integer::I32 => types::I32,
 +            Integer::I64 => types::I64,
 +            Integer::I128 => types::I128,
 +        },
 +        Primitive::F32 => types::F32,
 +        Primitive::F64 => types::F64,
 +        Primitive::Pointer => pointer_ty(tcx),
 +    }
 +}
 +
 +fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
 +    Some(match ty.kind() {
 +        ty::Bool => types::I8,
 +        ty::Uint(size) => match size {
 +            UintTy::U8 => types::I8,
 +            UintTy::U16 => types::I16,
 +            UintTy::U32 => types::I32,
 +            UintTy::U64 => types::I64,
 +            UintTy::U128 => types::I128,
 +            UintTy::Usize => pointer_ty(tcx),
 +        },
 +        ty::Int(size) => match size {
 +            IntTy::I8 => types::I8,
 +            IntTy::I16 => types::I16,
 +            IntTy::I32 => types::I32,
 +            IntTy::I64 => types::I64,
 +            IntTy::I128 => types::I128,
 +            IntTy::Isize => pointer_ty(tcx),
 +        },
 +        ty::Char => types::I32,
 +        ty::Float(size) => match size {
 +            FloatTy::F32 => types::F32,
 +            FloatTy::F64 => types::F64,
 +        },
 +        ty::FnPtr(_) => pointer_ty(tcx),
 +        ty::RawPtr(TypeAndMut {
 +            ty: pointee_ty,
 +            mutbl: _,
 +        })
 +        | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, pointee_ty) {
 +                return None;
 +            } else {
 +                pointer_ty(tcx)
 +            }
 +        }
 +        ty::Adt(adt_def, _) if adt_def.repr.simd() => {
 +            let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
 +            {
 +                Abi::Vector { element, count } => (element.clone(), *count),
 +                _ => unreachable!(),
 +            };
 +
 +            match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
 +                // Cranelift currently only implements icmp for 128bit vectors.
 +                Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
 +                _ => return None,
 +            }
 +        }
 +        ty::Param(_) => bug!("ty param {:?}", ty),
 +        _ => return None,
 +    })
 +}
 +
 +fn clif_pair_type_from_ty<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> Option<(types::Type, types::Type)> {
 +    Some(match ty.kind() {
 +        ty::Tuple(substs) if substs.len() == 2 => {
 +            let mut types = substs.types();
 +            let a = clif_type_from_ty(tcx, types.next().unwrap())?;
 +            let b = clif_type_from_ty(tcx, types.next().unwrap())?;
 +            if a.is_vector() || b.is_vector() {
 +                return None;
 +            }
 +            (a, b)
 +        }
 +        ty::RawPtr(TypeAndMut {
 +            ty: pointee_ty,
 +            mutbl: _,
 +        })
 +        | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, pointee_ty) {
 +                (pointer_ty(tcx), pointer_ty(tcx))
 +            } else {
 +                return None;
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +/// Is a pointer to this type a fat ptr?
 +pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
 +    let ptr_ty = tcx.mk_ptr(TypeAndMut {
 +        ty,
 +        mutbl: rustc_hir::Mutability::Not,
 +    });
 +    match &tcx
 +        .layout_of(ParamEnv::reveal_all().and(ptr_ty))
 +        .unwrap()
 +        .abi
 +    {
 +        Abi::Scalar(_) => false,
 +        Abi::ScalarPair(_, _) => true,
 +        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
 +    }
 +}
 +
 +pub(crate) fn codegen_icmp_imm(
 +    fx: &mut FunctionCx<'_, '_, impl Module>,
 +    intcc: IntCC,
 +    lhs: Value,
 +    rhs: i128,
 +) -> Value {
 +    let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +    if lhs_ty == types::I128 {
 +        // FIXME legalize `icmp_imm.i128` in Cranelift
 +
 +        let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
 +        let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
 +
 +        match intcc {
 +            IntCC::Equal => {
 +                let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                fx.bcx.ins().band(lsb_eq, msb_eq)
 +            }
 +            IntCC::NotEqual => {
 +                let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
 +                let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
 +                fx.bcx.ins().bor(lsb_ne, msb_ne)
 +            }
 +            _ => {
 +                // if msb_eq {
 +                //     lsb_cc
 +                // } else {
 +                //     msb_cc
 +                // }
 +
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
 +                let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
 +
 +                fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
 +            }
 +        }
 +    } else {
 +        let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
 +        fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
 +    }
 +}
 +
 +fn resolve_normal_value_imm(func: &Function, val: Value) -> Option<i64> {
 +    if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
 +        if let InstructionData::UnaryImm {
 +            opcode: Opcode::Iconst,
 +            imm,
 +        } = func.dfg[inst]
 +        {
 +            Some(imm.into())
 +        } else {
 +            None
 +        }
 +    } else {
 +        None
 +    }
 +}
 +
 +fn resolve_128bit_value_imm(func: &Function, val: Value) -> Option<u128> {
 +    let (lsb, msb) = if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
 +        if let InstructionData::Binary {
 +            opcode: Opcode::Iconcat,
 +            args: [lsb, msb],
 +        } = func.dfg[inst]
 +        {
 +            (lsb, msb)
 +        } else {
 +            return None;
 +        }
 +    } else {
 +        return None;
 +    };
 +
 +    let lsb = u128::from(resolve_normal_value_imm(func, lsb)? as u64);
 +    let msb = u128::from(resolve_normal_value_imm(func, msb)? as u64);
 +
 +    Some(msb << 64 | lsb)
 +}
 +
 +pub(crate) fn resolve_value_imm(func: &Function, val: Value) -> Option<u128> {
 +    if func.dfg.value_type(val) == types::I128 {
 +        resolve_128bit_value_imm(func, val)
 +    } else {
 +        resolve_normal_value_imm(func, val).map(|imm| u128::from(imm as u64))
 +    }
 +}
 +
 +pub(crate) fn type_min_max_value(
 +    bcx: &mut FunctionBuilder<'_>,
 +    ty: Type,
 +    signed: bool,
 +) -> (Value, Value) {
 +    assert!(ty.is_int());
 +
 +    if ty == types::I128 {
 +        if signed {
 +            let min = i128::MIN as u128;
 +            let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
 +            let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
 +            let min = bcx.ins().iconcat(min_lsb, min_msb);
 +
 +            let max = i128::MIN as u128;
 +            let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
 +            let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
 +            let max = bcx.ins().iconcat(max_lsb, max_msb);
 +
 +            return (min, max);
 +        } else {
 +            let min_half = bcx.ins().iconst(types::I64, 0);
 +            let min = bcx.ins().iconcat(min_half, min_half);
 +
 +            let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
 +            let max = bcx.ins().iconcat(max_half, max_half);
 +
 +            return (min, max);
 +        }
 +    }
 +
 +    let min = match (ty, signed) {
 +        (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
 +            0i64
 +        }
 +        (types::I8, true) => i64::from(i8::MIN),
 +        (types::I16, true) => i64::from(i16::MIN),
 +        (types::I32, true) => i64::from(i32::MIN),
 +        (types::I64, true) => i64::MIN,
 +        _ => unreachable!(),
 +    };
 +
 +    let max = match (ty, signed) {
 +        (types::I8, false) => i64::from(u8::MAX),
 +        (types::I16, false) => i64::from(u16::MAX),
 +        (types::I32, false) => i64::from(u32::MAX),
 +        (types::I64, false) => u64::MAX as i64,
 +        (types::I8, true) => i64::from(i8::MAX),
 +        (types::I16, true) => i64::from(i16::MAX),
 +        (types::I32, true) => i64::from(i32::MAX),
 +        (types::I64, true) => i64::MAX,
 +        _ => unreachable!(),
 +    };
 +
 +    let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
 +
 +    (min, max)
 +}
 +
 +pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
 +    match ty.kind() {
 +        ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
 +        ty::Int(..) => true,
 +        ty::Float(..) => false, // `signed` is unused for floats
 +        _ => panic!("{}", ty),
 +    }
 +}
 +
 +pub(crate) struct FunctionCx<'clif, 'tcx, M: Module> {
 +    pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, M>,
 +    pub(crate) tcx: TyCtxt<'tcx>,
 +    pub(crate) pointer_type: Type, // Cached from module
 +
 +    pub(crate) instance: Instance<'tcx>,
 +    pub(crate) mir: &'tcx Body<'tcx>,
 +
 +    pub(crate) bcx: FunctionBuilder<'clif>,
 +    pub(crate) block_map: IndexVec<BasicBlock, Block>,
 +    pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
 +
 +    /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
 +    pub(crate) caller_location: Option<CValue<'tcx>>,
 +
 +    /// See [`crate::optimize::code_layout`] for more information.
 +    pub(crate) cold_blocks: EntitySet<Block>,
 +
 +    pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
 +    pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
 +
 +    /// This should only be accessed by `CPlace::new_var`.
 +    pub(crate) next_ssa_var: u32,
 +
 +    pub(crate) inline_asm_index: u32,
 +}
 +
 +impl<'tcx, M: Module> LayoutOf for FunctionCx<'_, 'tcx, M> {
 +    type Ty = Ty<'tcx>;
 +    type TyAndLayout = TyAndLayout<'tcx>;
 +
 +    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
 +        assert!(!ty.still_further_specializable());
 +        self.tcx
 +            .layout_of(ParamEnv::reveal_all().and(&ty))
 +            .unwrap_or_else(|e| {
 +                if let layout::LayoutError::SizeOverflow(_) = e {
 +                    self.tcx.sess.fatal(&e.to_string())
 +                } else {
 +                    bug!("failed to get layout for `{}`: {}", ty, e)
 +                }
 +            })
 +    }
 +}
 +
 +impl<'tcx, M: Module> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, M> {
 +    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
 +        self.tcx
 +    }
 +}
 +
 +impl<'tcx, M: Module> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, M> {
 +    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
 +        &self.tcx.data_layout
 +    }
 +}
 +
 +impl<'tcx, M: Module> layout::HasParamEnv<'tcx> for FunctionCx<'_, 'tcx, M> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'tcx, M: Module> HasTargetSpec for FunctionCx<'_, 'tcx, M> {
 +    fn target_spec(&self) -> &Target {
 +        &self.tcx.sess.target
 +    }
 +}
 +
 +impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
 +    pub(crate) fn monomorphize<T>(&self, value: &T) -> T
 +    where
 +        T: TypeFoldable<'tcx> + Copy,
 +    {
 +        if let Some(substs) = self.instance.substs_for_mir_body() {
 +            self.tcx
 +                .subst_and_normalize_erasing_regions(substs, ty::ParamEnv::reveal_all(), value)
 +        } else {
 +            self.tcx
 +                .normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
 +        }
 +    }
 +
 +    pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
 +        clif_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
 +        clif_pair_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
 +        *self.block_map.get(bb).unwrap()
 +    }
 +
 +    pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
 +        *self.local_map.get(local).unwrap_or_else(|| {
 +            panic!("Local {:?} doesn't exist", local);
 +        })
 +    }
 +
 +    pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
 +        let (index, _) = self.source_info_set.insert_full(source_info);
 +        self.bcx.set_srcloc(SourceLoc::new(index as u32));
 +    }
 +
 +    pub(crate) fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
 +        if let Some(loc) = self.caller_location {
 +            // `#[track_caller]` is used; return caller location instead of current location.
 +            return loc;
 +        }
 +
 +        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
 +        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
 +        let const_loc = self.tcx.const_caller_location((
 +            rustc_span::symbol::Symbol::intern(&caller.file.name.to_string()),
 +            caller.line as u32,
 +            caller.col_display as u32 + 1,
 +        ));
++        crate::constant::codegen_const_value(self, const_loc, self.tcx.caller_location_ty())
 +    }
 +
 +    pub(crate) fn triple(&self) -> &target_lexicon::Triple {
 +        self.cx.module.isa().triple()
 +    }
 +
 +    pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
 +        use std::collections::hash_map::DefaultHasher;
 +        use std::hash::{Hash, Hasher};
 +
 +        let mut hasher = DefaultHasher::new();
 +        msg.hash(&mut hasher);
 +        let msg_hash = hasher.finish();
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
 +        let msg_id = self
 +            .cx
 +            .module
 +            .declare_data(
 +                &format!("__{}_{:08x}", prefix, msg_hash),
 +                Linkage::Local,
 +                false,
 +                false,
 +            )
 +            .unwrap();
 +
 +        // Ignore DuplicateDefinition error, as the data will be the same
 +        let _ = self.cx.module.define_data(msg_id, &data_ctx);
 +
 +        let local_msg_id = self.cx.module.declare_data_in_func(msg_id, self.bcx.func);
 +        #[cfg(debug_assertions)]
 +        {
 +            self.add_comment(local_msg_id, msg);
 +        }
 +        self.bcx.ins().global_value(self.pointer_type, local_msg_id)
 +    }
 +}
index 1b514958a4809dfec736410a8db1d98c38e2a4d6,0000000000000000000000000000000000000000..ce1d5ed2e61780686600c738a02109c3949980a1
mode 100644,000000..100644
--- /dev/null
@@@ -1,476 -1,0 +1,474 @@@
- pub(crate) fn trans_constant<'tcx>(
 +//! Handling of `static`s, `const`s and promoted allocations
 +
 +use rustc_span::DUMMY_SP;
 +
 +use rustc_data_structures::fx::FxHashSet;
 +use rustc_errors::ErrorReported;
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_middle::mir::interpret::{
 +    read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
 +};
 +use rustc_middle::ty::{Const, ConstKind};
 +
 +use cranelift_codegen::ir::GlobalValueData;
 +use cranelift_module::*;
 +
 +use crate::prelude::*;
 +
 +#[derive(Default)]
 +pub(crate) struct ConstantCx {
 +    todo: Vec<TodoItem>,
 +    done: FxHashSet<DataId>,
 +}
 +
 +#[derive(Copy, Clone, Debug)]
 +enum TodoItem {
 +    Alloc(AllocId),
 +    Static(DefId),
 +}
 +
 +impl ConstantCx {
 +    pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut impl Module) {
 +        //println!("todo {:?}", self.todo);
 +        define_all_allocs(tcx, module, &mut self);
 +        //println!("done {:?}", self.done);
 +        self.done.clear();
 +    }
 +}
 +
 +pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) {
 +    for constant in &fx.mir.required_consts {
 +        let const_ = fx.monomorphize(&constant.literal);
 +        match const_.val {
 +            ConstKind::Value(_) => {}
 +            ConstKind::Unevaluated(def, ref substs, promoted) => {
 +                if let Err(err) =
 +                    fx.tcx
 +                        .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
 +                {
 +                    match err {
 +                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
 +                            fx.tcx
 +                                .sess
 +                                .span_err(constant.span, "erroneous constant encountered");
 +                        }
 +                        ErrorHandled::TooGeneric => {
 +                            span_bug!(
 +                                constant.span,
 +                                "codgen encountered polymorphic constant: {:?}",
 +                                err
 +                            );
 +                        }
 +                    }
 +                }
 +            }
 +            ConstKind::Param(_)
 +            | ConstKind::Infer(_)
 +            | ConstKind::Bound(_, _)
 +            | ConstKind::Placeholder(_)
 +            | ConstKind::Error(_) => unreachable!("{:?}", const_),
 +        }
 +    }
 +}
 +
 +pub(crate) fn codegen_static(constants_cx: &mut ConstantCx, def_id: DefId) {
 +    constants_cx.todo.push(TodoItem::Static(def_id));
 +}
 +
 +pub(crate) fn codegen_tls_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    def_id: DefId,
 +    layout: TyAndLayout<'tcx>,
 +) -> CValue<'tcx> {
 +    let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
 +    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    #[cfg(debug_assertions)]
 +    fx.add_comment(local_data_id, format!("tls {:?}", def_id));
 +    let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
 +    CValue::by_val(tls_ptr, layout)
 +}
 +
 +fn codegen_static_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    def_id: DefId,
 +    layout: TyAndLayout<'tcx>,
 +) -> CPlace<'tcx> {
 +    let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
 +    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    #[cfg(debug_assertions)]
 +    fx.add_comment(local_data_id, format!("{:?}", def_id));
 +    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
 +    assert!(!layout.is_unsized(), "unsized statics aren't supported");
 +    assert!(
 +        matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}),
 +        "tls static referenced without Rvalue::ThreadLocalRef"
 +    );
 +    CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
 +}
 +
-     trans_const_value(fx, const_val, const_.ty)
++pub(crate) fn codegen_constant<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    constant: &Constant<'tcx>,
 +) -> CValue<'tcx> {
 +    let const_ = fx.monomorphize(&constant.literal);
 +    let const_val = match const_.val {
 +        ConstKind::Value(const_val) => const_val,
 +        ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
 +            assert!(substs.is_empty());
 +            assert!(promoted.is_none());
 +
 +            return codegen_static_ref(
 +                fx,
 +                def.did,
 +                fx.layout_of(fx.monomorphize(&constant.literal.ty)),
 +            )
 +            .to_cvalue(fx);
 +        }
 +        ConstKind::Unevaluated(def, ref substs, promoted) => {
 +            match fx
 +                .tcx
 +                .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
 +            {
 +                Ok(const_val) => const_val,
 +                Err(_) => {
 +                    if promoted.is_none() {
 +                        fx.tcx
 +                            .sess
 +                            .span_err(constant.span, "erroneous constant encountered");
 +                    }
 +                    return crate::trap::trap_unreachable_ret_value(
 +                        fx,
 +                        fx.layout_of(const_.ty),
 +                        "erroneous constant encountered",
 +                    );
 +                }
 +            }
 +        }
 +        ConstKind::Param(_)
 +        | ConstKind::Infer(_)
 +        | ConstKind::Bound(_, _)
 +        | ConstKind::Placeholder(_)
 +        | ConstKind::Error(_) => unreachable!("{:?}", const_),
 +    };
 +
- pub(crate) fn trans_const_value<'tcx>(
++    codegen_const_value(fx, const_val, const_.ty)
 +}
 +
-             crate::Pointer::const_addr(fx, i64::try_from(layout.align.pref.bytes()).unwrap()),
++pub(crate) fn codegen_const_value<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    const_val: ConstValue<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> CValue<'tcx> {
 +    let layout = fx.layout_of(ty);
 +    assert!(!layout.is_unsized(), "sized const value");
 +
 +    if layout.is_zst() {
 +        return CValue::by_ref(
-                     return CValue::const_val(fx, layout, data);
++            crate::Pointer::dangling(layout.align.pref),
 +            layout,
 +        );
 +    }
 +
 +    match const_val {
 +        ConstValue::Scalar(x) => {
 +            if fx.clif_type(layout.ty).is_none() {
 +                let (size, align) = (layout.size, layout.align.pref);
 +                let mut alloc = Allocation::from_bytes(
 +                    std::iter::repeat(0)
 +                        .take(size.bytes_usize())
 +                        .collect::<Vec<u8>>(),
 +                    align,
 +                );
 +                let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
 +                alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
 +                let alloc = fx.tcx.intern_const_alloc(alloc);
 +                return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
 +            }
 +
 +            match x {
 +                Scalar::Raw { data, size } => {
 +                    assert_eq!(u64::from(size), layout.size.bytes());
-                     return CValue::by_val(val, layout);
++                    CValue::const_val(fx, layout, data)
 +                }
 +                Scalar::Ptr(ptr) => {
 +                    let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
 +                    let base_addr = match alloc_kind {
 +                        Some(GlobalAlloc::Memory(alloc)) => {
 +                            fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
 +                            let data_id = data_id_for_alloc_id(
 +                                &mut fx.cx.module,
 +                                ptr.alloc_id,
 +                                alloc.mutability,
 +                            );
 +                            let local_data_id =
 +                                fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                            #[cfg(debug_assertions)]
 +                            fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
 +                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                        }
 +                        Some(GlobalAlloc::Function(instance)) => {
 +                            let func_id =
 +                                crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
 +                            let local_func_id =
 +                                fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
 +                            fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
 +                        }
 +                        Some(GlobalAlloc::Static(def_id)) => {
 +                            assert!(fx.tcx.is_static(def_id));
 +                            let data_id =
 +                                data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
 +                            let local_data_id =
 +                                fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                            #[cfg(debug_assertions)]
 +                            fx.add_comment(local_data_id, format!("{:?}", def_id));
 +                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                        }
 +                        None => bug!("missing allocation {:?}", ptr.alloc_id),
 +                    };
 +                    let val = if ptr.offset.bytes() != 0 {
 +                        fx.bcx
 +                            .ins()
 +                            .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
 +                    } else {
 +                        base_addr
 +                    };
-             &format!("__alloc_{:x}", alloc_id.0),
++                    CValue::by_val(val, layout)
 +                }
 +            }
 +        }
 +        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
 +            pointer_for_allocation(fx, alloc)
 +                .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
 +            layout,
 +        ),
 +        ConstValue::Slice { data, start, end } => {
 +            let ptr = pointer_for_allocation(fx, data)
 +                .offset_i64(fx, i64::try_from(start).unwrap())
 +                .get_addr(fx);
 +            let len = fx.bcx.ins().iconst(
 +                fx.pointer_type,
 +                i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
 +            );
 +            CValue::by_val_pair(ptr, len, layout)
 +        }
 +    }
 +}
 +
 +fn pointer_for_allocation<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    alloc: &'tcx Allocation,
 +) -> crate::pointer::Pointer {
 +    let alloc_id = fx.tcx.create_memory_alloc(alloc);
 +    fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
 +    let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.mutability);
 +
 +    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    #[cfg(debug_assertions)]
 +    fx.add_comment(local_data_id, format!("{:?}", alloc_id));
 +    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
 +    crate::pointer::Pointer::new(global_ptr)
 +}
 +
 +fn data_id_for_alloc_id(
 +    module: &mut impl Module,
 +    alloc_id: AllocId,
 +    mutability: rustc_hir::Mutability,
 +) -> DataId {
 +    module
 +        .declare_data(
-         if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
-             || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
-         {
-             Linkage::Preemptible
-         } else {
-             Linkage::Import
-         }
++            &format!(".L__alloc_{:x}", alloc_id.0),
 +            Linkage::Local,
 +            mutability == rustc_hir::Mutability::Mut,
 +            false,
 +        )
 +        .unwrap()
 +}
 +
 +fn data_id_for_static(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    def_id: DefId,
 +    definition: bool,
 +) -> DataId {
 +    let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
 +    let linkage = if definition {
 +        crate::linkage::get_static_linkage(tcx, def_id)
++    } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
++        || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
++    {
++        Linkage::Preemptible
 +    } else {
++        Linkage::Import
 +    };
 +
 +    let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
 +    let symbol_name = tcx.symbol_name(instance).name;
 +    let ty = instance.ty(tcx, ParamEnv::reveal_all());
 +    let is_mutable = if tcx.is_mutable_static(def_id) {
 +        true
 +    } else {
 +        !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
 +    };
 +    let align = tcx
 +        .layout_of(ParamEnv::reveal_all().and(ty))
 +        .unwrap()
 +        .align
 +        .pref
 +        .bytes();
 +
 +    let attrs = tcx.codegen_fn_attrs(def_id);
 +
 +    let data_id = module
 +        .declare_data(
 +            &*symbol_name,
 +            linkage,
 +            is_mutable,
 +            attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
 +        )
 +        .unwrap();
 +
 +    if rlinkage.is_some() {
 +        // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
 +        // Declare an internal global `extern_with_linkage_foo` which
 +        // is initialized with the address of `foo`.  If `foo` is
 +        // discarded during linking (for example, if `foo` has weak
 +        // linkage and there are no definitions), then
 +        // `extern_with_linkage_foo` will instead be initialized to
 +        // zero.
 +
 +        let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
 +        let ref_data_id = module
 +            .declare_data(&ref_name, Linkage::Local, false, false)
 +            .unwrap();
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.set_align(align);
 +        let data = module.declare_data_in_data(data_id, &mut data_ctx);
 +        data_ctx.define(
 +            std::iter::repeat(0)
 +                .take(pointer_ty(tcx).bytes() as usize)
 +                .collect(),
 +        );
 +        data_ctx.write_data_addr(0, data, 0);
 +        match module.define_data(ref_data_id, &data_ctx) {
 +            // Every time the static is referenced there will be another definition of this global,
 +            // so duplicate definitions are expected and allowed.
 +            Err(ModuleError::DuplicateDefinition(_)) => {}
 +            res => res.unwrap(),
 +        }
 +        ref_data_id
 +    } else {
 +        data_id
 +    }
 +}
 +
 +fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut impl Module, cx: &mut ConstantCx) {
 +    while let Some(todo_item) = cx.todo.pop() {
 +        let (data_id, alloc, section_name) = match todo_item {
 +            TodoItem::Alloc(alloc_id) => {
 +                //println!("alloc_id {}", alloc_id);
 +                let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
 +                    GlobalAlloc::Memory(alloc) => alloc,
 +                    GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
 +                };
 +                let data_id = data_id_for_alloc_id(module, alloc_id, alloc.mutability);
 +                (data_id, alloc, None)
 +            }
 +            TodoItem::Static(def_id) => {
 +                //println!("static {:?}", def_id);
 +
 +                let section_name = tcx
 +                    .codegen_fn_attrs(def_id)
 +                    .link_section
 +                    .map(|s| s.as_str());
 +
 +                let alloc = tcx.eval_static_initializer(def_id).unwrap();
 +
 +                let data_id = data_id_for_static(tcx, module, def_id, true);
 +                (data_id, alloc, section_name)
 +            }
 +        };
 +
 +        //("data_id {}", data_id);
 +        if cx.done.contains(&data_id) {
 +            continue;
 +        }
 +
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.set_align(alloc.align.bytes());
 +
 +        if let Some(section_name) = section_name {
 +            // FIXME set correct segment for Mach-O files
 +            data_ctx.set_segment_section("", &*section_name);
 +        }
 +
 +        let bytes = alloc
 +            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
 +            .to_vec();
 +        data_ctx.define(bytes.into_boxed_slice());
 +
 +        for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
 +            let addend = {
 +                let endianness = tcx.data_layout.endian;
 +                let offset = offset.bytes() as usize;
 +                let ptr_size = tcx.data_layout.pointer_size;
 +                let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
 +                    offset..offset + ptr_size.bytes() as usize,
 +                );
 +                read_target_uint(endianness, bytes).unwrap()
 +            };
 +
 +            let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
 +            let data_id = match reloc_target_alloc {
 +                GlobalAlloc::Function(instance) => {
 +                    assert_eq!(addend, 0);
 +                    let func_id = crate::abi::import_function(tcx, module, instance);
 +                    let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
 +                    data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
 +                    continue;
 +                }
 +                GlobalAlloc::Memory(target_alloc) => {
 +                    cx.todo.push(TodoItem::Alloc(reloc));
 +                    data_id_for_alloc_id(module, reloc, target_alloc.mutability)
 +                }
 +                GlobalAlloc::Static(def_id) => {
 +                    if tcx
 +                        .codegen_fn_attrs(def_id)
 +                        .flags
 +                        .contains(CodegenFnAttrFlags::THREAD_LOCAL)
 +                    {
 +                        tcx.sess.fatal(&format!(
 +                            "Allocation {:?} contains reference to TLS value {:?}",
 +                            alloc, def_id
 +                        ));
 +                    }
 +
 +                    // Don't push a `TodoItem::Static` here, as it will cause statics used by
 +                    // multiple crates to be duplicated between them. It isn't necessary anyway,
 +                    // as it will get pushed by `codegen_static` when necessary.
 +                    data_id_for_static(tcx, module, def_id, false)
 +                }
 +            };
 +
 +            let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
 +            data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
 +        }
 +
 +        module.define_data(data_id, &data_ctx).unwrap();
 +        cx.done.insert(data_id);
 +    }
 +
 +    assert!(cx.todo.is_empty(), "{:?}", cx.todo);
 +}
 +
 +pub(crate) fn mir_operand_get_const_val<'tcx>(
 +    fx: &FunctionCx<'_, 'tcx, impl Module>,
 +    operand: &Operand<'tcx>,
 +) -> Option<&'tcx Const<'tcx>> {
 +    match operand {
 +        Operand::Copy(_) | Operand::Move(_) => None,
 +        Operand::Constant(const_) => Some(
 +            fx.monomorphize(&const_.literal)
 +                .eval(fx.tcx, ParamEnv::reveal_all()),
 +        ),
 +    }
 +}
index cf8fee2b1d17ce7648fe2bed4a25597f1e37463a,0000000000000000000000000000000000000000..f6f795e45615c9d39c898f89e1eac52b68a3fcd9
mode 100644,000000..100644
--- /dev/null
@@@ -1,204 -1,0 +1,202 @@@
-                 _ => {
-                     return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
-                 }
 +//! Write the debuginfo into an object file.
 +
 +use rustc_data_structures::fx::FxHashMap;
 +
 +use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
 +use gimli::{RunTimeEndian, SectionId};
 +
 +use crate::backend::WriteDebugInfo;
 +
 +use super::DebugContext;
 +
 +impl DebugContext<'_> {
 +    pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
 +        let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
 +        let root = self.dwarf.unit.root();
 +        let root = self.dwarf.unit.get_mut(root);
 +        root.set(
 +            gimli::DW_AT_ranges,
 +            AttributeValue::RangeListRef(unit_range_list_id),
 +        );
 +
 +        let mut sections = Sections::new(WriterRelocate::new(self.endian));
 +        self.dwarf.write(&mut sections).unwrap();
 +
 +        let mut section_map = FxHashMap::default();
 +        let _: Result<()> = sections.for_each_mut(|id, section| {
 +            if !section.writer.slice().is_empty() {
 +                let section_id = product.add_debug_section(id, section.writer.take());
 +                section_map.insert(id, section_id);
 +            }
 +            Ok(())
 +        });
 +
 +        let _: Result<()> = sections.for_each(|id, section| {
 +            if let Some(section_id) = section_map.get(&id) {
 +                for reloc in &section.relocs {
 +                    product.add_debug_reloc(&section_map, section_id, reloc);
 +                }
 +            }
 +            Ok(())
 +        });
 +    }
 +}
 +
 +#[derive(Clone)]
 +pub(crate) struct DebugReloc {
 +    pub(crate) offset: u32,
 +    pub(crate) size: u8,
 +    pub(crate) name: DebugRelocName,
 +    pub(crate) addend: i64,
 +    pub(crate) kind: object::RelocationKind,
 +}
 +
 +#[derive(Clone)]
 +pub(crate) enum DebugRelocName {
 +    Section(SectionId),
 +    Symbol(usize),
 +}
 +
 +/// A [`Writer`] that collects all necessary relocations.
 +#[derive(Clone)]
 +pub(super) struct WriterRelocate {
 +    pub(super) relocs: Vec<DebugReloc>,
 +    pub(super) writer: EndianVec<RunTimeEndian>,
 +}
 +
 +impl WriterRelocate {
 +    pub(super) fn new(endian: RunTimeEndian) -> Self {
 +        WriterRelocate {
 +            relocs: Vec::new(),
 +            writer: EndianVec::new(endian),
 +        }
 +    }
 +
 +    /// Perform the collected relocations to be usable for JIT usage.
 +    #[cfg(feature = "jit")]
 +    pub(super) fn relocate_for_jit(
 +        mut self,
 +        jit_product: &cranelift_simplejit::SimpleJITProduct,
 +    ) -> Vec<u8> {
 +        use std::convert::TryInto;
 +
 +        for reloc in self.relocs.drain(..) {
 +            match reloc.name {
 +                super::DebugRelocName::Section(_) => unreachable!(),
 +                super::DebugRelocName::Symbol(sym) => {
 +                    let addr = jit_product
 +                        .lookup_func(cranelift_module::FuncId::from_u32(sym.try_into().unwrap()));
 +                    let val = (addr as u64 as i64 + reloc.addend) as u64;
 +                    self.writer
 +                        .write_udata_at(reloc.offset as usize, val, reloc.size)
 +                        .unwrap();
 +                }
 +            }
 +        }
 +        self.writer.into_vec()
 +    }
 +}
 +
 +impl Writer for WriterRelocate {
 +    type Endian = RunTimeEndian;
 +
 +    fn endian(&self) -> Self::Endian {
 +        self.writer.endian()
 +    }
 +
 +    fn len(&self) -> usize {
 +        self.writer.len()
 +    }
 +
 +    fn write(&mut self, bytes: &[u8]) -> Result<()> {
 +        self.writer.write(bytes)
 +    }
 +
 +    fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
 +        self.writer.write_at(offset, bytes)
 +    }
 +
 +    fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
 +        match address {
 +            Address::Constant(val) => self.write_udata(val, size),
 +            Address::Symbol { symbol, addend } => {
 +                let offset = self.len() as u64;
 +                self.relocs.push(DebugReloc {
 +                    offset: offset as u32,
 +                    size,
 +                    name: DebugRelocName::Symbol(symbol),
 +                    addend: addend as i64,
 +                    kind: object::RelocationKind::Absolute,
 +                });
 +                self.write_udata(0, size)
 +            }
 +        }
 +    }
 +
 +    fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
 +        let offset = self.len() as u32;
 +        self.relocs.push(DebugReloc {
 +            offset,
 +            size,
 +            name: DebugRelocName::Section(section),
 +            addend: val as i64,
 +            kind: object::RelocationKind::Absolute,
 +        });
 +        self.write_udata(0, size)
 +    }
 +
 +    fn write_offset_at(
 +        &mut self,
 +        offset: usize,
 +        val: usize,
 +        section: SectionId,
 +        size: u8,
 +    ) -> Result<()> {
 +        self.relocs.push(DebugReloc {
 +            offset: offset as u32,
 +            size,
 +            name: DebugRelocName::Section(section),
 +            addend: val as i64,
 +            kind: object::RelocationKind::Absolute,
 +        });
 +        self.write_udata_at(offset, 0, size)
 +    }
 +
 +    fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
 +        match address {
 +            // Address::Constant arm copied from gimli
 +            Address::Constant(val) => {
 +                // Indirect doesn't matter here.
 +                let val = match eh_pe.application() {
 +                    gimli::DW_EH_PE_absptr => val,
 +                    gimli::DW_EH_PE_pcrel => {
 +                        // TODO: better handling of sign
 +                        let offset = self.len() as u64;
 +                        offset.wrapping_sub(val)
 +                    }
 +                    _ => {
 +                        return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
 +                    }
 +                };
 +                self.write_eh_pointer_data(val, eh_pe.format(), size)
 +            }
 +            Address::Symbol { symbol, addend } => match eh_pe.application() {
 +                gimli::DW_EH_PE_pcrel => {
 +                    let size = match eh_pe.format() {
 +                        gimli::DW_EH_PE_sdata4 => 4,
 +                        _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
 +                    };
 +                    self.relocs.push(DebugReloc {
 +                        offset: self.len() as u32,
 +                        size,
 +                        name: DebugRelocName::Symbol(symbol),
 +                        addend,
 +                        kind: object::RelocationKind::Relative,
 +                    });
 +                    self.write_udata(0, size)
 +                }
++                _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
 +            },
 +        }
 +    }
 +}
index 4de848553289603395fa8eee80cc50e7e5532223,0000000000000000000000000000000000000000..d226755d85de0eb631ce6f5978dd5251c74c5434
mode 100644,000000..100644
--- /dev/null
@@@ -1,258 -1,0 +1,258 @@@
- pub fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
 +//! Line info generation (`.debug_line`)
 +
 +use std::ffi::OsStr;
 +use std::path::{Component, Path};
 +
 +use crate::prelude::*;
 +
 +use rustc_span::{
 +    FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
 +};
 +
 +use cranelift_codegen::binemit::CodeOffset;
 +use cranelift_codegen::machinst::MachSrcLoc;
 +
 +use gimli::write::{
 +    Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
 +    UnitEntryId,
 +};
 +
 +// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
 +fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
 +    let mut iter = path.components();
 +    let file_name = match iter.next_back() {
 +        Some(Component::Normal(p)) => p,
 +        component => {
 +            panic!(
 +                "Path component {:?} of path {} is an invalid filename",
 +                component,
 +                path.display()
 +            );
 +        }
 +    };
 +    let parent = iter.as_path();
 +    (parent, file_name)
 +}
 +
 +// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
 +fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
 +    #[cfg(unix)]
 +    {
 +        use std::os::unix::ffi::OsStrExt;
 +        return path.as_bytes();
 +    }
 +    #[cfg(not(unix))]
 +    {
 +        return path.to_str().unwrap().as_bytes();
 +    }
 +}
 +
 +pub(crate) const MD5_LEN: usize = 16;
 +
-                 last_file = Some(file.clone());
++pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
 +    if hash.kind == SourceFileHashAlgorithm::Md5 {
 +        let mut buf = [0u8; MD5_LEN];
 +        buf.copy_from_slice(hash.hash_bytes());
 +        Some(FileInfo {
 +            timestamp: 0,
 +            size: 0,
 +            md5: buf,
 +        })
 +    } else {
 +        None
 +    }
 +}
 +
 +fn line_program_add_file(
 +    line_program: &mut LineProgram,
 +    line_strings: &mut LineStringTable,
 +    file: &SourceFile,
 +) -> FileId {
 +    match &file.name {
 +        FileName::Real(path) => {
 +            let (dir_path, file_name) = split_path_dir_and_file(path.stable_name());
 +            let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
 +            let file_name = osstr_as_utf8_bytes(file_name);
 +
 +            let dir_id = if !dir_name.is_empty() {
 +                let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
 +                line_program.add_directory(dir_name)
 +            } else {
 +                line_program.default_directory()
 +            };
 +            let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
 +
 +            let info = make_file_info(file.src_hash);
 +
 +            line_program.file_has_md5 &= info.is_some();
 +            line_program.add_file(file_name, dir_id, info)
 +        }
 +        // FIXME give more appropriate file names
 +        filename => {
 +            let dir_id = line_program.default_directory();
 +            let dummy_file_name = LineString::new(
 +                filename.to_string().into_bytes(),
 +                line_program.encoding(),
 +                line_strings,
 +            );
 +            line_program.add_file(dummy_file_name, dir_id, None)
 +        }
 +    }
 +}
 +
 +impl<'tcx> DebugContext<'tcx> {
 +    pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
 +        let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
 +
 +        let file_id = line_program_add_file(
 +            &mut self.dwarf.unit.line_program,
 +            &mut self.dwarf.line_strings,
 +            &loc.file,
 +        );
 +
 +        let entry = self.dwarf.unit.get_mut(entry_id);
 +
 +        entry.set(
 +            gimli::DW_AT_decl_file,
 +            AttributeValue::FileIndex(Some(file_id)),
 +        );
 +        entry.set(
 +            gimli::DW_AT_decl_line,
 +            AttributeValue::Udata(loc.line as u64),
 +        );
 +        // FIXME: probably omit this
 +        entry.set(
 +            gimli::DW_AT_decl_column,
 +            AttributeValue::Udata(loc.col.to_usize() as u64),
 +        );
 +    }
 +
 +    pub(super) fn create_debug_lines(
 +        &mut self,
 +        isa: &dyn cranelift_codegen::isa::TargetIsa,
 +        symbol: usize,
 +        entry_id: UnitEntryId,
 +        context: &Context,
 +        function_span: Span,
 +        source_info_set: &indexmap::IndexSet<SourceInfo>,
 +    ) -> CodeOffset {
 +        let tcx = self.tcx;
 +        let line_program = &mut self.dwarf.unit.line_program;
 +        let func = &context.func;
 +
 +        let line_strings = &mut self.dwarf.line_strings;
 +        let mut last_span = None;
 +        let mut last_file = None;
 +        let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
 +            if let Some(last_span) = last_span {
 +                if span == last_span {
 +                    line_program.generate_row();
 +                    return;
 +                }
 +            }
 +            last_span = Some(span);
 +
 +            // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
 +            // In order to have a good line stepping behavior in debugger, we overwrite debug
 +            // locations of macro expansions with that of the outermost expansion site
 +            // (unless the crate is being compiled with `-Z debug-macros`).
 +            let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
 +                span
 +            } else {
 +                // Walk up the macro expansion chain until we reach a non-expanded span.
 +                // We also stop at the function body level because no line stepping can occur
 +                // at the level above that.
 +                rustc_span::hygiene::walk_chain(span, function_span.ctxt())
 +            };
 +
 +            let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
 +                Ok(SourceFileAndLine { sf: file, line }) => {
 +                    let line_pos = file.line_begin_pos(span.lo());
 +
 +                    (
 +                        file,
 +                        u64::try_from(line).unwrap() + 1,
 +                        u64::from((span.lo() - line_pos).to_u32()) + 1,
 +                    )
 +                }
 +                Err(file) => (file, 0, 0),
 +            };
 +
 +            // line_program_add_file is very slow.
 +            // Optimize for the common case of the current file not being changed.
 +            let current_file_changed = if let Some(last_file) = &last_file {
 +                // If the allocations are not equal, then the files may still be equal, but that
 +                // is not a problem, as this is just an optimization.
 +                !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
 +            } else {
 +                true
 +            };
 +            if current_file_changed {
 +                let file_id = line_program_add_file(line_program, line_strings, &file);
 +                line_program.row().file = file_id;
++                last_file = Some(file);
 +            }
 +
 +            line_program.row().line = line;
 +            line_program.row().column = col;
 +            line_program.generate_row();
 +        };
 +
 +        line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
 +
 +        let mut func_end = 0;
 +
 +        if let Some(ref mcr) = &context.mach_compile_result {
 +            for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
 +                line_program.row().address_offset = u64::from(start);
 +                if !loc.is_default() {
 +                    let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
 +                    create_row_for_span(line_program, source_info.span);
 +                } else {
 +                    create_row_for_span(line_program, function_span);
 +                }
 +                func_end = end;
 +            }
 +
 +            line_program.end_sequence(u64::from(func_end));
 +
 +            func_end = mcr.buffer.total_size();
 +        } else {
 +            let encinfo = isa.encoding_info();
 +            let mut blocks = func.layout.blocks().collect::<Vec<_>>();
 +            blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
 +
 +            for block in blocks {
 +                for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
 +                    let srcloc = func.srclocs[inst];
 +                    line_program.row().address_offset = u64::from(offset);
 +                    if !srcloc.is_default() {
 +                        let source_info =
 +                            *source_info_set.get_index(srcloc.bits() as usize).unwrap();
 +                        create_row_for_span(line_program, source_info.span);
 +                    } else {
 +                        create_row_for_span(line_program, function_span);
 +                    }
 +                    func_end = offset + size;
 +                }
 +            }
 +            line_program.end_sequence(u64::from(func_end));
 +        }
 +
 +        assert_ne!(func_end, 0);
 +
 +        let entry = self.dwarf.unit.get_mut(entry_id);
 +        entry.set(
 +            gimli::DW_AT_low_pc,
 +            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
 +        );
 +        entry.set(
 +            gimli::DW_AT_high_pc,
 +            AttributeValue::Udata(u64::from(func_end)),
 +        );
 +
 +        self.emit_location(entry_id, function_span);
 +
 +        func_end
 +    }
 +}
index 61ebd931d2f144e778a2d2158bcee3d39881bcca,0000000000000000000000000000000000000000..68138404c2436819efc9ca440681f85db1d7a1c6
mode 100644,000000..100644
--- /dev/null
@@@ -1,167 -1,0 +1,168 @@@
 +//! Unwind info generation (`.eh_frame`)
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
 +
 +use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
 +
 +use crate::backend::WriteDebugInfo;
 +
 +pub(crate) struct UnwindContext<'tcx> {
 +    tcx: TyCtxt<'tcx>,
 +    frame_table: FrameTable,
 +    cie_id: Option<CieId>,
 +}
 +
 +impl<'tcx> UnwindContext<'tcx> {
 +    pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
 +        let mut frame_table = FrameTable::default();
 +
 +        let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
 +            if isa.flags().is_pic() {
 +                cie.fde_address_encoding =
 +                    gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
 +            }
 +            Some(frame_table.add_cie(cie))
 +        } else {
 +            None
 +        };
 +
 +        UnwindContext {
 +            tcx,
 +            frame_table,
 +            cie_id,
 +        }
 +    }
 +
 +    pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
 +        let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
 +            unwind_info
 +        } else {
 +            return;
 +        };
 +
 +        match unwind_info {
 +            UnwindInfo::SystemV(unwind_info) => {
 +                self.frame_table.add_fde(
 +                    self.cie_id.unwrap(),
 +                    unwind_info.to_fde(Address::Symbol {
 +                        symbol: func_id.as_u32() as usize,
 +                        addend: 0,
 +                    }),
 +                );
 +            }
 +            UnwindInfo::WindowsX64(_) => {
 +                // FIXME implement this
 +            }
++            unwind_info => unimplemented!("{:?}", unwind_info),
 +        }
 +    }
 +
 +    pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
 +        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
 +            self.tcx,
 +        )));
 +        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
 +
 +        if !eh_frame.0.writer.slice().is_empty() {
 +            let id = eh_frame.id();
 +            let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
 +            let mut section_map = FxHashMap::default();
 +            section_map.insert(id, section_id);
 +
 +            for reloc in &eh_frame.0.relocs {
 +                product.add_debug_reloc(&section_map, &section_id, reloc);
 +            }
 +        }
 +    }
 +
 +    #[cfg(feature = "jit")]
 +    pub(crate) unsafe fn register_jit(
 +        self,
 +        jit_product: &cranelift_simplejit::SimpleJITProduct,
 +    ) -> Option<UnwindRegistry> {
 +        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
 +            self.tcx,
 +        )));
 +        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
 +
 +        if eh_frame.0.writer.slice().is_empty() {
 +            return None;
 +        }
 +
 +        let mut eh_frame = eh_frame.0.relocate_for_jit(jit_product);
 +
 +        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
 +        eh_frame.extend(&[0, 0, 0, 0]);
 +
 +        let mut registrations = Vec::new();
 +
 +        // =======================================================================
 +        // Everything after this line up to the end of the file is loosly based on
 +        // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
 +        #[cfg(target_os = "macos")]
 +        {
 +            // On macOS, `__register_frame` takes a pointer to a single FDE
 +            let start = eh_frame.as_ptr();
 +            let end = start.add(eh_frame.len());
 +            let mut current = start;
 +
 +            // Walk all of the entries in the frame table and register them
 +            while current < end {
 +                let len = std::ptr::read::<u32>(current as *const u32) as usize;
 +
 +                // Skip over the CIE
 +                if current != start {
 +                    __register_frame(current);
 +                    registrations.push(current as usize);
 +                }
 +
 +                // Move to the next table entry (+4 because the length itself is not inclusive)
 +                current = current.add(len + 4);
 +            }
 +        }
 +        #[cfg(not(target_os = "macos"))]
 +        {
 +            // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
 +            let ptr = eh_frame.as_ptr();
 +            __register_frame(ptr);
 +            registrations.push(ptr as usize);
 +        }
 +
 +        Some(UnwindRegistry {
 +            _frame_table: eh_frame,
 +            registrations,
 +        })
 +    }
 +}
 +
 +/// Represents a registry of function unwind information for System V ABI.
 +pub(crate) struct UnwindRegistry {
 +    _frame_table: Vec<u8>,
 +    registrations: Vec<usize>,
 +}
 +
 +extern "C" {
 +    // libunwind import
 +    fn __register_frame(fde: *const u8);
 +    fn __deregister_frame(fde: *const u8);
 +}
 +
 +impl Drop for UnwindRegistry {
 +    fn drop(&mut self) {
 +        unsafe {
 +            // libgcc stores the frame entries as a linked list in decreasing sort order
 +            // based on the PC value of the registered entry.
 +            //
 +            // As we store the registrations in increasing order, it would be O(N^2) to
 +            // deregister in that order.
 +            //
 +            // To ensure that we just pop off the first element in the list upon every
 +            // deregistration, walk our list of registrations backwards.
 +            for fde in self.registrations.iter().rev() {
 +                __deregister_frame(*fde as *const _);
 +            }
 +        }
 +    }
 +}
index b5bab3d9e1ed10114636d2b7f754222b90b9b160,0000000000000000000000000000000000000000..3f47df7d844b326dc47cc4a7dfd12bae2da29cf0
mode 100644,000000..100644
--- /dev/null
@@@ -1,169 -1,0 +1,169 @@@
-         .chain(args.split(" "))
 +//! The JIT driver uses [`cranelift_simplejit`] to JIT execute programs without writing any object
 +//! files.
 +
 +use std::ffi::CString;
 +use std::os::raw::{c_char, c_int};
 +
 +use rustc_codegen_ssa::CrateInfo;
 +
 +use cranelift_simplejit::{SimpleJITBuilder, SimpleJITModule};
 +
 +use crate::prelude::*;
 +
 +pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
 +    if !tcx.sess.opts.output_types.should_codegen() {
 +        tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
 +    }
 +
 +    #[cfg(unix)]
 +    unsafe {
 +        // When not using our custom driver rustc will open us without the RTLD_GLOBAL flag, so
 +        // __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourself again
 +        // as global.
 +        // FIXME remove once atomic_shim is gone
 +
 +        let mut dl_info: libc::Dl_info = std::mem::zeroed();
 +        assert_ne!(
 +            libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
 +            0
 +        );
 +        assert_ne!(
 +            libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
 +            std::ptr::null_mut(),
 +        );
 +    }
 +
 +    let imported_symbols = load_imported_symbols_for_jit(tcx);
 +
 +    let mut jit_builder = SimpleJITBuilder::with_isa(
 +        crate::build_isa(tcx.sess, false),
 +        cranelift_module::default_libcall_names(),
 +    );
 +    jit_builder.symbols(imported_symbols);
 +    let mut jit_module = SimpleJITModule::new(jit_builder);
 +    assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
 +
 +    let sig = Signature {
 +        params: vec![
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +        ],
 +        returns: vec![AbiParam::new(
 +            jit_module.target_config().pointer_type(), /*isize*/
 +        )],
 +        call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
 +    };
 +    let main_func_id = jit_module
 +        .declare_function("main", Linkage::Import, &sig)
 +        .unwrap();
 +
 +    let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
 +    let mono_items = cgus
 +        .iter()
 +        .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
 +        .flatten()
 +        .collect::<FxHashMap<_, (_, _)>>()
 +        .into_iter()
 +        .collect::<Vec<(_, (_, _))>>();
 +
 +    let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
 +
 +    let (mut jit_module, global_asm, _debug, mut unwind_context) =
 +        super::time(tcx, "codegen mono items", || {
 +            super::codegen_mono_items(&mut cx, mono_items);
 +            tcx.sess.time("finalize CodegenCx", || cx.finalize())
 +        });
 +    if !global_asm.is_empty() {
 +        tcx.sess.fatal("Global asm is not supported in JIT mode");
 +    }
 +    crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
 +    crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
 +
 +    tcx.sess.abort_if_errors();
 +
 +    let jit_product = jit_module.finish();
 +
 +    let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_product) };
 +
 +    let finalized_main: *const u8 = jit_product.lookup_func(main_func_id);
 +
 +    println!("Rustc codegen cranelift will JIT run the executable, because --jit was passed");
 +
 +    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
 +        unsafe { ::std::mem::transmute(finalized_main) };
 +
 +    let args = ::std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
 +    let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
-                 assert!(name.starts_with("_"), "{:?}", name);
++        .chain(args.split(' '))
 +        .map(|arg| CString::new(arg).unwrap())
 +        .collect::<Vec<_>>();
 +    let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
 +
 +    // Push a null pointer as a terminating argument. This is required by POSIX and
 +    // useful as some dynamic linkers use it as a marker to jump over.
 +    argv.push(std::ptr::null());
 +
 +    let ret = f(args.len() as c_int, argv.as_ptr());
 +
 +    std::process::exit(ret);
 +}
 +
 +fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
 +    use rustc_middle::middle::dependency_format::Linkage;
 +
 +    let mut dylib_paths = Vec::new();
 +
 +    let crate_info = CrateInfo::new(tcx);
 +    let formats = tcx.dependency_formats(LOCAL_CRATE);
 +    let data = &formats
 +        .iter()
 +        .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
 +        .unwrap()
 +        .1;
 +    for &(cnum, _) in &crate_info.used_crates_dynamic {
 +        let src = &crate_info.used_crate_source[&cnum];
 +        match data[cnum.as_usize() - 1] {
 +            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
 +            Linkage::Static => {
 +                let name = tcx.crate_name(cnum);
 +                let mut err = tcx
 +                    .sess
 +                    .struct_err(&format!("Can't load static lib {}", name.as_str()));
 +                err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
 +                err.emit();
 +            }
 +            Linkage::Dynamic => {
 +                dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
 +            }
 +        }
 +    }
 +
 +    let mut imported_symbols = Vec::new();
 +    for path in dylib_paths {
 +        use object::Object;
 +        let lib = libloading::Library::new(&path).unwrap();
 +        let obj = std::fs::read(path).unwrap();
 +        let obj = object::File::parse(&obj).unwrap();
 +        imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
 +            let name = symbol.name().unwrap().to_string();
 +            if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
 +                return None;
 +            }
 +            let dlsym_name = if cfg!(target_os = "macos") {
 +                // On macOS `dlsym` expects the name without leading `_`.
++                assert!(name.starts_with('_'), "{:?}", name);
 +                &name[1..]
 +            } else {
 +                &name
 +            };
 +            let symbol: libloading::Symbol<'_, *const u8> =
 +                unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
 +            Some((name, *symbol))
 +        }));
 +        std::mem::forget(lib)
 +    }
 +
 +    tcx.sess.abort_if_errors();
 +
 +    imported_symbols
 +}
index 2fb353ca1628a314da40ef23bfcf016677160793,0000000000000000000000000000000000000000..a11dc57ee64536ce9893d0033e77932bff09e133
mode 100644,000000..100644
--- /dev/null
@@@ -1,120 -1,0 +1,120 @@@
-         trans_mono_item(cx, mono_item, linkage);
 +//! Drivers are responsible for calling [`codegen_mono_items`] and performing any further actions
 +//! like JIT executing or writing object files.
 +
 +use std::any::Any;
 +
 +use rustc_middle::middle::cstore::EncodedMetadata;
 +use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
 +
 +use crate::prelude::*;
 +
 +mod aot;
 +#[cfg(feature = "jit")]
 +mod jit;
 +
 +pub(crate) fn codegen_crate(
 +    tcx: TyCtxt<'_>,
 +    metadata: EncodedMetadata,
 +    need_metadata_module: bool,
 +    config: crate::BackendConfig,
 +) -> Box<dyn Any> {
 +    tcx.sess.abort_if_errors();
 +
 +    if config.use_jit {
 +        let is_executable = tcx
 +            .sess
 +            .crate_types()
 +            .contains(&rustc_session::config::CrateType::Executable);
 +        if !is_executable {
 +            tcx.sess.fatal("can't jit non-executable crate");
 +        }
 +
 +        #[cfg(feature = "jit")]
 +        let _: ! = jit::run_jit(tcx);
 +
 +        #[cfg(not(feature = "jit"))]
 +        tcx.sess
 +            .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
 +    }
 +
 +    aot::run_aot(tcx, metadata, need_metadata_module)
 +}
 +
 +fn codegen_mono_items<'tcx>(
 +    cx: &mut crate::CodegenCx<'tcx, impl Module>,
 +    mono_items: Vec<(MonoItem<'tcx>, (RLinkage, Visibility))>,
 +) {
 +    cx.tcx.sess.time("predefine functions", || {
 +        for &(mono_item, (linkage, visibility)) in &mono_items {
 +            match mono_item {
 +                MonoItem::Fn(instance) => {
 +                    let (name, sig) = get_function_name_and_sig(
 +                        cx.tcx,
 +                        cx.module.isa().triple(),
 +                        instance,
 +                        false,
 +                    );
 +                    let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
 +                    cx.module.declare_function(&name, linkage, &sig).unwrap();
 +                }
 +                MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
 +            }
 +        }
 +    });
 +
 +    for (mono_item, (linkage, visibility)) in mono_items {
 +        let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
- fn trans_mono_item<'tcx, M: Module>(
++        codegen_mono_item(cx, mono_item, linkage);
 +    }
 +}
 +
-                 .time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
++fn codegen_mono_item<'tcx, M: Module>(
 +    cx: &mut crate::CodegenCx<'tcx, M>,
 +    mono_item: MonoItem<'tcx>,
 +    linkage: Linkage,
 +) {
 +    let tcx = cx.tcx;
 +    match mono_item {
 +        MonoItem::Fn(inst) => {
 +            let _inst_guard =
 +                crate::PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name));
 +            debug_assert!(!inst.substs.needs_infer());
 +            tcx.sess
++                .time("codegen fn", || crate::base::codegen_fn(cx, inst, linkage));
 +        }
 +        MonoItem::Static(def_id) => {
 +            crate::constant::codegen_static(&mut cx.constants_cx, def_id);
 +        }
 +        MonoItem::GlobalAsm(hir_id) => {
 +            let item = tcx.hir().expect_item(hir_id);
 +            if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
 +                cx.global_asm.push_str(&*asm.as_str());
 +                cx.global_asm.push_str("\n\n");
 +            } else {
 +                bug!("Expected GlobalAsm found {:?}", item);
 +            }
 +        }
 +    }
 +}
 +
 +fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
 +    if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
 +        .as_ref()
 +        .map(|val| &**val)
 +        == Ok("1")
 +    {
 +        println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
 +        let before = std::time::Instant::now();
 +        let res = tcx.sess.time(name, f);
 +        let after = std::time::Instant::now();
 +        println!(
 +            "[{:<30}: {}] end time: {:?}",
 +            tcx.crate_name(LOCAL_CRATE),
 +            name,
 +            after - before
 +        );
 +        res
 +    } else {
 +        tcx.sess.time(name, f)
 +    }
 +}
index aa2edb2dfd4f7231ddfe99a0370742a64a3654c1,0000000000000000000000000000000000000000..04aac780125d93a354eff2a1f5ef18a69e5fda16
mode 100644,000000..100644
--- /dev/null
@@@ -1,293 -1,0 +1,293 @@@
-                     crate::base::trans_operand(fx, value).load_scalar(fx),
 +//! Codegen of [`asm!`] invocations.
 +
 +use crate::prelude::*;
 +
 +use std::fmt::Write;
 +
 +use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 +use rustc_middle::mir::InlineAsmOperand;
 +use rustc_target::asm::*;
 +
 +pub(crate) fn codegen_inline_asm<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    _span: Span,
 +    template: &[InlineAsmTemplatePiece],
 +    operands: &[InlineAsmOperand<'tcx>],
 +    options: InlineAsmOptions,
 +) {
 +    // FIXME add .eh_frame unwind info directives
 +
 +    if template.is_empty() {
 +        // Black box
 +        return;
 +    }
 +
 +    let mut slot_size = Size::from_bytes(0);
 +    let mut clobbered_regs = Vec::new();
 +    let mut inputs = Vec::new();
 +    let mut outputs = Vec::new();
 +
 +    let mut new_slot = |reg_class: InlineAsmRegClass| {
 +        let reg_size = reg_class
 +            .supported_types(InlineAsmArch::X86_64)
 +            .iter()
 +            .map(|(ty, _)| ty.size())
 +            .max()
 +            .unwrap();
 +        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
 +        slot_size = slot_size.align_to(align);
 +        let offset = slot_size;
 +        slot_size += reg_size;
 +        offset
 +    };
 +
 +    // FIXME overlap input and output slots to save stack space
 +    for operand in operands {
 +        match *operand {
 +            InlineAsmOperand::In { reg, ref value } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                inputs.push((
 +                    reg,
 +                    new_slot(reg.reg_class()),
-                         crate::base::trans_place(fx, place),
++                    crate::base::codegen_operand(fx, value).load_scalar(fx),
 +                ));
 +            }
 +            InlineAsmOperand::Out {
 +                reg,
 +                late: _,
 +                place,
 +            } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                if let Some(place) = place {
 +                    outputs.push((
 +                        reg,
 +                        new_slot(reg.reg_class()),
-                     crate::base::trans_operand(fx, in_value).load_scalar(fx),
++                        crate::base::codegen_place(fx, place),
 +                    ));
 +                }
 +            }
 +            InlineAsmOperand::InOut {
 +                reg,
 +                late: _,
 +                ref in_value,
 +                out_place,
 +            } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                inputs.push((
 +                    reg,
 +                    new_slot(reg.reg_class()),
-                         crate::base::trans_place(fx, out_place),
++                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
 +                ));
 +                if let Some(out_place) = out_place {
 +                    outputs.push((
 +                        reg,
 +                        new_slot(reg.reg_class()),
++                        crate::base::codegen_place(fx, out_place),
 +                    ));
 +                }
 +            }
 +            InlineAsmOperand::Const { value: _ } => todo!(),
 +            InlineAsmOperand::SymFn { value: _ } => todo!(),
 +            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
 +        }
 +    }
 +
 +    let inline_asm_index = fx.inline_asm_index;
 +    fx.inline_asm_index += 1;
 +    let asm_name = format!(
 +        "{}__inline_asm_{}",
 +        fx.tcx.symbol_name(fx.instance).name,
 +        inline_asm_index
 +    );
 +
 +    let generated_asm = generate_asm_wrapper(
 +        &asm_name,
 +        InlineAsmArch::X86_64,
 +        options,
 +        template,
 +        clobbered_regs,
 +        &inputs,
 +        &outputs,
 +    );
 +    fx.cx.global_asm.push_str(&generated_asm);
 +
 +    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
 +}
 +
 +fn generate_asm_wrapper(
 +    asm_name: &str,
 +    arch: InlineAsmArch,
 +    options: InlineAsmOptions,
 +    template: &[InlineAsmTemplatePiece],
 +    clobbered_regs: Vec<(InlineAsmReg, Size)>,
 +    inputs: &[(InlineAsmReg, Size, Value)],
 +    outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
 +) -> String {
 +    let mut generated_asm = String::new();
 +    writeln!(generated_asm, ".globl {}", asm_name).unwrap();
 +    writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
 +    writeln!(
 +        generated_asm,
 +        ".section .text.{},\"ax\",@progbits",
 +        asm_name
 +    )
 +    .unwrap();
 +    writeln!(generated_asm, "{}:", asm_name).unwrap();
 +
 +    generated_asm.push_str(".intel_syntax noprefix\n");
 +    generated_asm.push_str("    push rbp\n");
 +    generated_asm.push_str("    mov rbp,rdi\n");
 +
 +    // Save clobbered registers
 +    if !options.contains(InlineAsmOptions::NORETURN) {
 +        // FIXME skip registers saved by the calling convention
 +        for &(reg, offset) in &clobbered_regs {
 +            save_register(&mut generated_asm, arch, reg, offset);
 +        }
 +    }
 +
 +    // Write input registers
 +    for &(reg, offset, _value) in inputs {
 +        restore_register(&mut generated_asm, arch, reg, offset);
 +    }
 +
 +    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
 +        generated_asm.push_str(".att_syntax\n");
 +    }
 +
 +    // The actual inline asm
 +    for piece in template {
 +        match piece {
 +            InlineAsmTemplatePiece::String(s) => {
 +                generated_asm.push_str(s);
 +            }
 +            InlineAsmTemplatePiece::Placeholder {
 +                operand_idx: _,
 +                modifier: _,
 +                span: _,
 +            } => todo!(),
 +        }
 +    }
 +    generated_asm.push('\n');
 +
 +    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
 +        generated_asm.push_str(".intel_syntax noprefix\n");
 +    }
 +
 +    if !options.contains(InlineAsmOptions::NORETURN) {
 +        // Read output registers
 +        for &(reg, offset, _place) in outputs {
 +            save_register(&mut generated_asm, arch, reg, offset);
 +        }
 +
 +        // Restore clobbered registers
 +        for &(reg, offset) in clobbered_regs.iter().rev() {
 +            restore_register(&mut generated_asm, arch, reg, offset);
 +        }
 +
 +        generated_asm.push_str("    pop rbp\n");
 +        generated_asm.push_str("    ret\n");
 +    } else {
 +        generated_asm.push_str("    ud2\n");
 +    }
 +
 +    generated_asm.push_str(".att_syntax\n");
 +    writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
 +    generated_asm.push_str(".text\n");
 +    generated_asm.push_str("\n\n");
 +
 +    generated_asm
 +}
 +
 +fn call_inline_asm<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    asm_name: &str,
 +    slot_size: Size,
 +    inputs: Vec<(InlineAsmReg, Size, Value)>,
 +    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
 +) {
 +    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
 +        kind: StackSlotKind::ExplicitSlot,
 +        offset: None,
 +        size: u32::try_from(slot_size.bytes()).unwrap(),
 +    });
 +    #[cfg(debug_assertions)]
 +    fx.add_comment(stack_slot, "inline asm scratch slot");
 +
 +    let inline_asm_func = fx
 +        .cx
 +        .module
 +        .declare_function(
 +            asm_name,
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: CallConv::SystemV,
 +                params: vec![AbiParam::new(fx.pointer_type)],
 +                returns: vec![],
 +            },
 +        )
 +        .unwrap();
 +    let inline_asm_func = fx
 +        .cx
 +        .module
 +        .declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
 +    #[cfg(debug_assertions)]
 +    fx.add_comment(inline_asm_func, asm_name);
 +
 +    for (_reg, offset, value) in inputs {
 +        fx.bcx
 +            .ins()
 +            .stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
 +    }
 +
 +    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
 +    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
 +
 +    for (_reg, offset, place) in outputs {
 +        let ty = fx.clif_type(place.layout().ty).unwrap();
 +        let value = fx
 +            .bcx
 +            .ins()
 +            .stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
 +        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
 +    }
 +}
 +
 +fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
 +    match reg_or_class {
 +        InlineAsmRegOrRegClass::Reg(reg) => reg,
 +        InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
 +    }
 +}
 +
 +fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
 +    match arch {
 +        InlineAsmArch::X86_64 => {
 +            write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
 +            reg.emit(generated_asm, InlineAsmArch::X86_64, None)
 +                .unwrap();
 +            generated_asm.push('\n');
 +        }
 +        _ => unimplemented!("save_register for {:?}", arch),
 +    }
 +}
 +
 +fn restore_register(
 +    generated_asm: &mut String,
 +    arch: InlineAsmArch,
 +    reg: InlineAsmReg,
 +    offset: Size,
 +) {
 +    match arch {
 +        InlineAsmArch::X86_64 => {
 +            generated_asm.push_str("    mov ");
 +            reg.emit(generated_asm, InlineAsmArch::X86_64, None)
 +                .unwrap();
 +            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
 +        }
 +        _ => unimplemented!("restore_register for {:?}", arch),
 +    }
 +}
index 18d86f0c5f959f31805fac9712d9ab5b627b1ff3,0000000000000000000000000000000000000000..171445f2d71b62840204f24837e835ea7c3712ee
mode 100644,000000..100644
--- /dev/null
@@@ -1,123 -1,0 +1,123 @@@
-             let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).expect(&format!("kind not scalar: {:?}", kind_const)) {
 +//! Emulate LLVM intrinsics
 +
 +use crate::intrinsics::*;
 +use crate::prelude::*;
 +
 +use rustc_middle::ty::subst::SubstsRef;
 +
 +pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    intrinsic: &str,
 +    substs: SubstsRef<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    destination: Option<(CPlace<'tcx>, BasicBlock)>,
 +) {
 +    let ret = destination.unwrap().0;
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
 +            crate::trap::trap_unimplemented(fx, intrinsic);
 +        };
 +
 +        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
 +        llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
 +            let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, a.layout());
 +            let lane_ty = fx.clif_type(lane_layout.ty).unwrap();
 +            assert!(lane_count <= 32);
 +
 +            let mut res = fx.bcx.ins().iconst(types::I32, 0);
 +
 +            for lane in (0..lane_count).rev() {
 +                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
 +
 +                // cast float to int
 +                let a_lane = match lane_ty {
 +                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
 +                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
 +                    _ => a_lane,
 +                };
 +
 +                // extract sign bit of an int
 +                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
 +
 +                // shift sign bit into result
 +                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
 +                res = fx.bcx.ins().ishl_imm(res, 1);
 +                res = fx.bcx.ins().bor(res, a_lane_sign);
 +            }
 +
 +            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
 +            ret.write_cvalue(fx, res);
 +        };
 +        llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
 +            let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
-                 let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).expect(&format!("imm8 not scalar: {:?}", imm8)) {
++            let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
 +                0 => FloatCC::Equal,
 +                1 => FloatCC::LessThan,
 +                2 => FloatCC::LessThanOrEqual,
 +                7 => {
 +                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
 +                }
 +                3 => {
 +                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
 +                }
 +                4 => FloatCC::NotEqual,
 +                5 => {
 +                    unimplemented!("not less than");
 +                }
 +                6 => {
 +                    unimplemented!("not less than or equal");
 +                }
 +                kind => unreachable!("kind {:?}", kind),
 +            };
 +
 +            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +            });
 +        };
 +        llvm.x86.sse2.psrli.d, (c a, o imm8) {
 +            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
 +            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
-                 let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).expect(&format!("imm8 not scalar: {:?}", imm8)) {
++                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
 +                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
 +                    _ => fx.bcx.ins().iconst(types::I32, 0),
 +                };
 +                CValue::by_val(res_lane, res_lane_layout)
 +            });
 +        };
 +        llvm.x86.sse2.pslli.d, (c a, o imm8) {
 +            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
 +            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
++                let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
 +                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
 +                    _ => fx.bcx.ins().iconst(types::I32, 0),
 +                };
 +                CValue::by_val(res_lane, res_lane_layout)
 +            });
 +        };
 +        llvm.x86.sse2.storeu.dq, (v mem_addr, c a) {
 +            // FIXME correctly handle the unalignment
 +            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
 +            dest.write_cvalue(fx, a);
 +        };
 +    }
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
 +    }
 +}
 +
 +// llvm.x86.avx2.vperm2i128
 +// llvm.x86.ssse3.pshuf.b.128
 +// llvm.x86.avx2.pshuf.b
 +// llvm.x86.avx2.psrli.w
 +// llvm.x86.sse2.psrli.w
index 9a3e4c7b56e9c24629ad2388cecb313450d8a3ac,0000000000000000000000000000000000000000..a5f45b7abf4c802118edab6131b5a16303163ed6
mode 100644,000000..100644
--- /dev/null
@@@ -1,1099 -1,0 +1,1100 @@@
-         trans_operand($fx, $arg)
 +//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 +//! and LLVM intrinsics that have symbol names starting with `llvm.`.
 +
 +mod cpuid;
 +mod llvm;
 +mod simd;
 +
 +pub(crate) use cpuid::codegen_cpuid_call;
 +pub(crate) use llvm::codegen_llvm_intrinsic_call;
 +
 +use crate::prelude::*;
++use rustc_middle::ty::print::with_no_trimmed_paths;
 +
 +macro intrinsic_pat {
 +    (_) => {
 +        _
 +    },
 +    ($name:ident) => {
 +        stringify!($name)
 +    },
 +    ($name:literal) => {
 +        stringify!($name)
 +    },
 +    ($x:ident . $($xs:tt).*) => {
 +        concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
 +    }
 +}
 +
 +macro intrinsic_arg {
 +    (o $fx:expr, $arg:ident) => {
 +        $arg
 +    },
 +    (c $fx:expr, $arg:ident) => {
-         trans_operand($fx, $arg).load_scalar($fx)
++        codegen_operand($fx, $arg)
 +    },
 +    (v $fx:expr, $arg:ident) => {
-                             $(trans_operand($fx, $arg),)*
++        codegen_operand($fx, $arg).load_scalar($fx)
 +    }
 +}
 +
 +macro intrinsic_substs {
 +    ($substs:expr, $index:expr,) => {},
 +    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
 +        let $first = $substs.type_at($index);
 +        intrinsic_substs!($substs, $index+1, $($rest),*);
 +    }
 +}
 +
 +macro intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
 +    _ => $unknown:block;
 +    $(
 +        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
 +    )*) => {
 +        let _ = $substs; // Silence warning when substs is unused.
 +        match $intrinsic {
 +            $(
 +                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
 +                    #[allow(unused_parens, non_snake_case)]
 +                    {
 +                        $(
 +                            intrinsic_substs!($substs, 0, $($subst),*);
 +                        )?
 +                        if let [$($arg),*] = $args {
 +                            let ($($arg,)*) = (
 +                                $(intrinsic_arg!($a $fx, $arg),)*
 +                            );
 +                            #[warn(unused_parens, non_snake_case)]
 +                            {
 +                                $content
 +                            }
 +                        } else {
 +                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                        }
 +                    }
 +                }
 +            )*
 +            _ => $unknown,
 +        }
 +    }
 +}
 +
 +macro call_intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
 +        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
 +    )*) => {
 +        match $intrinsic {
 +            $(
 +                stringify!($name) => {
 +                    assert!($substs.is_noop());
 +                    if let [$(ref $arg),*] = *$args {
 +                        let ($($arg,)*) = (
-             let res = crate::num::trans_int_binop(fx, bin_op, x, y);
++                            $(codegen_operand($fx, $arg),)*
 +                        );
 +                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
 +                        $ret.write_cvalue($fx, res);
 +
 +                        if let Some((_, dest)) = $destination {
 +                            let ret_block = $fx.get_block(dest);
 +                            $fx.bcx.ins().jump(ret_block, &[]);
 +                            return;
 +                        } else {
 +                            unreachable!();
 +                        }
 +                    } else {
 +                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                    }
 +                }
 +            )*
 +            _ => {}
 +        }
 +    }
 +}
 +
 +macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
 +    crate::atomic_shim::lock_global_lock($fx);
 +
 +    let clif_ty = $fx.clif_type($T).unwrap();
 +    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
 +    let new = $fx.bcx.ins().$op(old, $src);
 +    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
 +    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
 +
 +    crate::atomic_shim::unlock_global_lock($fx);
 +}
 +
 +macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
 +    crate::atomic_shim::lock_global_lock($fx);
 +
 +    // Read old
 +    let clif_ty = $fx.clif_type($T).unwrap();
 +    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
 +
 +    // Compare
 +    let is_eq = $fx.bcx.ins().icmp(IntCC::SignedGreaterThan, old, $src);
 +    let new = $fx.bcx.ins().select(is_eq, old, $src);
 +
 +    // Write new
 +    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
 +
 +    let ret_val = CValue::by_val(old, $ret.layout());
 +    $ret.write_cvalue($fx, ret_val);
 +
 +    crate::atomic_shim::unlock_global_lock($fx);
 +}
 +
 +macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    match $ty.kind() {
 +        ty::Uint(_) | ty::Int(_) => {}
 +        _ => {
 +            $fx.tcx.sess.span_err(
 +                $span,
 +                &format!(
 +                    "`{}` intrinsic: expected basic integer type, found `{:?}`",
 +                    $intrinsic, $ty
 +                ),
 +            );
 +            // Prevent verifier error
 +            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +            return;
 +        }
 +    }
 +}
 +
 +macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    if !$ty.is_simd() {
 +        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
 +        // Prevent verifier error
 +        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +        return;
 +    }
 +}
 +
 +fn lane_type_and_count<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    layout: TyAndLayout<'tcx>,
 +) -> (TyAndLayout<'tcx>, u16) {
 +    assert!(layout.ty.is_simd());
 +    let lane_count = match layout.fields {
 +        rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
 +        _ => unreachable!("lane_type_and_count({:?})", layout),
 +    };
 +    let lane_layout = layout
 +        .field(
 +            &ty::layout::LayoutCx {
 +                tcx,
 +                param_env: ParamEnv::reveal_all(),
 +            },
 +            0,
 +        )
 +        .unwrap();
 +    (lane_layout, lane_count)
 +}
 +
 +pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
 +    let (element, count) = match &layout.abi {
 +        Abi::Vector { element, count } => (element.clone(), *count),
 +        _ => unreachable!(),
 +    };
 +
 +    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
 +        // Cranelift currently only implements icmp for 128bit vectors.
 +        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
 +        _ => None,
 +    }
 +}
 +
 +fn simd_for_each_lane<'tcx, M: Module>(
 +    fx: &mut FunctionCx<'_, 'tcx, M>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, 'tcx, M>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    let layout = val.layout();
 +
 +    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
 +    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
 +        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
 +        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
 +
 +        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_pair_for_each_lane<'tcx, M: Module>(
 +    fx: &mut FunctionCx<'_, 'tcx, M>,
 +    x: CValue<'tcx>,
 +    y: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, 'tcx, M>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    assert_eq!(x.layout(), y.layout());
 +    let layout = x.layout();
 +
 +    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
 +    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane in 0..lane_count {
 +        let lane = mir::Field::new(lane.try_into().unwrap());
 +        let x_lane = x.value_field(fx, lane).load_scalar(fx);
 +        let y_lane = y.value_field(fx, lane).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
 +
 +        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn bool_to_zero_or_max_uint<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    layout: TyAndLayout<'tcx>,
 +    val: Value,
 +) -> CValue<'tcx> {
 +    let ty = fx.clif_type(layout.ty).unwrap();
 +
 +    let int_ty = match ty {
 +        types::F32 => types::I32,
 +        types::F64 => types::I64,
 +        ty => ty,
 +    };
 +
 +    let val = fx.bcx.ins().bint(int_ty, val);
 +    let mut res = fx.bcx.ins().ineg(val);
 +
 +    if ty.is_float() {
 +        res = fx.bcx.ins().bitcast(ty, res);
 +    }
 +
 +    CValue::by_val(res, layout)
 +}
 +
 +macro simd_cmp {
 +    ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
 +
 +        if let Some(vector_ty) = vector_ty {
 +            let x = $x.load_scalar($fx);
 +            let y = $y.load_scalar($fx);
 +            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
 +
 +            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
 +            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
 +
 +            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
 +        } else {
 +            simd_pair_for_each_lane(
 +                $fx,
 +                $x,
 +                $y,
 +                $ret,
 +                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                    let res_lane = match lane_layout.ty.kind() {
 +                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
 +                        _ => unreachable!("{:?}", lane_layout.ty),
 +                    };
 +                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +                },
 +            );
 +        }
 +    },
 +    ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        // FIXME use vector icmp when possible
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_binop {
 +    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_flt_binop {
 +    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
 +    simd_pair_for_each_lane(
 +        $fx,
 +        $x,
 +        $y,
 +        $ret,
 +        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +            let res_lane = match lane_layout.ty.kind() {
 +                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
 +                _ => unreachable!("{:?}", lane_layout.ty),
 +            };
 +            CValue::by_val(res_lane, ret_lane_layout)
 +        },
 +    );
 +}
 +
 +pub(crate) fn codegen_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    destination: Option<(CPlace<'tcx>, BasicBlock)>,
 +    span: Span,
 +) {
 +    let def_id = instance.def_id();
 +    let substs = instance.substs;
 +
 +    let intrinsic = fx.tcx.item_name(def_id).as_str();
 +    let intrinsic = &intrinsic[..];
 +
 +    let ret = match destination {
 +        Some((place, _)) => place,
 +        None => {
 +            // Insert non returning intrinsics here
 +            match intrinsic {
 +                "abort" => {
 +                    trap_abort(fx, "Called intrinsic::abort.");
 +                }
 +                "unreachable" => {
 +                    trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
 +                }
 +                "transmute" => {
 +                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
 +                }
 +                _ => unimplemented!("unsupported instrinsic {}", intrinsic),
 +            }
 +            return;
 +        }
 +    };
 +
 +    if intrinsic.starts_with("simd_") {
 +        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
 +        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +        return;
 +    }
 +
 +    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +
 +    call_intrinsic_match! {
 +        fx, intrinsic, substs, ret, destination, args,
 +        expf32(flt) -> f32 => expf,
 +        expf64(flt) -> f64 => exp,
 +        exp2f32(flt) -> f32 => exp2f,
 +        exp2f64(flt) -> f64 => exp2,
 +        sqrtf32(flt) -> f32 => sqrtf,
 +        sqrtf64(flt) -> f64 => sqrt,
 +        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
 +        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
 +        powf32(a, x) -> f32 => powf,
 +        powf64(a, x) -> f64 => pow,
 +        logf32(flt) -> f32 => logf,
 +        logf64(flt) -> f64 => log,
 +        log2f32(flt) -> f32 => log2f,
 +        log2f64(flt) -> f64 => log2,
 +        log10f32(flt) -> f32 => log10f,
 +        log10f64(flt) -> f64 => log10,
 +        fabsf32(flt) -> f32 => fabsf,
 +        fabsf64(flt) -> f64 => fabs,
 +        fmaf32(x, y, z) -> f32 => fmaf,
 +        fmaf64(x, y, z) -> f64 => fma,
 +        copysignf32(x, y) -> f32 => copysignf,
 +        copysignf64(x, y) -> f64 => copysign,
 +
 +        // rounding variants
 +        // FIXME use clif insts
 +        floorf32(flt) -> f32 => floorf,
 +        floorf64(flt) -> f64 => floor,
 +        ceilf32(flt) -> f32 => ceilf,
 +        ceilf64(flt) -> f64 => ceil,
 +        truncf32(flt) -> f32 => truncf,
 +        truncf64(flt) -> f64 => trunc,
 +        roundf32(flt) -> f32 => roundf,
 +        roundf64(flt) -> f64 => round,
 +
 +        // trigonometry
 +        sinf32(flt) -> f32 => sinf,
 +        sinf64(flt) -> f64 => sin,
 +        cosf32(flt) -> f32 => cosf,
 +        cosf64(flt) -> f64 => cos,
 +        tanf32(flt) -> f32 => tanf,
 +        tanf64(flt) -> f64 => tan,
 +    }
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
 +        };
 +
 +        assume, (c _a) {};
 +        likely | unlikely, (c a) {
 +            ret.write_cvalue(fx, a);
 +        };
 +        breakpoint, () {
 +            fx.bcx.ins().debugtrap();
 +        };
 +        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            let elem_size = fx
 +                .bcx
 +                .ins()
 +                .iconst(fx.pointer_type, elem_size as i64);
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = fx.bcx.ins().imul(count, elem_size);
 +
 +            if intrinsic.contains("nonoverlapping") {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        // NOTE: the volatile variants have src and dst swapped
 +        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            let elem_size = fx
 +                .bcx
 +                .ins()
 +                .iconst(fx.pointer_type, elem_size as i64);
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = fx.bcx.ins().imul(count, elem_size);
 +
 +            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
 +            if intrinsic.contains("nonoverlapping") {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        discriminant_value, (c ptr) {
 +            let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
 +            let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
 +            ret.write_cvalue(fx, discr);
 +        };
 +        size_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let size = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                size
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
 +        };
 +        min_align_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let align = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                align
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
 +        };
 +
 +        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
 +            // FIXME trap on overflow
 +            let bin_op = match intrinsic {
 +                "unchecked_add" => BinOp::Add,
 +                "unchecked_sub" => BinOp::Sub,
 +                "unchecked_div" | "exact_div" => BinOp::Div,
 +                "unchecked_rem" => BinOp::Rem,
 +                "unchecked_shl" => BinOp::Shl,
 +                "unchecked_shr" => BinOp::Shr,
 +                _ => unreachable!("intrinsic {}", intrinsic),
 +            };
-             let res = crate::num::trans_checked_int_binop(
++            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
 +            assert_eq!(x.layout().ty, y.layout().ty);
 +            let bin_op = match intrinsic {
 +                "add_with_overflow" => BinOp::Add,
 +                "sub_with_overflow" => BinOp::Sub,
 +                "mul_with_overflow" => BinOp::Mul,
 +                _ => unreachable!("intrinsic {}", intrinsic),
 +            };
 +
-             let res = crate::num::trans_int_binop(
++            let res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                x,
 +                y,
 +            );
 +            ret.write_cvalue(fx, res);
 +        };
 +        _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
 +            assert_eq!(x.layout().ty, y.layout().ty);
 +            let bin_op = match intrinsic {
 +                "wrapping_add" => BinOp::Add,
 +                "wrapping_sub" => BinOp::Sub,
 +                "wrapping_mul" => BinOp::Mul,
 +                _ => unreachable!("intrinsic {}", intrinsic),
 +            };
-             let checked_res = crate::num::trans_checked_int_binop(
++            let res = crate::num::codegen_int_binop(
 +                fx,
 +                bin_op,
 +                x,
 +                y,
 +            );
 +            ret.write_cvalue(fx, res);
 +        };
 +        _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
 +            assert_eq!(lhs.layout().ty, rhs.layout().ty);
 +            let bin_op = match intrinsic {
 +                "saturating_add" => BinOp::Add,
 +                "saturating_sub" => BinOp::Sub,
 +                _ => unreachable!("intrinsic {}", intrinsic),
 +            };
 +
 +            let signed = type_sign(T);
 +
-                 crate::base::codegen_panic(
++            let checked_res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                lhs,
 +                rhs,
 +            );
 +
 +            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
 +            let clif_ty = fx.clif_type(T).unwrap();
 +
 +            // `select.i8` is not implemented by Cranelift.
 +            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
 +
 +            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 +
 +            let val = match (intrinsic, signed) {
 +                ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
 +                ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
 +                ("saturating_add", true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                ("saturating_sub", true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                _ => unreachable!(),
 +            };
 +
 +            let res = CValue::by_val(val, fx.layout_of(T));
 +
 +            ret.write_cvalue(fx, res);
 +        };
 +        rotate_left, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
 +            let res = fx.bcx.ins().rotl(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +        rotate_right, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
 +            let res = fx.bcx.ins().rotr(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +
 +        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
 +        // doesn't have UB both are codegen'ed the same way
 +        offset | arith_offset, (c base, v offset) {
 +            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
 +            let base_val = base.load_scalar(fx);
 +            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
 +        };
 +
 +        transmute, (c from) {
 +            ret.write_cvalue_transmute(fx, from);
 +        };
 +        write_bytes | volatile_set_memory, (c dst, v val, v count) {
 +            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
 +            let dst_ptr = dst.load_scalar(fx);
 +            // FIXME make the memset actually volatile when switching to emit_small_memset
 +            // FIXME use emit_small_memset
 +            fx.bcx.call_memset(fx.cx.module.target_config(), dst_ptr, val, count);
 +        };
 +        ctlz | ctlz_nonzero, <T> (v arg) {
 +            // FIXME trap on `ctlz_nonzero` with zero arg.
 +            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
 +                // FIXME verify this algorithm is correct
 +                let (lsb, msb) = fx.bcx.ins().isplit(arg);
 +                let lsb_lz = fx.bcx.ins().clz(lsb);
 +                let msb_lz = fx.bcx.ins().clz(msb);
 +                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
 +                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
 +                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
 +                fx.bcx.ins().uextend(types::I128, res)
 +            } else {
 +                fx.bcx.ins().clz(arg)
 +            };
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        cttz | cttz_nonzero, <T> (v arg) {
 +            // FIXME trap on `cttz_nonzero` with zero arg.
 +            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
 +                // FIXME verify this algorithm is correct
 +                let (lsb, msb) = fx.bcx.ins().isplit(arg);
 +                let lsb_tz = fx.bcx.ins().ctz(lsb);
 +                let msb_tz = fx.bcx.ins().ctz(msb);
 +                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
 +                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
 +                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
 +                fx.bcx.ins().uextend(types::I128, res)
 +            } else {
 +                fx.bcx.ins().ctz(arg)
 +            };
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        ctpop, <T> (v arg) {
 +            let res = fx.bcx.ins().popcnt(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bitreverse, <T> (v arg) {
 +            let res = fx.bcx.ins().bitrev(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bswap, <T> (v arg) {
 +            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
 +            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 +                match bcx.func.dfg.value_type(v) {
 +                    types::I8 => v,
 +
 +                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
 +                    types::I16 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 8);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
 +
 +                        let tmp2 = bcx.ins().ushr_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
 +
 +                        bcx.ins().bor(n1, n2)
 +                    }
 +                    types::I32 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 24);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
 +
 +                        let tmp3 = bcx.ins().ushr_imm(v, 8);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
 +
 +                        let tmp4 = bcx.ins().ushr_imm(v, 24);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        bcx.ins().bor(or_tmp1, or_tmp2)
 +                    }
 +                    types::I64 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 56);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 40);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
 +
 +                        let tmp3 = bcx.ins().ishl_imm(v, 24);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
 +
 +                        let tmp4 = bcx.ins().ishl_imm(v, 8);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
 +
 +                        let tmp5 = bcx.ins().ushr_imm(v, 8);
 +                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
 +
 +                        let tmp6 = bcx.ins().ushr_imm(v, 24);
 +                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
 +
 +                        let tmp7 = bcx.ins().ushr_imm(v, 40);
 +                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
 +
 +                        let tmp8 = bcx.ins().ushr_imm(v, 56);
 +                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        let or_tmp3 = bcx.ins().bor(n5, n6);
 +                        let or_tmp4 = bcx.ins().bor(n7, n8);
 +
 +                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
 +                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
 +                        bcx.ins().bor(or_tmp5, or_tmp6)
 +                    }
 +                    types::I128 => {
 +                        let (lo, hi) = bcx.ins().isplit(v);
 +                        let lo = swap(bcx, lo);
 +                        let hi = swap(bcx, hi);
 +                        bcx.ins().iconcat(hi, lo)
 +                    }
 +                    ty => unreachable!("bswap {}", ty),
 +                }
 +            };
 +            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
 +            let layout = fx.layout_of(T);
 +            if layout.abi.is_uninhabited() {
-                 );
++                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to instantiate uninhabited type `{}`", T),
 +                    span,
-                 crate::base::codegen_panic(
++                ));
 +                return;
 +            }
 +
 +            if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
-                 );
++                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
 +                    span,
-                 crate::base::codegen_panic(
++                ));
 +                return;
 +            }
 +
 +            if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
-                 );
++                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
 +                    span,
-             let val = crate::constant::trans_const_value(
++                ));
 +                return;
 +            }
 +        };
 +
 +        volatile_load | unaligned_volatile_load, (c ptr) {
 +            // Cranelift treats loads as volatile by default
 +            // FIXME ignore during stack2reg optimization
 +            // FIXME correctly handle unaligned_volatile_load
 +            let inner_layout =
 +                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
 +            ret.write_cvalue(fx, val);
 +        };
 +        volatile_store | unaligned_volatile_store, (v ptr, c val) {
 +            // Cranelift treats stores as volatile by default
 +            // FIXME ignore during stack2reg optimization
 +            // FIXME correctly handle unaligned_volatile_store
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
 +            dest.write_cvalue(fx, val);
 +        };
 +
 +        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
 +            let const_val =
 +                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
-             let val = crate::num::trans_ptr_binop(fx, BinOp::Eq, a, b);
++            let val = crate::constant::codegen_const_value(
 +                fx,
 +                const_val,
 +                ret.layout().ty,
 +            );
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_offset_from, <T> (v ptr, v base) {
 +            let isize_layout = fx.layout_of(fx.tcx.types.isize);
 +
 +            let pointee_size: u64 = fx.layout_of(T).size.bytes();
 +            let diff = fx.bcx.ins().isub(ptr, base);
 +            // FIXME this can be an exact division.
 +            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_guaranteed_eq, (c a, c b) {
-             let val = crate::num::trans_ptr_binop(fx, BinOp::Ne, a, b);
++            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_guaranteed_ne, (c a, c b) {
-             let res = crate::num::trans_float_binop(fx, match intrinsic {
++            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        caller_location, () {
 +            let caller_location = fx.get_caller_location(span);
 +            ret.write_cvalue(fx, caller_location);
 +        };
 +
 +        _ if intrinsic.starts_with("atomic_fence"), () {
 +            crate::atomic_shim::lock_global_lock(fx);
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
 +            crate::atomic_shim::lock_global_lock(fx);
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
 +            crate::atomic_shim::lock_global_lock(fx);
 +
 +            let inner_layout =
 +                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
 +            ret.write_cvalue(fx, val);
 +
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
 +            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
 +
 +            crate::atomic_shim::lock_global_lock(fx);
 +
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
 +            dest.write_cvalue(fx, val);
 +
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, T);
 +
 +            crate::atomic_shim::lock_global_lock(fx);
 +
 +            // Read old
 +            let clif_ty = fx.clif_type(T).unwrap();
 +            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
 +            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
 +
 +            // Write new
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
 +            dest.write_cvalue(fx, src);
 +
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
 +            validate_atomic_type!(fx, intrinsic, span, T);
 +
 +            let test_old = test_old.load_scalar(fx);
 +            let new = new.load_scalar(fx);
 +
 +            crate::atomic_shim::lock_global_lock(fx);
 +
 +            // Read old
 +            let clif_ty = fx.clif_type(T).unwrap();
 +            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
 +
 +            // Compare
 +            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 +            let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
 +
 +            // Write new
 +            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
 +
 +            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
 +            ret.write_cvalue(fx, ret_val);
 +
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +
 +        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let amount = amount.load_scalar(fx);
 +            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let amount = amount.load_scalar(fx);
 +            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, T);
 +
 +            let src = src.load_scalar(fx);
 +
 +            crate::atomic_shim::lock_global_lock(fx);
 +
 +            let clif_ty = fx.clif_type(T).unwrap();
 +            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
 +            let and = fx.bcx.ins().band(old, src);
 +            let new = fx.bcx.ins().bnot(and);
 +            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
 +            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
 +
 +            crate::atomic_shim::unlock_global_lock(fx);
 +        };
 +        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
 +        };
 +
 +        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
 +        };
 +        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
 +            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
 +            let src = src.load_scalar(fx);
 +            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
 +        };
 +
 +        minnumf32, (v a, v b) {
 +            let val = fx.bcx.ins().fmin(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        minnumf64, (v a, v b) {
 +            let val = fx.bcx.ins().fmin(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf32, (v a, v b) {
 +            let val = fx.bcx.ins().fmax(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf64, (v a, v b) {
 +            let val = fx.bcx.ins().fmax(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        try, (v f, v data, v _catch_fn) {
 +            // FIXME once unwinding is supported, change this to actually catch panics
 +            let f_sig = fx.bcx.func.import_signature(Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
 +                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
 +                returns: vec![],
 +            });
 +
 +            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 +
 +            let ret_val = CValue::const_val(fx, ret.layout(), 0);
 +            ret.write_cvalue(fx, ret_val);
 +        };
 +
 +        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
++            let res = crate::num::codegen_float_binop(fx, match intrinsic {
 +                "fadd_fast" => BinOp::Add,
 +                "fsub_fast" => BinOp::Sub,
 +                "fmul_fast" => BinOp::Mul,
 +                "fdiv_fast" => BinOp::Div,
 +                "frem_fast" => BinOp::Rem,
 +                _ => unreachable!(),
 +            }, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        float_to_int_unchecked, (v f) {
 +            let res = crate::cast::clif_int_or_float_cast(
 +                fx,
 +                f,
 +                false,
 +                fx.clif_type(ret.layout().ty).unwrap(),
 +                type_sign(ret.layout().ty),
 +            );
 +            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
 +        };
 +    }
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
 +    }
 +}
index b4269f4fafa0b47fbd98672d9eb5eefa23a402f4,0000000000000000000000000000000000000000..2e31c4669e25bb16be06813d419695493377566e
mode 100644,000000..100644
--- /dev/null
@@@ -1,238 -1,0 +1,238 @@@
-             let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
 +//! Codegen `extern "platform-intrinsic"` intrinsics.
 +
 +use super::*;
 +use crate::prelude::*;
 +
 +pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +    span: Span,
 +) {
 +    let def_id = instance.def_id();
 +    let substs = instance.substs;
 +
 +    let intrinsic = fx.tcx.item_name(def_id).as_str();
 +    let intrinsic = &intrinsic[..];
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
 +        };
 +
 +        simd_cast, (c a) {
 +            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
 +            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
 +                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
 +
 +                let from_signed = type_sign(lane_layout.ty);
 +                let to_signed = type_sign(ret_lane_layout.ty);
 +
 +                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
 +                CValue::by_val(ret_lane, ret_lane_layout)
 +            });
 +        };
 +
 +        // FIXME support float comparisons
 +        simd_eq, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, Equal(x, y) -> ret);
 +        };
 +        simd_ne, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, NotEqual(x, y) -> ret);
 +        };
 +        simd_lt, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedLessThan|SignedLessThan(x, y) -> ret);
 +        };
 +        simd_le, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
 +        };
 +        simd_gt, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
 +        };
 +        simd_ge, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
 +        };
 +
 +        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
 +        _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +
 +            let n: u16 = intrinsic["simd_shuffle".len()..].parse().unwrap();
 +
 +            assert_eq!(x.layout(), y.layout());
 +            let layout = x.layout();
 +
 +            let (lane_type, lane_count) = lane_type_and_count(fx.tcx, layout);
 +            let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
 +
 +            assert_eq!(lane_type, ret_lane_type);
 +            assert_eq!(n, ret_lane_count);
 +
 +            let total_len = lane_count * 2;
 +
 +            let indexes = {
 +                use rustc_middle::mir::interpret::*;
 +                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
 +
 +                let idx_bytes = match idx_const.val {
 +                    ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
 +                        let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
 +                        let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
 +                        alloc.get_bytes(fx, ptr, size).unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", idx_const),
 +                };
 +
 +                (0..ret_lane_count).map(|i| {
 +                    let i = usize::try_from(i).unwrap();
 +                    let idx = rustc_middle::mir::interpret::read_target_uint(
 +                        fx.tcx.data_layout.endian,
 +                        &idx_bytes[4*i.. 4*i + 4],
 +                    ).expect("read_target_uint");
 +                    u16::try_from(idx).expect("try_from u32")
 +                }).collect::<Vec<u16>>()
 +            };
 +
 +            for &idx in &indexes {
 +                assert!(idx < total_len, "idx {} out of range 0..{}", idx, total_len);
 +            }
 +
 +            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
 +                let in_lane = if in_idx < lane_count {
 +                    x.value_field(fx, mir::Field::new(in_idx.try_into().unwrap()))
 +                } else {
 +                    y.value_field(fx, mir::Field::new((in_idx - lane_count).try_into().unwrap()))
 +                };
 +                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
 +                out_lane.write_cvalue(fx, in_lane);
 +            }
 +        };
 +
 +        simd_insert, (c base, o idx, c val) {
 +            // FIXME validate
 +            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_fatal(
 +                    span,
 +                    "Index argument for `simd_insert` is not a constant",
 +                );
 +            };
 +
-             let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
++            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, base.layout());
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
 +            }
 +
 +            ret.write_cvalue(fx, base);
 +            let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
 +            ret_lane.write_cvalue(fx, val);
 +        };
 +
 +        simd_extract, (c v, o idx) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
 +            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_fatal(
 +                    span,
 +                    "Index argument for `simd_extract` is not a constant",
 +                );
 +            };
 +
++            let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, v.layout());
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
 +            }
 +
 +            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
 +            ret.write_cvalue(fx, ret_lane);
 +        };
 +
 +        simd_add, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
 +        };
 +        simd_sub, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
 +        };
 +        simd_mul, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
 +        };
 +        simd_div, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
 +        };
 +        simd_shl, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, ishl(x, y) -> ret);
 +        };
 +        simd_shr, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
 +        };
 +        simd_and, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, band(x, y) -> ret);
 +        };
 +        simd_or, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, bor(x, y) -> ret);
 +        };
 +        simd_xor, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, bxor(x, y) -> ret);
 +        };
 +
 +        simd_fma, (c a, c b, c c) {
 +            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
 +            assert_eq!(a.layout(), b.layout());
 +            assert_eq!(a.layout(), c.layout());
 +            let layout = a.layout();
 +
 +            let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
 +            let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
 +            assert_eq!(lane_count, ret_lane_count);
 +
 +            for lane in 0..lane_count {
 +                let lane = mir::Field::new(lane.try_into().unwrap());
 +                let a_lane = a.value_field(fx, lane).load_scalar(fx);
 +                let b_lane = b.value_field(fx, lane).load_scalar(fx);
 +                let c_lane = c.value_field(fx, lane).load_scalar(fx);
 +
 +                let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
 +                let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
 +
 +                ret.place_field(fx, lane).write_cvalue(fx, res_lane);
 +            }
 +        };
 +
 +        simd_fmin, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_flt_binop!(fx, fmin(x, y) -> ret);
 +        };
 +        simd_fmax, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_flt_binop!(fx, fmax(x, y) -> ret);
 +        };
 +
 +        // simd_fabs
 +        // simd_saturating_add
 +        // simd_bitmask
 +        // simd_select
 +        // simd_reduce_add_{,un}ordered
 +        // simd_rem
 +    }
 +}
index fd00a2e00a6a4457cce0ab897e64b04b6803fb80,0000000000000000000000000000000000000000..ba9ee0d450ee66c68821acab3f094304f92c0ba3
mode 100644,000000..100644
--- /dev/null
@@@ -1,316 -1,0 +1,317 @@@
-     pub(crate) use crate::base::{trans_operand, trans_place};
 +#![feature(
 +    rustc_private,
 +    decl_macro,
 +    type_alias_impl_trait,
 +    associated_type_bounds,
 +    never_type,
 +    try_blocks,
 +    hash_drain_filter
 +)]
 +#![warn(rust_2018_idioms)]
 +#![warn(unused_lifetimes)]
++#![warn(unreachable_pub)]
 +
 +#[cfg(feature = "jit")]
 +extern crate libc;
 +extern crate snap;
 +#[macro_use]
 +extern crate rustc_middle;
 +extern crate rustc_ast;
 +extern crate rustc_codegen_ssa;
 +extern crate rustc_data_structures;
 +extern crate rustc_errors;
 +extern crate rustc_fs_util;
 +extern crate rustc_hir;
 +extern crate rustc_incremental;
 +extern crate rustc_index;
 +extern crate rustc_session;
 +extern crate rustc_span;
 +extern crate rustc_symbol_mangling;
 +extern crate rustc_target;
 +
 +// This prevents duplicating functions and statics that are already part of the host rustc process.
 +#[allow(unused_extern_crates)]
 +extern crate rustc_driver;
 +
 +use std::any::Any;
 +
 +use rustc_codegen_ssa::traits::CodegenBackend;
 +use rustc_codegen_ssa::CodegenResults;
 +use rustc_errors::ErrorReported;
 +use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 +use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
 +use rustc_middle::ty::query::Providers;
 +use rustc_session::config::OutputFilenames;
 +use rustc_session::Session;
 +
 +use cranelift_codegen::settings::{self, Configurable};
 +
 +use crate::constant::ConstantCx;
 +use crate::prelude::*;
 +
 +mod abi;
 +mod allocator;
 +mod analyze;
 +mod archive;
 +mod atomic_shim;
 +mod backend;
 +mod base;
 +mod cast;
 +mod codegen_i128;
 +mod common;
 +mod constant;
 +mod debuginfo;
 +mod discriminant;
 +mod driver;
 +mod inline_asm;
 +mod intrinsics;
 +mod linkage;
 +mod main_shim;
 +mod metadata;
 +mod num;
 +mod optimize;
 +mod pointer;
 +mod pretty_clif;
 +mod toolchain;
 +mod trap;
 +mod unsize;
 +mod value_and_place;
 +mod vtable;
 +
 +mod prelude {
 +    pub(crate) use std::convert::{TryFrom, TryInto};
 +
 +    pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy};
 +    pub(crate) use rustc_span::Span;
 +
 +    pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 +    pub(crate) use rustc_middle::bug;
 +    pub(crate) use rustc_middle::mir::{self, *};
 +    pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
 +    pub(crate) use rustc_middle::ty::{
 +        self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable,
 +    };
 +    pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
 +
 +    pub(crate) use rustc_data_structures::fx::FxHashMap;
 +
 +    pub(crate) use rustc_index::vec::Idx;
 +
 +    pub(crate) use cranelift_codegen::entity::EntitySet;
 +    pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
 +    pub(crate) use cranelift_codegen::ir::function::Function;
 +    pub(crate) use cranelift_codegen::ir::types;
 +    pub(crate) use cranelift_codegen::ir::{
 +        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
 +        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
 +    };
 +    pub(crate) use cranelift_codegen::isa::{self, CallConv};
 +    pub(crate) use cranelift_codegen::Context;
 +    pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
 +    pub(crate) use cranelift_module::{self, DataContext, DataId, FuncId, Linkage, Module};
 +
 +    pub(crate) use crate::abi::*;
++    pub(crate) use crate::base::{codegen_operand, codegen_place};
 +    pub(crate) use crate::cast::*;
 +    pub(crate) use crate::common::*;
 +    pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
 +    pub(crate) use crate::pointer::Pointer;
 +    pub(crate) use crate::trap::*;
 +    pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
 +}
 +
 +struct PrintOnPanic<F: Fn() -> String>(F);
 +impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
 +    fn drop(&mut self) {
 +        if ::std::thread::panicking() {
 +            println!("{}", (self.0)());
 +        }
 +    }
 +}
 +
 +struct CodegenCx<'tcx, M: Module> {
 +    tcx: TyCtxt<'tcx>,
 +    module: M,
 +    global_asm: String,
 +    constants_cx: ConstantCx,
 +    cached_context: Context,
 +    vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
 +    debug_context: Option<DebugContext<'tcx>>,
 +    unwind_context: UnwindContext<'tcx>,
 +}
 +
 +impl<'tcx, M: Module> CodegenCx<'tcx, M> {
 +    fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool) -> Self {
 +        let unwind_context = UnwindContext::new(tcx, module.isa());
 +        let debug_context = if debug_info {
 +            Some(DebugContext::new(tcx, module.isa()))
 +        } else {
 +            None
 +        };
 +        CodegenCx {
 +            tcx,
 +            module,
 +            global_asm: String::new(),
 +            constants_cx: ConstantCx::default(),
 +            cached_context: Context::new(),
 +            vtables: FxHashMap::default(),
 +            debug_context,
 +            unwind_context,
 +        }
 +    }
 +
 +    fn finalize(mut self) -> (M, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
 +        self.constants_cx.finalize(self.tcx, &mut self.module);
 +        (
 +            self.module,
 +            self.global_asm,
 +            self.debug_context,
 +            self.unwind_context,
 +        )
 +    }
 +}
 +
 +#[derive(Copy, Clone, Debug)]
 +pub struct BackendConfig {
 +    pub use_jit: bool,
 +}
 +
 +pub struct CraneliftCodegenBackend {
 +    pub config: BackendConfig,
 +}
 +
 +impl CodegenBackend for CraneliftCodegenBackend {
 +    fn init(&self, sess: &Session) {
 +        if sess.lto() != rustc_session::config::Lto::No && sess.opts.cg.embed_bitcode {
 +            sess.warn("LTO is not supported. You may get a linker error.");
 +        }
 +    }
 +
 +    fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
 +        Box::new(crate::metadata::CraneliftMetadataLoader)
 +    }
 +
 +    fn provide(&self, _providers: &mut Providers) {}
 +    fn provide_extern(&self, _providers: &mut Providers) {}
 +
 +    fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
 +        vec![]
 +    }
 +
 +    fn codegen_crate<'tcx>(
 +        &self,
 +        tcx: TyCtxt<'tcx>,
 +        metadata: EncodedMetadata,
 +        need_metadata_module: bool,
 +    ) -> Box<dyn Any> {
 +        let res = driver::codegen_crate(tcx, metadata, need_metadata_module, self.config);
 +
 +        rustc_symbol_mangling::test::report_symbol_names(tcx);
 +
 +        res
 +    }
 +
 +    fn join_codegen(
 +        &self,
 +        ongoing_codegen: Box<dyn Any>,
 +        _sess: &Session,
 +    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
 +        Ok(*ongoing_codegen
 +            .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
 +            .unwrap())
 +    }
 +
 +    fn link(
 +        &self,
 +        sess: &Session,
 +        codegen_results: CodegenResults,
 +        outputs: &OutputFilenames,
 +    ) -> Result<(), ErrorReported> {
 +        use rustc_codegen_ssa::back::link::link_binary;
 +
 +        let _timer = sess.prof.generic_activity("link_crate");
 +
 +        sess.time("linking", || {
 +            let target_cpu = crate::target_triple(sess).to_string();
 +            link_binary::<crate::archive::ArArchiveBuilder<'_>>(
 +                sess,
 +                &codegen_results,
 +                outputs,
 +                &codegen_results.crate_name.as_str(),
 +                &target_cpu,
 +            );
 +        });
 +
 +        Ok(())
 +    }
 +}
 +
 +fn target_triple(sess: &Session) -> target_lexicon::Triple {
 +    sess.target.llvm_target.parse().unwrap()
 +}
 +
 +fn build_isa(sess: &Session, enable_pic: bool) -> Box<dyn isa::TargetIsa + 'static> {
 +    use target_lexicon::BinaryFormat;
 +
 +    let target_triple = crate::target_triple(sess);
 +
 +    let mut flags_builder = settings::builder();
 +    if enable_pic {
 +        flags_builder.enable("is_pic").unwrap();
 +    } else {
 +        flags_builder.set("is_pic", "false").unwrap();
 +    }
 +    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
 +    flags_builder
 +        .set(
 +            "enable_verifier",
 +            if cfg!(debug_assertions) {
 +                "true"
 +            } else {
 +                "false"
 +            },
 +        )
 +        .unwrap();
 +
 +    let tls_model = match target_triple.binary_format {
 +        BinaryFormat::Elf => "elf_gd",
 +        BinaryFormat::Macho => "macho",
 +        BinaryFormat::Coff => "coff",
 +        _ => "none",
 +    };
 +    flags_builder.set("tls_model", tls_model).unwrap();
 +
 +    flags_builder.set("enable_simd", "true").unwrap();
 +
 +    // FIXME(CraneStation/cranelift#732) fix LICM in presence of jump tables
 +    /*
 +    use rustc_session::config::OptLevel;
 +    match sess.opts.optimize {
 +        OptLevel::No => {
 +            flags_builder.set("opt_level", "none").unwrap();
 +        }
 +        OptLevel::Less | OptLevel::Default => {}
 +        OptLevel::Aggressive => {
 +            flags_builder.set("opt_level", "speed_and_size").unwrap();
 +        }
 +        OptLevel::Size | OptLevel::SizeMin => {
 +            sess.warn("Optimizing for size is not supported. Just ignoring the request");
 +        }
 +    }*/
 +
 +    let flags = settings::Flags::new(flags_builder);
 +
 +    let mut isa_builder = cranelift_codegen::isa::lookup(target_triple).unwrap();
 +    // Don't use "haswell", as it implies `has_lzcnt`.macOS CI is still at Ivy Bridge EP, so `lzcnt`
 +    // is interpreted as `bsr`.
 +    isa_builder.enable("nehalem").unwrap();
 +    isa_builder.finish(flags)
 +}
 +
 +/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
 +#[no_mangle]
 +pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
 +    Box::new(CraneliftCodegenBackend {
 +        config: BackendConfig { use_jit: false },
 +    })
 +}
index fe5d1d64443630b5e8905ef36ce49f797716aa2b,0000000000000000000000000000000000000000..dc1e2107ce712d2cf51032434ddbbfe77316e3a6
mode 100644,000000..100644
--- /dev/null
@@@ -1,35 -1,0 +1,33 @@@
-         if tcx.is_reachable_non_generic(def_id) {
-             Linkage::Export
-         } else {
-             Linkage::Hidden
-         }
 +use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn get_clif_linkage(
 +    mono_item: MonoItem<'_>,
 +    linkage: RLinkage,
 +    visibility: Visibility,
 +) -> Linkage {
 +    match (linkage, visibility) {
 +        (RLinkage::External, Visibility::Default) => Linkage::Export,
 +        (RLinkage::Internal, Visibility::Default) => Linkage::Local,
 +        (RLinkage::External, Visibility::Hidden) => Linkage::Hidden,
 +        _ => panic!("{:?} = {:?} {:?}", mono_item, linkage, visibility),
 +    }
 +}
 +
 +pub(crate) fn get_static_linkage(tcx: TyCtxt<'_>, def_id: DefId) -> Linkage {
 +    let fn_attrs = tcx.codegen_fn_attrs(def_id);
 +
 +    if let Some(linkage) = fn_attrs.linkage {
 +        match linkage {
 +            RLinkage::External => Linkage::Export,
 +            RLinkage::Internal => Linkage::Local,
 +            RLinkage::ExternalWeak | RLinkage::WeakAny => Linkage::Preemptible,
 +            _ => panic!("{:?}", linkage),
 +        }
++    } else if tcx.is_reachable_non_generic(def_id) {
++        Linkage::Export
 +    } else {
++        Linkage::Hidden
 +    }
 +}
index db34d89fe2be717c5ce9ef1a2965ee10753aa5c9,0000000000000000000000000000000000000000..10f515e38ead279f1a080d3b94511e10fe10a4f6
mode 100644,000000..100644
--- /dev/null
@@@ -1,130 -1,0 +1,130 @@@
-         ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig.clone());
 +use rustc_hir::LangItem;
 +use rustc_session::config::EntryFnType;
 +
 +use crate::prelude::*;
 +
 +/// Create the `main` function which will initialize the rust runtime and call
 +/// users main function.
 +pub(crate) fn maybe_create_entry_wrapper(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext<'_>,
 +    use_jit: bool,
 +) {
 +    let (main_def_id, use_start_lang_item) = match tcx.entry_fn(LOCAL_CRATE) {
 +        Some((def_id, entry_ty)) => (
 +            def_id.to_def_id(),
 +            match entry_ty {
 +                EntryFnType::Main => true,
 +                EntryFnType::Start => false,
 +            },
 +        ),
 +        None => return,
 +    };
 +
 +    let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
 +    if module.get_name(&*tcx.symbol_name(instance).name).is_none() {
 +        return;
 +    }
 +
 +    create_entry_fn(
 +        tcx,
 +        module,
 +        unwind_context,
 +        main_def_id,
 +        use_start_lang_item,
 +        use_jit,
 +    );
 +
 +    fn create_entry_fn(
 +        tcx: TyCtxt<'_>,
 +        m: &mut impl Module,
 +        unwind_context: &mut UnwindContext<'_>,
 +        rust_main_def_id: DefId,
 +        use_start_lang_item: bool,
 +        use_jit: bool,
 +    ) {
 +        let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
 +        // Given that `main()` has no arguments,
 +        // then its return type cannot have
 +        // late-bound regions, since late-bound
 +        // regions must appear in the argument
 +        // listing.
 +        let main_ret_ty = tcx.erase_regions(&main_ret_ty.no_bound_vars().unwrap());
 +
 +        let cmain_sig = Signature {
 +            params: vec![
 +                AbiParam::new(m.target_config().pointer_type()),
 +                AbiParam::new(m.target_config().pointer_type()),
 +            ],
 +            returns: vec![AbiParam::new(
 +                m.target_config().pointer_type(), /*isize*/
 +            )],
 +            call_conv: CallConv::triple_default(m.isa().triple()),
 +        };
 +
 +        let cmain_func_id = m
 +            .declare_function("main", Linkage::Export, &cmain_sig)
 +            .unwrap();
 +
 +        let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
 +
 +        let (main_name, main_sig) =
 +            get_function_name_and_sig(tcx, m.isa().triple(), instance, false);
 +        let main_func_id = m
 +            .declare_function(&main_name, Linkage::Import, &main_sig)
 +            .unwrap();
 +
 +        let mut ctx = Context::new();
++        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
 +            let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
 +
 +            crate::atomic_shim::init_global_lock(m, &mut bcx, use_jit);
 +
 +            let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
 +
 +            let call_inst = if use_start_lang_item {
 +                let start_def_id = tcx.require_lang_item(LangItem::Start, None);
 +                let start_instance = Instance::resolve(
 +                    tcx,
 +                    ParamEnv::reveal_all(),
 +                    start_def_id,
 +                    tcx.intern_substs(&[main_ret_ty.into()]),
 +                )
 +                .unwrap()
 +                .unwrap()
 +                .polymorphize(tcx);
 +                let start_func_id = import_function(tcx, m, start_instance);
 +
 +                let main_val = bcx
 +                    .ins()
 +                    .func_addr(m.target_config().pointer_type(), main_func_ref);
 +
 +                let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
 +                bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv])
 +            } else {
 +                // using user-defined start fn
 +                bcx.ins().call(main_func_ref, &[arg_argc, arg_argv])
 +            };
 +
 +            let result = bcx.inst_results(call_inst)[0];
 +            bcx.ins().return_(&[result]);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        m.define_function(
 +            cmain_func_id,
 +            &mut ctx,
 +            &mut cranelift_codegen::binemit::NullTrapSink {},
 +        )
 +        .unwrap();
 +        unwind_context.add_function(cmain_func_id, &ctx, m.isa());
 +    }
 +}
index 04369bf89fd2d81ac03171bd7ad4ede3ddeba434,0000000000000000000000000000000000000000..cda2a187ff9b7c1f4ac0b3f3755c7f9429db90e0
mode 100644,000000..100644
--- /dev/null
@@@ -1,108 -1,0 +1,108 @@@
-                 let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf).into();
 +//! Reading and writing of the rustc metadata for rlibs and dylibs
 +
 +use std::convert::TryFrom;
 +use std::fs::File;
 +use std::path::Path;
 +
 +use rustc_codegen_ssa::METADATA_FILENAME;
 +use rustc_data_structures::owning_ref::OwningRef;
 +use rustc_data_structures::rustc_erase_owner;
 +use rustc_data_structures::sync::MetadataRef;
 +use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
 +use rustc_middle::ty::TyCtxt;
 +use rustc_session::config;
 +use rustc_target::spec::Target;
 +
 +use crate::backend::WriteMetadata;
 +
 +pub(crate) struct CraneliftMetadataLoader;
 +
 +impl MetadataLoader for CraneliftMetadataLoader {
 +    fn get_rlib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
 +        let mut archive = ar::Archive::new(File::open(path).map_err(|e| format!("{:?}", e))?);
 +        // Iterate over all entries in the archive:
 +        while let Some(entry_result) = archive.next_entry() {
 +            let mut entry = entry_result.map_err(|e| format!("{:?}", e))?;
 +            if entry.header().identifier() == METADATA_FILENAME.as_bytes() {
 +                let mut buf = Vec::with_capacity(
 +                    usize::try_from(entry.header().size())
 +                        .expect("Rlib metadata file too big to load into memory."),
 +                );
 +                ::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?;
-         let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf).into();
++                let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
 +                return Ok(rustc_erase_owner!(buf.map_owner_box()));
 +            }
 +        }
 +
 +        Err("couldn't find metadata entry".to_string())
 +    }
 +
 +    fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
 +        use object::{Object, ObjectSection};
 +        let file = std::fs::read(path).map_err(|e| format!("read:{:?}", e))?;
 +        let file = object::File::parse(&file).map_err(|e| format!("parse: {:?}", e))?;
 +        let buf = file
 +            .section_by_name(".rustc")
 +            .ok_or("no .rustc section")?
 +            .data()
 +            .map_err(|e| format!("failed to read .rustc section: {:?}", e))?
 +            .to_owned();
++        let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
 +        Ok(rustc_erase_owner!(buf.map_owner_box()))
 +    }
 +}
 +
 +// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
 +pub(crate) fn write_metadata<P: WriteMetadata>(
 +    tcx: TyCtxt<'_>,
 +    product: &mut P,
 +) -> EncodedMetadata {
 +    use snap::write::FrameEncoder;
 +    use std::io::Write;
 +
 +    #[derive(PartialEq, Eq, PartialOrd, Ord)]
 +    enum MetadataKind {
 +        None,
 +        Uncompressed,
 +        Compressed,
 +    }
 +
 +    let kind = tcx
 +        .sess
 +        .crate_types()
 +        .iter()
 +        .map(|ty| match *ty {
 +            config::CrateType::Executable
 +            | config::CrateType::Staticlib
 +            | config::CrateType::Cdylib => MetadataKind::None,
 +
 +            config::CrateType::Rlib => MetadataKind::Uncompressed,
 +
 +            config::CrateType::Dylib | config::CrateType::ProcMacro => MetadataKind::Compressed,
 +        })
 +        .max()
 +        .unwrap_or(MetadataKind::None);
 +
 +    if kind == MetadataKind::None {
 +        return EncodedMetadata::new();
 +    }
 +
 +    let metadata = tcx.encode_metadata();
 +    if kind == MetadataKind::Uncompressed {
 +        return metadata;
 +    }
 +
 +    assert!(kind == MetadataKind::Compressed);
 +    let mut compressed = tcx.metadata_encoding_version();
 +    FrameEncoder::new(&mut compressed)
 +        .write_all(&metadata.raw_data)
 +        .unwrap();
 +
 +    product.add_rustc_section(
 +        rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx),
 +        compressed,
 +        tcx.sess.target.options.is_like_osx,
 +    );
 +
 +    metadata
 +}
index b37826d71f4e04091152c2298c36bfb1d94bc494,0000000000000000000000000000000000000000..41f4a9b9662bcfc8ce9d9f722dd9e8df96ebb829
mode 100644,000000..100644
--- /dev/null
@@@ -1,475 -1,0 +1,475 @@@
-         ty::Bool => crate::num::trans_bool_binop(fx, bin_op, in_lhs, in_rhs),
-         ty::Uint(_) | ty::Int(_) => crate::num::trans_int_binop(fx, bin_op, in_lhs, in_rhs),
-         ty::Float(_) => crate::num::trans_float_binop(fx, bin_op, in_lhs, in_rhs),
-         ty::RawPtr(..) | ty::FnPtr(..) => crate::num::trans_ptr_binop(fx, bin_op, in_lhs, in_rhs),
 +//! Various operations on integer and floating-point numbers
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
 +    use BinOp::*;
 +    use IntCC::*;
 +    Some(match bin_op {
 +        Eq => Equal,
 +        Lt => {
 +            if signed {
 +                SignedLessThan
 +            } else {
 +                UnsignedLessThan
 +            }
 +        }
 +        Le => {
 +            if signed {
 +                SignedLessThanOrEqual
 +            } else {
 +                UnsignedLessThanOrEqual
 +            }
 +        }
 +        Ne => NotEqual,
 +        Ge => {
 +            if signed {
 +                SignedGreaterThanOrEqual
 +            } else {
 +                UnsignedGreaterThanOrEqual
 +            }
 +        }
 +        Gt => {
 +            if signed {
 +                SignedGreaterThan
 +            } else {
 +                UnsignedGreaterThan
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +fn codegen_compare_bin_op<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    signed: bool,
 +    lhs: Value,
 +    rhs: Value,
 +) -> CValue<'tcx> {
 +    let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
 +    let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
 +    let val = fx.bcx.ins().bint(types::I8, val);
 +    CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
 +}
 +
 +pub(crate) fn codegen_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    match bin_op {
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            match in_lhs.layout().ty.kind() {
 +                ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
 +                    let signed = type_sign(in_lhs.layout().ty);
 +                    let lhs = in_lhs.load_scalar(fx);
 +                    let rhs = in_rhs.load_scalar(fx);
 +
 +                    let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
 +                        && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
 +                            || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
 +                    {
 +                        // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
 +                        (
 +                            fx.bcx.ins().sextend(types::I32, lhs),
 +                            fx.bcx.ins().sextend(types::I32, rhs),
 +                        )
 +                    } else {
 +                        (lhs, rhs)
 +                    };
 +
 +                    return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
 +                }
 +                _ => {}
 +            }
 +        }
 +        _ => {}
 +    }
 +
 +    match in_lhs.layout().ty.kind() {
- pub(crate) fn trans_bool_binop<'tcx>(
++        ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
++        ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
++        ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
++        ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
 +        _ => unreachable!(
 +            "{:?}({:?}, {:?})",
 +            bin_op,
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty
 +        ),
 +    }
 +}
 +
- pub(crate) fn trans_int_binop<'tcx>(
++pub(crate) fn codegen_bool_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        // Compare binops handles by `codegen_binop`.
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
 +}
 +
- pub(crate) fn trans_checked_int_binop<'tcx>(
++pub(crate) fn codegen_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let val = match bin_op {
 +        BinOp::Add => b.iadd(lhs, rhs),
 +        BinOp::Sub => b.isub(lhs, rhs),
 +        BinOp::Mul => b.imul(lhs, rhs),
 +        BinOp::Div => {
 +            if signed {
 +                b.sdiv(lhs, rhs)
 +            } else {
 +                b.udiv(lhs, rhs)
 +            }
 +        }
 +        BinOp::Rem => {
 +            if signed {
 +                b.srem(lhs, rhs)
 +            } else {
 +                b.urem(lhs, rhs)
 +            }
 +        }
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        BinOp::Shl => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
 +            fx.bcx.ins().ishl(lhs, actual_shift)
 +        }
 +        BinOp::Shr => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
 +            if signed {
 +                fx.bcx.ins().sshr(lhs, actual_shift)
 +            } else {
 +                fx.bcx.ins().ushr(lhs, actual_shift)
 +            }
 +        }
 +        // Compare binops handles by `codegen_binop`.
 +        _ => unreachable!(
 +            "{:?}({:?}, {:?})",
 +            bin_op,
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty
 +        ),
 +    };
 +
 +    CValue::by_val(val, in_lhs.layout())
 +}
 +
- pub(crate) fn trans_float_binop<'tcx>(
++pub(crate) fn codegen_checked_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "checked int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let (res, has_overflow) = match bin_op {
 +        BinOp::Add => {
 +            /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
 +            (val, c_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
 +            let val = fx.bcx.ins().iadd(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, slt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Sub => {
 +            /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
 +            (val, b_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
 +            let val = fx.bcx.ins().isub(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, sgt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Mul => {
 +            let ty = fx.bcx.func.dfg.value_type(lhs);
 +            match ty {
 +                types::I8 | types::I16 | types::I32 if !signed => {
 +                    let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::UnsignedGreaterThan,
 +                        val,
 +                        (1 << ty.bits()) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, has_overflow)
 +                }
 +                types::I8 | types::I16 | types::I32 if signed => {
 +                    let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_underflow =
 +                        fx.bcx
 +                            .ins()
 +                            .icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::SignedGreaterThan,
 +                        val,
 +                        (1 << (ty.bits() - 1)) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, fx.bcx.ins().bor(has_underflow, has_overflow))
 +                }
 +                types::I64 => {
 +                    //let val = fx.easy_call("__mulodi4", &[lhs, rhs, overflow_ptr], types::I64);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = if !signed {
 +                        let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
 +                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
 +                    } else {
 +                        let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
 +                        let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);
 +                        let not_all_ones = fx.bcx.ins().icmp_imm(
 +                            IntCC::NotEqual,
 +                            val_hi,
 +                            u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64,
 +                        );
 +                        fx.bcx.ins().band(not_all_zero, not_all_ones)
 +                    };
 +                    (val, has_overflow)
 +                }
 +                types::I128 => {
 +                    unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
 +                }
 +                _ => unreachable!("invalid non-integer type {}", ty),
 +            }
 +        }
 +        BinOp::Shl => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
 +            let val = fx.bcx.ins().ishl(lhs, actual_shift);
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx
 +                .bcx
 +                .ins()
 +                .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        BinOp::Shr => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
 +            let val = if !signed {
 +                fx.bcx.ins().ushr(lhs, actual_shift)
 +            } else {
 +                fx.bcx.ins().sshr(lhs, actual_shift)
 +            };
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx
 +                .bcx
 +                .ins()
 +                .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        _ => bug!(
 +            "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
 +            bin_op,
 +            in_lhs,
 +            in_rhs
 +        ),
 +    };
 +
 +    let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
 +
 +    // FIXME directly write to result place instead
 +    let out_place = CPlace::new_stack_slot(
 +        fx,
 +        fx.layout_of(
 +            fx.tcx
 +                .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
 +        ),
 +    );
 +    let out_layout = out_place.layout();
 +    out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
 +
 +    out_place.to_cvalue(fx)
 +}
 +
- pub(crate) fn trans_ptr_binop<'tcx>(
++pub(crate) fn codegen_float_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::Add => b.fadd(lhs, rhs),
 +        BinOp::Sub => b.fsub(lhs, rhs),
 +        BinOp::Mul => b.fmul(lhs, rhs),
 +        BinOp::Div => b.fdiv(lhs, rhs),
 +        BinOp::Rem => {
 +            let name = match in_lhs.layout().ty.kind() {
 +                ty::Float(FloatTy::F32) => "fmodf",
 +                ty::Float(FloatTy::F64) => "fmod",
 +                _ => bug!(),
 +            };
 +            return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
 +        }
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            let fltcc = match bin_op {
 +                BinOp::Eq => FloatCC::Equal,
 +                BinOp::Lt => FloatCC::LessThan,
 +                BinOp::Le => FloatCC::LessThanOrEqual,
 +                BinOp::Ne => FloatCC::NotEqual,
 +                BinOp::Ge => FloatCC::GreaterThanOrEqual,
 +                BinOp::Gt => FloatCC::GreaterThan,
 +                _ => unreachable!(),
 +            };
 +            let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
 +            let val = fx.bcx.ins().bint(types::I8, val);
 +            return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
 +        }
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, in_lhs.layout())
 +}
 +
++pub(crate) fn codegen_ptr_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let is_thin_ptr = in_lhs
 +        .layout()
 +        .ty
 +        .builtin_deref(true)
 +        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
 +        .unwrap_or(true);
 +
 +    if is_thin_ptr {
 +        match bin_op {
 +            BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +                let lhs = in_lhs.load_scalar(fx);
 +                let rhs = in_rhs.load_scalar(fx);
 +
 +                return codegen_compare_bin_op(fx, bin_op, false, lhs, rhs);
 +            }
 +            BinOp::Offset => {
 +                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
 +                let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
 +                let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +                let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
 +                let base_val = base.load_scalar(fx);
 +                let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +                return CValue::by_val(res, base.layout());
 +            }
 +            _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +        };
 +    } else {
 +        let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
 +        let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
 +
 +        let res = match bin_op {
 +            BinOp::Eq => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
 +                fx.bcx.ins().band(ptr_eq, extra_eq)
 +            }
 +            BinOp::Ne => {
 +                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
 +                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
 +                fx.bcx.ins().bor(ptr_ne, extra_ne)
 +            }
 +            BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +
 +                let ptr_cmp =
 +                    fx.bcx
 +                        .ins()
 +                        .icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
 +                let extra_cmp = fx.bcx.ins().icmp(
 +                    bin_op_to_intcc(bin_op, false).unwrap(),
 +                    lhs_extra,
 +                    rhs_extra,
 +                );
 +
 +                fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
 +            }
 +            _ => panic!("bin_op {:?} on ptr", bin_op),
 +        };
 +
 +        CValue::by_val(
 +            fx.bcx.ins().bint(types::I8, res),
 +            fx.layout_of(fx.tcx.types.bool),
 +        )
 +    }
 +}
index f368d65f7f8d8adf1d84c899547ab8ed1af20fd9,0000000000000000000000000000000000000000..3c939d5a58639869e2dc22dc8b0ec1dd602a51df
mode 100644,000000..100644
--- /dev/null
@@@ -1,507 -1,0 +1,508 @@@
-                     clif_comments.add_comment(load, format!("[BUG?] Reading uninitialized memory"));
 +//! This optimization replaces stack accesses with SSA variables and removes dead stores when possible.
 +//!
 +//! # Undefined behaviour
 +//!
 +//! This optimization is based on the assumption that stack slots which don't have their address
 +//! leaked through `stack_addr` are only accessed using `stack_load` and `stack_store` in the
 +//! function which has the stack slots. This optimization also assumes that stack slot accesses
 +//! are never out of bounds. If these assumptions are not correct, then this optimization may remove
 +//! `stack_store` instruction incorrectly, or incorrectly use a previously stored value as the value
 +//! being loaded by a `stack_load`.
 +
 +use std::collections::BTreeMap;
 +use std::fmt;
 +use std::ops::Not;
 +
 +use rustc_data_structures::fx::FxHashSet;
 +
 +use cranelift_codegen::cursor::{Cursor, FuncCursor};
 +use cranelift_codegen::ir::immediates::Offset32;
 +use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
 +
 +use crate::prelude::*;
 +
 +/// Workaround for `StackSlot` not implementing `Ord`.
 +#[derive(Copy, Clone, PartialEq, Eq)]
 +struct OrdStackSlot(StackSlot);
 +
 +impl fmt::Debug for OrdStackSlot {
 +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 +        write!(f, "{:?}", self.0)
 +    }
 +}
 +
 +impl PartialOrd for OrdStackSlot {
 +    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
 +        self.0.as_u32().partial_cmp(&rhs.0.as_u32())
 +    }
 +}
 +
 +impl Ord for OrdStackSlot {
 +    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
 +        self.0.as_u32().cmp(&rhs.0.as_u32())
 +    }
 +}
 +
 +#[derive(Debug, Default)]
 +struct StackSlotUsage {
 +    stack_addr: FxHashSet<Inst>,
 +    stack_load: FxHashSet<Inst>,
 +    stack_store: FxHashSet<Inst>,
 +}
 +
 +impl StackSlotUsage {
 +    fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> {
 +        self.stack_store
 +            .iter()
 +            .cloned()
 +            .filter(|&store| {
 +                match spatial_overlap(&ctx.func, store, load) {
 +                    SpatialOverlap::No => false, // Can never be the source of the loaded value.
 +                    SpatialOverlap::Partial | SpatialOverlap::Full => true,
 +                }
 +            })
 +            .filter(|&store| {
 +                match temporal_order(ctx, store, load) {
 +                    TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
 +                    TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
 +                }
 +            })
 +            .collect::<Vec<Inst>>()
 +    }
 +
 +    fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> {
 +        self.stack_load
 +            .iter()
 +            .cloned()
 +            .filter(|&load| {
 +                match spatial_overlap(&ctx.func, store, load) {
 +                    SpatialOverlap::No => false, // Can never be the source of the loaded value.
 +                    SpatialOverlap::Partial | SpatialOverlap::Full => true,
 +                }
 +            })
 +            .filter(|&load| {
 +                match temporal_order(ctx, store, load) {
 +                    TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
 +                    TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
 +                }
 +            })
 +            .collect::<Vec<Inst>>()
 +    }
 +
 +    fn remove_unused_stack_addr(func: &mut Function, inst: Inst) {
 +        func.dfg.detach_results(inst);
 +        func.dfg.replace(inst).nop();
 +    }
 +
 +    fn remove_unused_load(func: &mut Function, load: Inst) {
 +        func.dfg.detach_results(load);
 +        func.dfg.replace(load).nop();
 +    }
 +
 +    fn remove_dead_store(&mut self, func: &mut Function, store: Inst) {
 +        func.dfg.replace(store).nop();
 +        self.stack_store.remove(&store);
 +    }
 +
 +    fn change_load_to_alias(&mut self, func: &mut Function, load: Inst, value: Value) {
 +        let loaded_value = func.dfg.inst_results(load)[0];
 +        let loaded_type = func.dfg.value_type(loaded_value);
 +
 +        if func.dfg.value_type(value) == loaded_type {
 +            func.dfg.detach_results(load);
 +            func.dfg.replace(load).nop();
 +            func.dfg.change_to_alias(loaded_value, value);
 +        } else {
 +            func.dfg.replace(load).bitcast(loaded_type, value);
 +        }
 +
 +        self.stack_load.remove(&load);
 +    }
 +}
 +
 +struct OptimizeContext<'a> {
 +    ctx: &'a mut Context,
 +    stack_slot_usage_map: BTreeMap<OrdStackSlot, StackSlotUsage>,
 +}
 +
 +impl<'a> OptimizeContext<'a> {
 +    fn for_context(ctx: &'a mut Context) -> Self {
 +        ctx.flowgraph(); // Compute cfg and domtree.
 +
 +        // Record all stack_addr, stack_load and stack_store instructions.
 +        let mut stack_slot_usage_map = BTreeMap::<OrdStackSlot, StackSlotUsage>::new();
 +
 +        let mut cursor = FuncCursor::new(&mut ctx.func);
 +        while let Some(_block) = cursor.next_block() {
 +            while let Some(inst) = cursor.next_inst() {
 +                match cursor.func.dfg[inst] {
 +                    InstructionData::StackLoad {
 +                        opcode: Opcode::StackAddr,
 +                        stack_slot,
 +                        offset: _,
 +                    } => {
 +                        stack_slot_usage_map
 +                            .entry(OrdStackSlot(stack_slot))
 +                            .or_insert_with(StackSlotUsage::default)
 +                            .stack_addr
 +                            .insert(inst);
 +                    }
 +                    InstructionData::StackLoad {
 +                        opcode: Opcode::StackLoad,
 +                        stack_slot,
 +                        offset: _,
 +                    } => {
 +                        stack_slot_usage_map
 +                            .entry(OrdStackSlot(stack_slot))
 +                            .or_insert_with(StackSlotUsage::default)
 +                            .stack_load
 +                            .insert(inst);
 +                    }
 +                    InstructionData::StackStore {
 +                        opcode: Opcode::StackStore,
 +                        arg: _,
 +                        stack_slot,
 +                        offset: _,
 +                    } => {
 +                        stack_slot_usage_map
 +                            .entry(OrdStackSlot(stack_slot))
 +                            .or_insert_with(StackSlotUsage::default)
 +                            .stack_store
 +                            .insert(inst);
 +                    }
 +                    _ => {}
 +                }
 +            }
 +        }
 +
 +        OptimizeContext {
 +            ctx,
 +            stack_slot_usage_map,
 +        }
 +    }
 +}
 +
 +pub(super) fn optimize_function(
 +    ctx: &mut Context,
 +    #[cfg_attr(not(debug_assertions), allow(unused_variables))] clif_comments: &mut crate::pretty_clif::CommentWriter,
 +) {
 +    combine_stack_addr_with_load_store(&mut ctx.func);
 +
 +    let mut opt_ctx = OptimizeContext::for_context(ctx);
 +
 +    // FIXME Repeat following instructions until fixpoint.
 +
 +    remove_unused_stack_addr_and_stack_load(&mut opt_ctx);
 +
 +    #[cfg(debug_assertions)]
 +    {
 +        for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map {
 +            clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage));
 +        }
 +    }
 +
 +    for (stack_slot, users) in opt_ctx.stack_slot_usage_map.iter_mut() {
 +        if users.stack_addr.is_empty().not() {
 +            // Stack addr leaked; there may be unknown loads and stores.
 +            // FIXME use stacked borrows to optimize
 +            continue;
 +        }
 +
 +        for load in users.stack_load.clone().into_iter() {
 +            let potential_stores = users.potential_stores_for_load(&opt_ctx.ctx, load);
 +
 +            #[cfg(debug_assertions)]
 +            for &store in &potential_stores {
 +                clif_comments.add_comment(
 +                    load,
 +                    format!(
 +                        "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
 +                        opt_ctx.ctx.func.dfg.display_inst(store, None),
 +                        opt_ctx.ctx.func.dfg.display_inst(load, None),
 +                        spatial_overlap(&opt_ctx.ctx.func, store, load),
 +                        temporal_order(&opt_ctx.ctx, store, load),
 +                    ),
 +                );
 +            }
 +
 +            match *potential_stores {
 +                [] => {
 +                    #[cfg(debug_assertions)]
++                    clif_comments
++                        .add_comment(load, "[BUG?] Reading uninitialized memory".to_string());
 +                }
 +                [store]
 +                    if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full
 +                        && temporal_order(&opt_ctx.ctx, store, load)
 +                            == TemporalOrder::DefinitivelyBefore =>
 +                {
 +                    // Only one store could have been the origin of the value.
 +                    let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0];
 +
 +                    #[cfg(debug_assertions)]
 +                    clif_comments
 +                        .add_comment(load, format!("Store to load forward {} -> {}", store, load));
 +
 +                    users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value);
 +                }
 +                _ => {} // FIXME implement this
 +            }
 +        }
 +
 +        for store in users.stack_store.clone().into_iter() {
 +            let potential_loads = users.potential_loads_of_store(&opt_ctx.ctx, store);
 +
 +            #[cfg(debug_assertions)]
 +            for &load in &potential_loads {
 +                clif_comments.add_comment(
 +                    store,
 +                    format!(
 +                        "Potential load from store {} <- {} ({:?}, {:?})",
 +                        opt_ctx.ctx.func.dfg.display_inst(load, None),
 +                        opt_ctx.ctx.func.dfg.display_inst(store, None),
 +                        spatial_overlap(&opt_ctx.ctx.func, store, load),
 +                        temporal_order(&opt_ctx.ctx, store, load),
 +                    ),
 +                );
 +            }
 +
 +            if potential_loads.is_empty() {
 +                // Never loaded; can safely remove all stores and the stack slot.
 +                // FIXME also remove stores when there is always a next store before a load.
 +
 +                #[cfg(debug_assertions)]
 +                clif_comments.add_comment(
 +                    store,
 +                    format!(
 +                        "Remove dead stack store {} of {}",
 +                        opt_ctx.ctx.func.dfg.display_inst(store, None),
 +                        stack_slot.0
 +                    ),
 +                );
 +
 +                users.remove_dead_store(&mut opt_ctx.ctx.func, store);
 +            }
 +        }
 +
 +        if users.stack_store.is_empty() && users.stack_load.is_empty() {
 +            opt_ctx.ctx.func.stack_slots[stack_slot.0].size = 0;
 +        }
 +    }
 +}
 +
 +fn combine_stack_addr_with_load_store(func: &mut Function) {
 +    // Turn load and store into stack_load and stack_store when possible.
 +    let mut cursor = FuncCursor::new(func);
 +    while let Some(_block) = cursor.next_block() {
 +        while let Some(inst) = cursor.next_inst() {
 +            match cursor.func.dfg[inst] {
 +                InstructionData::Load {
 +                    opcode: Opcode::Load,
 +                    arg: addr,
 +                    flags: _,
 +                    offset,
 +                } => {
 +                    if cursor.func.dfg.ctrl_typevar(inst) == types::I128
 +                        || cursor.func.dfg.ctrl_typevar(inst).is_vector()
 +                    {
 +                        continue; // WORKAROUD: stack_load.i128 not yet implemented
 +                    }
 +                    if let Some((stack_slot, stack_addr_offset)) =
 +                        try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
 +                    {
 +                        if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
 +                        {
 +                            let ty = cursor.func.dfg.ctrl_typevar(inst);
 +                            cursor.func.dfg.replace(inst).stack_load(
 +                                ty,
 +                                stack_slot,
 +                                combined_offset,
 +                            );
 +                        }
 +                    }
 +                }
 +                InstructionData::Store {
 +                    opcode: Opcode::Store,
 +                    args: [value, addr],
 +                    flags: _,
 +                    offset,
 +                } => {
 +                    if cursor.func.dfg.ctrl_typevar(inst) == types::I128
 +                        || cursor.func.dfg.ctrl_typevar(inst).is_vector()
 +                    {
 +                        continue; // WORKAROUND: stack_store.i128 not yet implemented
 +                    }
 +                    if let Some((stack_slot, stack_addr_offset)) =
 +                        try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
 +                    {
 +                        if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
 +                        {
 +                            cursor.func.dfg.replace(inst).stack_store(
 +                                value,
 +                                stack_slot,
 +                                combined_offset,
 +                            );
 +                        }
 +                    }
 +                }
 +                _ => {}
 +            }
 +        }
 +    }
 +}
 +
 +fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
 +    // FIXME incrementally rebuild on each call?
 +    let mut stack_addr_load_insts_users = FxHashMap::<Inst, FxHashSet<Inst>>::default();
 +
 +    let mut cursor = FuncCursor::new(&mut opt_ctx.ctx.func);
 +    while let Some(_block) = cursor.next_block() {
 +        while let Some(inst) = cursor.next_inst() {
 +            for &arg in cursor.func.dfg.inst_args(inst) {
 +                if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) {
 +                    match cursor.func.dfg[arg_origin].opcode() {
 +                        Opcode::StackAddr | Opcode::StackLoad => {
 +                            stack_addr_load_insts_users
 +                                .entry(arg_origin)
 +                                .or_insert_with(FxHashSet::default)
 +                                .insert(inst);
 +                        }
 +                        _ => {}
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    #[cfg(debug_assertions)]
 +    for inst in stack_addr_load_insts_users.keys() {
 +        let mut is_recorded_stack_addr_or_stack_load = false;
 +        for stack_slot_users in opt_ctx.stack_slot_usage_map.values() {
 +            is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst)
 +                || stack_slot_users.stack_load.contains(inst);
 +        }
 +        assert!(is_recorded_stack_addr_or_stack_load);
 +    }
 +
 +    // Replace all unused stack_addr and stack_load instructions with nop.
 +    let mut func = &mut opt_ctx.ctx.func;
 +
 +    for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() {
 +        stack_slot_users
 +            .stack_addr
 +            .drain_filter(|inst| {
 +                stack_addr_load_insts_users
 +                    .get(inst)
 +                    .map(|users| users.is_empty())
 +                    .unwrap_or(true)
 +            })
 +            .for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
 +
 +        stack_slot_users
 +            .stack_load
 +            .drain_filter(|inst| {
 +                stack_addr_load_insts_users
 +                    .get(inst)
 +                    .map(|users| users.is_empty())
 +                    .unwrap_or(true)
 +            })
 +            .for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
 +    }
 +}
 +
 +fn try_get_stack_slot_and_offset_for_addr(
 +    func: &Function,
 +    addr: Value,
 +) -> Option<(StackSlot, Offset32)> {
 +    if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
 +        if let InstructionData::StackLoad {
 +            opcode: Opcode::StackAddr,
 +            stack_slot,
 +            offset,
 +        } = func.dfg[addr_inst]
 +        {
 +            return Some((stack_slot, offset));
 +        }
 +    }
 +    None
 +}
 +
 +#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 +enum SpatialOverlap {
 +    No,
 +    Partial,
 +    Full,
 +}
 +
 +fn spatial_overlap(func: &Function, src: Inst, dest: Inst) -> SpatialOverlap {
 +    fn inst_info(func: &Function, inst: Inst) -> (StackSlot, Offset32, u32) {
 +        match func.dfg[inst] {
 +            InstructionData::StackLoad {
 +                opcode: Opcode::StackAddr,
 +                stack_slot,
 +                offset,
 +            }
 +            | InstructionData::StackLoad {
 +                opcode: Opcode::StackLoad,
 +                stack_slot,
 +                offset,
 +            }
 +            | InstructionData::StackStore {
 +                opcode: Opcode::StackStore,
 +                stack_slot,
 +                offset,
 +                arg: _,
 +            } => (stack_slot, offset, func.dfg.ctrl_typevar(inst).bytes()),
 +            _ => unreachable!("{:?}", func.dfg[inst]),
 +        }
 +    }
 +
 +    debug_assert_ne!(src, dest);
 +
 +    let (src_ss, src_offset, src_size) = inst_info(func, src);
 +    let (dest_ss, dest_offset, dest_size) = inst_info(func, dest);
 +
 +    if src_ss != dest_ss {
 +        return SpatialOverlap::No;
 +    }
 +
 +    if src_offset == dest_offset && src_size == dest_size {
 +        return SpatialOverlap::Full;
 +    }
 +
 +    let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
 +    let dest_end: i64 = dest_offset
 +        .try_add_i64(i64::from(dest_size))
 +        .unwrap()
 +        .into();
 +    if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
 +        return SpatialOverlap::No;
 +    }
 +
 +    SpatialOverlap::Partial
 +}
 +
 +#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 +enum TemporalOrder {
 +    /// `src` will never be executed before `dest`.
 +    NeverBefore,
 +
 +    /// `src` may be executed before `dest`.
 +    MaybeBefore,
 +
 +    /// `src` will always be executed before `dest`.
 +    /// There may still be other instructions in between.
 +    DefinitivelyBefore,
 +}
 +
 +fn temporal_order(ctx: &Context, src: Inst, dest: Inst) -> TemporalOrder {
 +    debug_assert_ne!(src, dest);
 +
 +    if ctx.domtree.dominates(src, dest, &ctx.func.layout) {
 +        TemporalOrder::DefinitivelyBefore
 +    } else if ctx.domtree.dominates(src, dest, &ctx.func.layout) {
 +        TemporalOrder::NeverBefore
 +    } else {
 +        TemporalOrder::MaybeBefore
 +    }
 +}
index 7f8ab953d7199d62b3f53914f50584e11aeb20ab,0000000000000000000000000000000000000000..ff878af7f5eef9049199acd55e2c4a8405b7b8d6
mode 100644,000000..100644
--- /dev/null
@@@ -1,287 -1,0 +1,287 @@@
-                 writeln!(w, "")?;
 +//! This module provides the [CommentWriter] which makes it possible
 +//! to add comments to the written cranelift ir.
 +//!
 +//! # Example
 +//!
 +//! ```clif
 +//! test compile
 +//! target x86_64
 +//!
 +//! function u0:0(i64, i64, i64) system_v {
 +//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
 +//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
 +//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
 +//!
 +//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
 +//! ; msg   loc.idx    param    pass mode            ssa flags  ty
 +//! ; ret    _0      = v0       ByRef                NOT_SSA    (u8, u8)
 +//! ; arg    _1      = v1       ByRef                NOT_SSA    IsNotEmpty
 +//! ; arg    _2.0    = v2       ByVal(types::I64)    NOT_SSA    &&[u16]
 +//!
 +//!     ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
 +//!     ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
 +//!     ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
 +//!     sig0 = (i64, i64, i64) system_v
 +//!     sig1 = (i64, i64, i64) system_v
 +//!     fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
 +//!
 +//! block0(v0: i64, v1: i64, v2: i64):
 +//!     v3 = stack_addr.i64 ss0
 +//!     v4 = stack_addr.i64 ss1
 +//!     store v2, v4
 +//!     v5 = stack_addr.i64 ss2
 +//!     jump block1
 +//!
 +//! block1:
 +//!     nop
 +//! ; _3 = &mut _1
 +//! ; _4 = _2
 +//!     v6 = load.i64 v4
 +//!     store v6, v5
 +//! ;
 +//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
 +//!     v7 = load.i64 v5
 +//!     call fn0(v0, v3, v7)
 +//!     jump block2
 +//!
 +//! block2:
 +//!     nop
 +//! ;
 +//! ; return
 +//!     return
 +//! }
 +//! ```
 +
 +use std::fmt;
 +
 +use cranelift_codegen::{
 +    entity::SecondaryMap,
 +    ir::{entities::AnyEntity, function::DisplayFunctionAnnotations},
 +    write::{FuncWriter, PlainWriter},
 +};
 +
 +use rustc_session::config::OutputType;
 +
 +use crate::prelude::*;
 +
 +#[derive(Debug)]
 +pub(crate) struct CommentWriter {
 +    global_comments: Vec<String>,
 +    entity_comments: FxHashMap<AnyEntity, String>,
 +}
 +
 +impl CommentWriter {
 +    pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
 +        let global_comments = if cfg!(debug_assertions) {
 +            vec![
 +                format!("symbol {}", tcx.symbol_name(instance).name),
 +                format!("instance {:?}", instance),
 +                format!(
 +                    "sig {:?}",
 +                    tcx.normalize_erasing_late_bound_regions(
 +                        ParamEnv::reveal_all(),
 +                        &crate::abi::fn_sig_for_fn_abi(tcx, instance)
 +                    )
 +                ),
 +                String::new(),
 +            ]
 +        } else {
 +            vec![]
 +        };
 +
 +        CommentWriter {
 +            global_comments,
 +            entity_comments: FxHashMap::default(),
 +        }
 +    }
 +}
 +
 +#[cfg(debug_assertions)]
 +impl CommentWriter {
 +    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
 +        self.global_comments.push(comment.into());
 +    }
 +
 +    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
 +        &mut self,
 +        entity: E,
 +        comment: S,
 +    ) {
 +        use std::collections::hash_map::Entry;
 +        match self.entity_comments.entry(entity.into()) {
 +            Entry::Occupied(mut occ) => {
 +                occ.get_mut().push('\n');
 +                occ.get_mut().push_str(comment.as_ref());
 +            }
 +            Entry::Vacant(vac) => {
 +                vac.insert(comment.into());
 +            }
 +        }
 +    }
 +}
 +
 +impl FuncWriter for &'_ CommentWriter {
 +    fn write_preamble(
 +        &mut self,
 +        w: &mut dyn fmt::Write,
 +        func: &Function,
 +        reg_info: Option<&isa::RegInfo>,
 +    ) -> Result<bool, fmt::Error> {
 +        for comment in &self.global_comments {
 +            if !comment.is_empty() {
 +                writeln!(w, "; {}", comment)?;
 +            } else {
-             writeln!(w, "")?;
++                writeln!(w)?;
 +            }
 +        }
 +        if !self.global_comments.is_empty() {
-             writeln!(w, "")
++            writeln!(w)?;
 +        }
 +
 +        self.super_preamble(w, func, reg_info)
 +    }
 +
 +    fn write_entity_definition(
 +        &mut self,
 +        w: &mut dyn fmt::Write,
 +        _func: &Function,
 +        entity: AnyEntity,
 +        value: &dyn fmt::Display,
 +    ) -> fmt::Result {
 +        write!(w, "    {} = {}", entity, value)?;
 +
 +        if let Some(comment) = self.entity_comments.get(&entity) {
 +            writeln!(w, " ; {}", comment.replace('\n', "\n; "))
 +        } else {
-         writeln!(file, "")?;
++            writeln!(w)
 +        }
 +    }
 +
 +    fn write_block_header(
 +        &mut self,
 +        w: &mut dyn fmt::Write,
 +        func: &Function,
 +        isa: Option<&dyn isa::TargetIsa>,
 +        block: Block,
 +        indent: usize,
 +    ) -> fmt::Result {
 +        PlainWriter.write_block_header(w, func, isa, block, indent)
 +    }
 +
 +    fn write_instruction(
 +        &mut self,
 +        w: &mut dyn fmt::Write,
 +        func: &Function,
 +        aliases: &SecondaryMap<Value, Vec<Value>>,
 +        isa: Option<&dyn isa::TargetIsa>,
 +        inst: Inst,
 +        indent: usize,
 +    ) -> fmt::Result {
 +        PlainWriter.write_instruction(w, func, aliases, isa, inst, indent)?;
 +        if let Some(comment) = self.entity_comments.get(&inst.into()) {
 +            writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
 +        }
 +        Ok(())
 +    }
 +}
 +
 +#[cfg(debug_assertions)]
 +impl<M: Module> FunctionCx<'_, '_, M> {
 +    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
 +        self.clif_comments.add_global_comment(comment);
 +    }
 +
 +    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
 +        &mut self,
 +        entity: E,
 +        comment: S,
 +    ) {
 +        self.clif_comments.add_comment(entity, comment);
 +    }
 +}
 +
 +pub(crate) fn write_clif_file<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    postfix: &str,
 +    isa: Option<&dyn cranelift_codegen::isa::TargetIsa>,
 +    instance: Instance<'tcx>,
 +    context: &cranelift_codegen::Context,
 +    mut clif_comments: &CommentWriter,
 +) {
 +    use std::io::Write;
 +
 +    if !cfg!(debug_assertions)
 +        && !tcx
 +            .sess
 +            .opts
 +            .output_types
 +            .contains_key(&OutputType::LlvmAssembly)
 +    {
 +        return;
 +    }
 +
 +    let value_ranges = isa.map(|isa| {
 +        context
 +            .build_value_labels_ranges(isa)
 +            .expect("value location ranges")
 +    });
 +
 +    let clif_output_dir = tcx.output_filenames(LOCAL_CRATE).with_extension("clif");
 +
 +    match std::fs::create_dir(&clif_output_dir) {
 +        Ok(()) => {}
 +        Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
 +        res @ Err(_) => res.unwrap(),
 +    }
 +
 +    let clif_file_name = clif_output_dir.join(format!(
 +        "{}.{}.clif",
 +        tcx.symbol_name(instance).name,
 +        postfix
 +    ));
 +
 +    let mut clif = String::new();
 +    cranelift_codegen::write::decorate_function(
 +        &mut clif_comments,
 +        &mut clif,
 +        &context.func,
 +        &DisplayFunctionAnnotations {
 +            isa: Some(&*crate::build_isa(
 +                tcx.sess, true, /* PIC doesn't matter here */
 +            )),
 +            value_ranges: value_ranges.as_ref(),
 +        },
 +    )
 +    .unwrap();
 +
 +    let res: std::io::Result<()> = try {
 +        let mut file = std::fs::File::create(clif_file_name)?;
 +        let target_triple = crate::target_triple(tcx.sess);
 +        writeln!(file, "test compile")?;
 +        writeln!(file, "set is_pic")?;
 +        writeln!(file, "set enable_simd")?;
 +        writeln!(file, "target {} haswell", target_triple)?;
++        writeln!(file)?;
 +        file.write_all(clif.as_bytes())?;
 +    };
 +    if let Err(err) = res {
 +        tcx.sess.warn(&format!("err writing clif file: {}", err));
 +    }
 +}
 +
 +impl<M: Module> fmt::Debug for FunctionCx<'_, '_, M> {
 +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 +        writeln!(f, "{:?}", self.instance.substs)?;
 +        writeln!(f, "{:?}", self.local_map)?;
 +
 +        let mut clif = String::new();
 +        ::cranelift_codegen::write::decorate_function(
 +            &mut &self.clif_comments,
 +            &mut clif,
 +            &self.bcx.func,
 +            &DisplayFunctionAnnotations::default(),
 +        )
 +        .unwrap();
 +        writeln!(f, "\n{}", clif)
 +    }
 +}
index 37dca77bdbd093e8baa0050099145bfde5a9fd2b,0000000000000000000000000000000000000000..690d96764a8f588d1e8ed0e5827c71aec89c0d05
mode 100644,000000..100644
--- /dev/null
@@@ -1,70 -1,0 +1,69 @@@
 +//! Helpers used to print a message and abort in case of certain panics and some detected UB.
 +
 +use crate::prelude::*;
 +
 +fn codegen_print(fx: &mut FunctionCx<'_, '_, impl Module>, msg: &str) {
 +    let puts = fx
 +        .cx
 +        .module
 +        .declare_function(
 +            "puts",
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
 +                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
 +                returns: vec![AbiParam::new(types::I32)],
 +            },
 +        )
 +        .unwrap();
 +    let puts = fx.cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
 +    #[cfg(debug_assertions)]
 +    {
 +        fx.add_comment(puts, "puts");
 +    }
 +
 +    let symbol_name = fx.tcx.symbol_name(fx.instance);
 +    let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, symbol_name, msg);
 +    let msg_ptr = fx.anonymous_str("trap", &real_msg);
 +    fx.bcx.ins().call(puts, &[msg_ptr]);
 +}
 +
 +/// Trap code: user1
 +pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::User(1));
 +}
 +
 +/// Use this for example when a function call should never return. This will fill the current block,
 +/// so you can **not** add instructions to it afterwards.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
 +
 +/// Like `trap_unreachable` but returns a fake value of the specified type.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unreachable_ret_value<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    dest_layout: TyAndLayout<'tcx>,
 +    msg: impl AsRef<str>,
 +) -> CValue<'tcx> {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
 +    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
 +}
 +
 +/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
 +/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
 +/// to it afterwards.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
 +}
index 5d513cb3ea022b8d2d74ab88da7c6b3ed8009f3d,0000000000000000000000000000000000000000..2b9ea5273b608221abebd5fb84b6531aada3ee11
mode 100644,000000..100644
--- /dev/null
@@@ -1,777 -1,0 +1,792 @@@
-             ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(fx),
 +//! Definition of [`CValue`] and [`CPlace`]
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::entity::EntityRef;
 +use cranelift_codegen::ir::immediates::Offset32;
 +
 +/// Compute a pointer to field `field` of the value at `base` with layout `layout`,
 +/// together with the field's layout. `extra` is the pointer metadata when the
 +/// containing value is unsized.
 +fn codegen_field<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    base: Pointer,
 +    extra: Option<Value>,
 +    layout: TyAndLayout<'tcx>,
 +    field: mir::Field,
 +) -> (Pointer, TyAndLayout<'tcx>) {
 +    let field_offset = layout.fields.offset(field.index());
 +    let field_layout = layout.field(&*fx, field.index());
 +
 +    // Common case: the field lives at a statically known byte offset from `base`.
 +    let simple = |fx: &mut FunctionCx<'_, '_, _>| {
 +        (
 +            base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
 +            field_layout,
 +        )
 +    };
 +
 +    if let Some(extra) = extra {
 +        if !field_layout.is_unsized() {
 +            return simple(fx);
 +        }
 +        match field_layout.ty.kind() {
-                 return simple(fx);
++            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
 +            ty::Adt(def, _) if def.repr.packed() => {
 +                assert_eq!(layout.align.abi.bytes(), 1);
-         match layout.ty.kind() {
-             ty::Bool => {
-                 assert!(
-                     const_val == 0 || const_val == 1,
-                     "Invalid bool 0x{:032X}",
-                     const_val
-                 );
-             }
-             _ => {}
++                simple(fx)
 +            }
 +            _ => {
 +                // We have to align the offset for DST's
 +                let unaligned_offset = field_offset.bytes();
 +                let (_, unsized_align) =
 +                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 +
 +                // offset = (unaligned_offset + align - 1) & -align
 +                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
 +                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
 +                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
 +                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
 +                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
 +                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
 +
 +                (base.offset_value(fx, offset), field_layout)
 +            }
 +        }
 +    } else {
 +        simple(fx)
 +    }
 +}
 +
 +/// Byte offset of the second element of a `ScalarPair`: the size of the first
 +/// scalar rounded up to the alignment of the second.
 +fn scalar_pair_calculate_b_offset(
 +    tcx: TyCtxt<'_>,
 +    a_scalar: &Scalar,
 +    b_scalar: &Scalar,
 +) -> Offset32 {
 +    let b_offset = a_scalar
 +        .value
 +        .size(&tcx)
 +        .align_to(b_scalar.value.align(&tcx).abi);
 +    Offset32::new(b_offset.bytes().try_into().unwrap())
 +}
 +
 +/// A read-only value
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
 +
 +#[derive(Debug, Copy, Clone)]
 +enum CValueInner {
 +    /// Value stored in memory; the second field holds pointer metadata for unsized values.
 +    ByRef(Pointer, Option<Value>),
 +    /// Value held in a single SSA value.
 +    ByVal(Value),
 +    /// Value held in two SSA values (used with `Abi::ScalarPair`).
 +    ByValPair(Value, Value),
 +}
 +
 +impl<'tcx> CValue<'tcx> {
 +    /// Create a `CValue` stored in memory at `ptr` (sized, no metadata).
 +    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, None), layout)
 +    }
 +
 +    /// Create a `CValue` for an unsized value at `ptr` with pointer metadata `meta`.
 +    pub(crate) fn by_ref_unsized(
 +        ptr: Pointer,
 +        meta: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
 +    }
 +
 +    /// Create a `CValue` held directly in a single SSA value.
 +    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByVal(value), layout)
 +    }
 +
 +    /// Create a `CValue` held in two SSA values.
 +    pub(crate) fn by_val_pair(
 +        value: Value,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByValPair(value, extra), layout)
 +    }
 +
 +    /// The type and layout of this value.
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.1
 +    }
 +
 +    /// Force this value into memory, spilling `ByVal`/`ByValPair` to a fresh stack
 +    /// slot. Returns the pointer and, for unsized values, the pointer metadata.
 +    // FIXME remove
 +    pub(crate) fn force_stack(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    ) -> (Pointer, Option<Value>) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => (ptr, meta),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
 +                let cplace = CPlace::new_stack_slot(fx, layout);
 +                cplace.write_cvalue(fx, self);
 +                (cplace.to_ptr(), None)
 +            }
 +        }
 +    }
 +
 +    /// If the value is already in memory, return its pointer and optional metadata;
 +    /// `None` for SSA-held values.
 +    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
 +        }
 +    }
 +
 +    /// Load a value with layout.abi of scalar
 +    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> Value {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let clif_ty = match layout.abi {
 +                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
 +                    Abi::Vector { ref element, count } => {
 +                        scalar_to_clif_type(fx.tcx, element.clone())
 +                            .by(u16::try_from(count).unwrap())
 +                            .unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", layout.ty),
 +                };
 +                // notrap: the pointer is known to be valid here.
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                ptr.load(fx, clif_ty, flags)
 +            }
 +            CValueInner::ByVal(value) => value,
 +            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
 +            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
 +        }
 +    }
 +
 +    /// Load a value pair with layout.abi of scalar pair
 +    pub(crate) fn load_scalar_pair(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    ) -> (Value, Value) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let (a_scalar, b_scalar) = match &layout.abi {
 +                    Abi::ScalarPair(a, b) => (a, b),
 +                    _ => unreachable!("load_scalar_pair({:?})", self),
 +                };
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
 +                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let val1 = ptr.load(fx, clif_ty1, flags);
 +                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
 +                (val1, val2)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => {
 +                bug!("load_scalar_pair for unsized value not allowed")
 +            }
 +            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
 +            CValueInner::ByValPair(val1, val2) => (val1, val2),
 +        }
 +    }
 +
 +    /// Read field `field` of this value as a new `CValue`.
 +    pub(crate) fn value_field(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        field: mir::Field,
 +    ) -> CValue<'tcx> {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByVal(val) => match layout.abi {
 +                Abi::Vector { element: _, count } => {
 +                    // A "field" of a SIMD value is one of its lanes.
 +                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
 +                    let field = u8::try_from(field.index()).unwrap();
 +                    assert!(field < count);
 +                    let lane = fx.bcx.ins().extractlane(val, field);
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(lane, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByValPair(val1, val2) => match layout.abi {
 +                Abi::ScalarPair(_, _) => {
 +                    let val = match field.as_u32() {
 +                        0 => val1,
 +                        1 => val2,
 +                        _ => bug!("field should be 0 or 1"),
 +                    };
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(val, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByRef(ptr, None) => {
 +                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
 +                CValue::by_ref(field_ptr, field_layout)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    /// Perform an unsizing coercion of this value into the place `dest`.
 +    pub(crate) fn unsize_value(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        dest: CPlace<'tcx>,
 +    ) {
 +        crate::unsize::coerce_unsized_into(fx, self, dest);
 +    }
 +
 +    /// If `ty` is signed, `const_val` must already be sign extended.
 +    pub(crate) fn const_val(
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        layout: TyAndLayout<'tcx>,
 +        const_val: u128,
 +    ) -> CValue<'tcx> {
 +        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
 +
 +        let clif_ty = fx.clif_type(layout.ty).unwrap();
 +
-             size: layout.size.bytes() as u32,
++        if let ty::Bool = layout.ty.kind() {
++            assert!(
++                const_val == 0 || const_val == 1,
++                "Invalid bool 0x{:032X}",
++                const_val
++            );
 +        }
 +
 +        let val = match layout.ty.kind() {
 +            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                // 128-bit constants are built from two 64-bit halves.
 +                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
 +                let msb = fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(types::I64, (const_val >> 64) as u64 as i64);
 +                fx.bcx.ins().iconcat(lsb, msb)
 +            }
 +            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..)
 +            | ty::RawPtr(..) => {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(clif_ty, u64::try_from(const_val).expect("uint") as i64)
 +            }
 +            ty::Float(FloatTy::F32) => {
 +                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
 +            }
 +            ty::Float(FloatTy::F64) => {
 +                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
 +            }
 +            _ => panic!(
 +                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
 +                layout.ty
 +            ),
 +        };
 +
 +        CValue::by_val(val, layout)
 +    }
 +
 +    /// Reinterpret this pointer-typed value as another pointer type without
 +    /// changing its representation; both layouts must share the same abi.
 +    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
 +        assert!(matches!(
 +            self.layout().ty.kind(),
 +            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
 +        ));
 +        assert!(matches!(
 +            layout.ty.kind(),
 +            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
 +        ));
 +        assert_eq!(self.layout().abi, layout.abi);
 +        CValue(self.0, layout)
 +    }
 +}
 +
 +/// A place where you can write a value to or read a value from
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CPlace<'tcx> {
 +    inner: CPlaceInner,
 +    layout: TyAndLayout<'tcx>,
 +}
 +
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) enum CPlaceInner {
 +    /// A local backed by a single SSA variable.
 +    Var(Local, Variable),
 +    /// A local backed by two SSA variables.
 +    VarPair(Local, Variable, Variable),
 +    /// A single lane of a vector local held in an SSA variable.
 +    VarLane(Local, Variable, u8),
 +    /// A memory location; the second field holds pointer metadata for unsized places.
 +    Addr(Pointer, Option<Value>),
 +}
 +
 +impl<'tcx> CPlace<'tcx> {
 +    /// The type and layout of this place.
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.layout
 +    }
 +
 +    /// The representation of this place (SSA variable(s) or memory address).
 +    pub(crate) fn inner(&self) -> &CPlaceInner {
 +        &self.inner
 +    }
 +
 +    /// A place backed by a dangling (but suitably aligned) pointer; used for ZSTs.
 +    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace {
 +            inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
 +            layout,
 +        }
 +    }
 +
 +    /// Allocate a new stack slot big enough for `layout` and return it as a place.
 +    pub(crate) fn new_stack_slot(
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        assert!(!layout.is_unsized());
 +        // ZSTs need no storage; hand out a dangling place instead.
 +        if layout.size.bytes() == 0 {
 +            return CPlace::no_place(layout);
 +        }
 +
 +        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +            kind: StackSlotKind::ExplicitSlot,
++            size: u32::try_from(layout.size.bytes()).unwrap(),
 +            offset: None,
 +        });
 +        CPlace {
 +            inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None),
 +            layout,
 +        }
 +    }
 +
 +    /// Create a place for local `local` backed by a fresh SSA variable.
 +    pub(crate) fn new_var(
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
 +        CPlace {
 +            inner: CPlaceInner::Var(local, var),
 +            layout,
 +        }
 +    }
 +
 +    /// Create a place for local `local` backed by two fresh SSA variables,
 +    /// for types with a pair representation.
 +    pub(crate) fn new_var_pair(
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var1 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        let var2 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +
 +        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
 +        fx.bcx.declare_var(var1, ty1);
 +        fx.bcx.declare_var(var2, ty2);
 +        CPlace {
 +            inner: CPlaceInner::VarPair(local, var1, var2),
 +            layout,
 +        }
 +    }
 +
 +    /// A place at memory location `ptr` (sized).
 +    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace {
 +            inner: CPlaceInner::Addr(ptr, None),
 +            layout,
 +        }
 +    }
 +
 +    /// A place at memory location `ptr` with pointer metadata `extra` (unsized).
 +    pub(crate) fn for_ptr_with_extra(
 +        ptr: Pointer,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        CPlace {
 +            inner: CPlaceInner::Addr(ptr, Some(extra)),
 +            layout,
 +        }
 +    }
 +
 +    /// Read the current value of this place as a `CValue`.
 +    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CValue<'tcx> {
 +        let layout = self.layout();
 +        match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let val = fx.bcx.use_var(var);
 +                // Value labels keep debug info associated with the variable.
 +                fx.bcx
 +                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let val1 = fx.bcx.use_var(var1);
 +                fx.bcx
 +                    .set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
 +                let val2 = fx.bcx.use_var(var2);
 +                fx.bcx
 +                    .set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
 +                CValue::by_val_pair(val1, val2, layout)
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let val = fx.bcx.use_var(var);
 +                fx.bcx
 +                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                let val = fx.bcx.ins().extractlane(val, lane);
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::Addr(ptr, extra) => {
 +                if let Some(extra) = extra {
 +                    CValue::by_ref_unsized(ptr, extra, layout)
 +                } else {
 +                    CValue::by_ref(ptr, layout)
 +                }
 +            }
 +        }
 +    }
 +
 +    /// Pointer to this sized, memory-backed place; `bug!`s for unsized places.
 +    pub(crate) fn to_ptr(self) -> Pointer {
 +        match self.to_ptr_maybe_unsized() {
 +            (ptr, None) => ptr,
 +            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
 +        }
 +    }
 +
 +    /// Pointer and optional metadata of this memory-backed place;
 +    /// `bug!`s for variable-backed places.
 +    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
 +        match self.inner {
 +            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
 +            CPlaceInner::Var(_, _)
 +            | CPlaceInner::VarPair(_, _, _)
 +            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
 +        }
 +    }
 +
 +    /// Write `from` into this place, asserting that the types are assignable.
 +    pub(crate) fn write_cvalue(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        from: CValue<'tcx>,
 +    ) {
 +        // Checks (recursing through references/pointers) that `from_ty` may be
 +        // written to a place of type `to_ty`.
 +        fn assert_assignable<'tcx>(
 +            fx: &FunctionCx<'_, 'tcx, impl Module>,
 +            from_ty: Ty<'tcx>,
 +            to_ty: Ty<'tcx>,
 +        ) {
 +            match (&from_ty.kind(), &to_ty.kind()) {
 +                (ty::Ref(_, a, _), ty::Ref(_, b, _))
 +                | (
 +                    ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
 +                    ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
 +                ) => {
 +                    assert_assignable(fx, a, b);
 +                }
 +                (ty::FnPtr(_), ty::FnPtr(_)) => {
 +                    let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +                        ParamEnv::reveal_all(),
 +                        &from_ty.fn_sig(fx.tcx),
 +                    );
 +                    let to_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +                        ParamEnv::reveal_all(),
 +                        &to_ty.fn_sig(fx.tcx),
 +                    );
 +                    assert_eq!(
 +                        from_sig, to_sig,
 +                        "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
 +                        from_sig, to_sig, fx,
 +                    );
 +                    // fn(&T) -> for<'l> fn(&'l T) is allowed
 +                }
 +                (ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
 +                    let from_traits = fx
 +                        .tcx
 +                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
 +                    let to_traits = fx
 +                        .tcx
 +                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
 +                    assert_eq!(
 +                        from_traits, to_traits,
 +                        "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
 +                        from_traits, to_traits, fx,
 +                    );
 +                    // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
 +                }
 +                _ => {
 +                    assert_eq!(
 +                        from_ty,
 +                        to_ty,
 +                        "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
 +                        from_ty,
 +                        to_ty,
 +                        fx,
 +                    );
 +                }
 +            }
 +        }
 +
 +        assert_assignable(fx, from.layout().ty, self.layout().ty);
 +
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
 +    }
 +
 +    /// Write `from` into this place without the type-assignability check;
 +    /// the layouts must still have equal size.
 +    pub(crate) fn write_cvalue_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        from: CValue<'tcx>,
 +    ) {
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
 +    }
 +
 +    /// Shared implementation of `write_cvalue` and `write_cvalue_transmute`.
 +    /// `method` is only used to label the generated comment in debug builds.
 +    fn write_cvalue_maybe_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        from: CValue<'tcx>,
 +        #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
 +    ) {
 +        // Write `data` into SSA variable `var`, bit-casting it to `dst_ty` if the
 +        // clif types differ (sizes must match).
 +        fn transmute_value<'tcx>(
 +            fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +            var: Variable,
 +            data: Value,
 +            dst_ty: Type,
 +        ) {
 +            let src_ty = fx.bcx.func.dfg.value_type(data);
++            assert_eq!(
++                src_ty.bytes(),
++                dst_ty.bytes(),
++                "write_cvalue_transmute: {:?} -> {:?}",
++                src_ty,
++                dst_ty,
++            );
 +            let data = match (src_ty, dst_ty) {
 +                (_, _) if src_ty == dst_ty => data,
 +
 +                // This is a `write_cvalue_transmute`.
 +                (types::I32, types::F32)
 +                | (types::F32, types::I32)
 +                | (types::I64, types::F64)
 +                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
 +                _ if src_ty.is_vector() && dst_ty.is_vector() => {
 +                    fx.bcx.ins().raw_bitcast(dst_ty, data)
 +                }
++                _ if src_ty.is_vector() || dst_ty.is_vector() => {
++                    // FIXME do something more efficient for transmutes between vectors and integers.
++                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
++                        kind: StackSlotKind::ExplicitSlot,
++                        size: src_ty.bytes(),
++                        offset: None,
++                    });
++                    let ptr = Pointer::stack_slot(stack_slot);
++                    ptr.store(fx, data, MemFlags::trusted());
++                    ptr.load(fx, dst_ty, MemFlags::trusted())
++                }
 +                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
 +            };
 +            fx.bcx
 +                .set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +            fx.bcx.def_var(var, data);
 +        }
 +
 +        assert_eq!(self.layout().size, from.layout().size);
 +
 +        // In debug builds, annotate the last emitted instruction with the write.
 +        #[cfg(debug_assertions)]
 +        {
 +            use cranelift_codegen::cursor::{Cursor, CursorPosition};
 +            let cur_block = match fx.bcx.cursor().position() {
 +                CursorPosition::After(block) => block,
 +                _ => unreachable!(),
 +            };
 +            fx.add_comment(
 +                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
 +                format!(
 +                    "{}: {:?}: {:?} <- {:?}: {:?}",
 +                    method,
 +                    self.inner(),
 +                    self.layout().ty,
 +                    from.0,
 +                    from.layout().ty
 +                ),
 +            );
 +        }
 +
 +        // Variable-backed destinations are handled (and return) inside this match;
 +        // memory-backed destinations fall through with their pointer.
 +        let dst_layout = self.layout();
 +        let to_ptr = match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let data = CValue(from.0, dst_layout).load_scalar(fx);
 +                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var, data, dst_ty);
 +                return;
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
 +                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var1, data1, dst_ty1);
 +                transmute_value(fx, var2, data2, dst_ty2);
 +                return;
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let data = from.load_scalar(fx);
 +
 +                // First get the old vector
 +                let vector = fx.bcx.use_var(var);
 +                fx.bcx
 +                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +
 +                // Next insert the written lane into the vector
 +                let vector = fx.bcx.ins().insertlane(vector, data, lane);
 +
 +                // Finally write the new vector
 +                fx.bcx
 +                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                fx.bcx.def_var(var, vector);
 +
 +                return;
 +            }
 +            CPlaceInner::Addr(ptr, None) => {
 +                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
 +                    return;
 +                }
 +                ptr
 +            }
 +            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
 +        };
 +
 +        let mut flags = MemFlags::new();
 +        flags.set_notrap();
 +        match from.layout().abi {
 +            // FIXME make Abi::Vector work too
 +            Abi::Scalar(_) => {
 +                let val = from.load_scalar(fx);
 +                to_ptr.store(fx, val, flags);
 +                return;
 +            }
 +            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
 +                let (value, extra) = from.load_scalar_pair(fx);
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                to_ptr.store(fx, value, flags);
 +                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
 +                return;
 +            }
 +            _ => {}
 +        }
 +
 +        match from.0 {
 +            CValueInner::ByVal(val) => {
 +                to_ptr.store(fx, val, flags);
 +            }
 +            CValueInner::ByValPair(_, _) => {
 +                bug!(
 +                    "Non ScalarPair abi {:?} for ByValPair CValue",
 +                    dst_layout.abi
 +                );
 +            }
 +            CValueInner::ByRef(from_ptr, None) => {
 +                // Memory-to-memory copy of the whole value.
 +                let from_addr = from_ptr.get_addr(fx);
 +                let to_addr = to_ptr.get_addr(fx);
 +                let src_layout = from.1;
 +                let size = dst_layout.size.bytes();
 +                let src_align = src_layout.align.abi.bytes() as u8;
 +                let dst_align = dst_layout.align.abi.bytes() as u8;
 +                fx.bcx.emit_small_memory_copy(
 +                    fx.cx.module.target_config(),
 +                    to_addr,
 +                    from_addr,
 +                    size,
 +                    dst_align,
 +                    src_align,
 +                    true,
 +                );
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    /// Project to field `field` of this place.
 +    pub(crate) fn place_field(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        field: mir::Field,
 +    ) -> CPlace<'tcx> {
 +        let layout = self.layout();
 +
 +        match self.inner {
 +            CPlaceInner::Var(local, var) => {
 +                if let Abi::Vector { .. } = layout.abi {
 +                    // A field of a vector-backed local is a lane of that vector.
 +                    return CPlace {
 +                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
 +                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                    };
 +                }
 +            }
 +            CPlaceInner::VarPair(local, var1, var2) => {
 +                let layout = layout.field(&*fx, field.index());
 +
 +                match field.as_u32() {
 +                    0 => {
 +                        return CPlace {
 +                            inner: CPlaceInner::Var(local, var1),
 +                            layout,
 +                        }
 +                    }
 +                    1 => {
 +                        return CPlace {
 +                            inner: CPlaceInner::Var(local, var2),
 +                            layout,
 +                        }
 +                    }
 +                    _ => unreachable!("field should be 0 or 1"),
 +                }
 +            }
 +            _ => {}
 +        }
 +
 +        let (base, extra) = self.to_ptr_maybe_unsized();
 +
 +        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
 +        if field_layout.is_unsized() {
 +            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
 +        } else {
 +            CPlace::for_ptr(field_ptr, field_layout)
 +        }
 +    }
 +
 +    /// Project to element `index` of this array/slice place.
 +    pub(crate) fn place_index(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        index: Value,
 +    ) -> CPlace<'tcx> {
 +        let (elem_layout, ptr) = match self.layout().ty.kind() {
 +            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
 +            // The slice length metadata is not needed to compute the element address.
 +            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
 +            _ => bug!("place_index({:?})", self.layout().ty),
 +        };
 +
 +        let offset = fx
 +            .bcx
 +            .ins()
 +            .imul_imm(index, elem_layout.size.bytes() as i64);
 +
 +        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
 +    }
 +
 +    /// Dereference this pointer place, yielding the pointed-to place.
 +    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Module>) -> CPlace<'tcx> {
 +        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
 +        if has_ptr_meta(fx.tcx, inner_layout.ty) {
 +            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
 +            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
 +        } else {
 +            CPlace::for_ptr(
 +                Pointer::new(self.to_cvalue(fx).load_scalar(fx)),
 +                inner_layout,
 +            )
 +        }
 +    }
 +
 +    /// Take the address of this place as a value of pointer type `layout`.
 +    pub(crate) fn place_ref(
 +        self,
 +        fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        if has_ptr_meta(fx.tcx, self.layout().ty) {
 +            let (ptr, extra) = self.to_ptr_maybe_unsized();
 +            CValue::by_val_pair(
 +                ptr.get_addr(fx),
 +                extra.expect("unsized type without metadata"),
 +                layout,
 +            )
 +        } else {
 +            CValue::by_val(self.to_ptr().get_addr(fx), layout)
 +        }
 +    }
 +
 +    /// Reinterpret this place as enum variant `variant`; only the layout changes.
 +    pub(crate) fn downcast_variant(
 +        self,
 +        fx: &FunctionCx<'_, 'tcx, impl Module>,
 +        variant: VariantIdx,
 +    ) -> Self {
 +        assert!(!self.layout().is_unsized());
 +        let layout = self.layout().for_variant(fx, variant);
 +        CPlace {
 +            inner: self.inner,
 +            layout,
 +        }
 +    }
 +}
index bb3cf8b3f3a3ad555ad33b7ceb6527d48a09148a,0000000000000000000000000000000000000000..238abc0d8bdfa557ecd65611a91af4063383d8c8
mode 100644,000000..100644
--- /dev/null
@@@ -1,194 -1,0 +1,186 @@@
-         opt_mth.map_or(None, |(def_id, substs)| {
-             Some(import_function(
 +//! Codegen vtables and vtable accesses.
 +//!
 +//! See librustc_codegen_llvm/meth.rs for reference
 +// FIXME dedup this logic between miri, cg_llvm and cg_clif
 +
 +use crate::prelude::*;
 +
 +const DROP_FN_INDEX: usize = 0;
 +const SIZE_INDEX: usize = 1;
 +const ALIGN_INDEX: usize = 2;
 +
 +fn vtable_memflags() -> MemFlags {
 +    let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
 +    flags.set_readonly(); // A vtable is always read-only.
 +    flags
 +}
 +
 +pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
 +        pointer_ty(fx.tcx),
 +        vtable_memflags(),
 +        vtable,
 +        (DROP_FN_INDEX * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
 +        pointer_ty(fx.tcx),
 +        vtable_memflags(),
 +        vtable,
 +        (SIZE_INDEX * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
 +        pointer_ty(fx.tcx),
 +        vtable_memflags(),
 +        vtable,
 +        (ALIGN_INDEX * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn get_ptr_and_method_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    arg: CValue<'tcx>,
 +    idx: usize,
 +) -> (Value, Value) {
 +    let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
 +        arg.load_scalar_pair(fx)
 +    } else {
 +        let (ptr, vtable) = arg.try_to_ptr().unwrap();
 +        (ptr.get_addr(fx), vtable.unwrap())
 +    };
 +
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
 +    let func_ref = fx.bcx.ins().load(
 +        pointer_ty(fx.tcx),
 +        vtable_memflags(),
 +        vtable,
 +        ((idx + 3) * usize_size as usize) as i32,
 +    );
 +    (ptr, func_ref)
 +}
 +
 +pub(crate) fn get_vtable<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    layout: TyAndLayout<'tcx>,
 +    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
 +) -> Value {
 +    let data_id = if let Some(data_id) = fx.cx.vtables.get(&(layout.ty, trait_ref)) {
 +        *data_id
 +    } else {
 +        let data_id = build_vtable(fx, layout, trait_ref);
 +        fx.cx.vtables.insert((layout.ty, trait_ref), data_id);
 +        data_id
 +    };
 +
 +    let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +}
 +
 +fn build_vtable<'tcx>(
 +    fx: &mut FunctionCx<'_, 'tcx, impl Module>,
 +    layout: TyAndLayout<'tcx>,
 +    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
 +) -> DataId {
 +    let tcx = fx.tcx;
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +
 +    let drop_in_place_fn = import_function(
 +        tcx,
 +        &mut fx.cx.module,
 +        Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
 +    );
 +
 +    let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
 +
 +    let methods_root;
 +    let methods = if let Some(trait_ref) = trait_ref {
 +        methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, layout.ty));
 +        methods_root.iter()
 +    } else {
 +        (&[]).iter()
 +    };
 +    let methods = methods.cloned().map(|opt_mth| {
-             ))
++        opt_mth.map(|(def_id, substs)| {
++            import_function(
 +                tcx,
 +                &mut fx.cx.module,
 +                Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
 +                    .unwrap()
 +                    .polymorphize(fx.tcx),
-     data_ctx.set_align(
-         fx.tcx
-             .data_layout
-             .pointer_align
-             .pref
-             .bytes()
-             .try_into()
-             .unwrap(),
-     );
++            )
 +        })
 +    });
 +    components.extend(methods);
 +
 +    let mut data_ctx = DataContext::new();
 +    let mut data = ::std::iter::repeat(0u8)
 +        .take(components.len() * usize_size)
 +        .collect::<Vec<u8>>()
 +        .into_boxed_slice();
 +
 +    write_usize(fx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
 +    write_usize(fx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
 +    data_ctx.define(data);
 +
 +    for (i, component) in components.into_iter().enumerate() {
 +        if let Some(func_id) = component {
 +            let func_ref = fx.cx.module.declare_func_in_data(func_id, &mut data_ctx);
 +            data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
 +        }
 +    }
 +
++    data_ctx.set_align(fx.tcx.data_layout.pointer_align.pref.bytes());
 +
 +    let data_id = fx
 +        .cx
 +        .module
 +        .declare_data(
 +            &format!(
 +                "__vtable.{}.for.{:?}.{}",
 +                trait_ref
 +                    .as_ref()
 +                    .map(|trait_ref| format!("{:?}", trait_ref.skip_binder()).into())
 +                    .unwrap_or(std::borrow::Cow::Borrowed("???")),
 +                layout.ty,
 +                fx.cx.vtables.len(),
 +            ),
 +            Linkage::Local,
 +            false,
 +            false,
 +        )
 +        .unwrap();
 +
 +    fx.cx.module.define_data(data_id, &data_ctx).unwrap();
 +
 +    data_id
 +}
 +
 +fn write_usize(tcx: TyCtxt<'_>, buf: &mut [u8], idx: usize, num: u64) {
 +    let pointer_size = tcx
 +        .layout_of(ParamEnv::reveal_all().and(tcx.types.usize))
 +        .unwrap()
 +        .size
 +        .bytes() as usize;
 +    let target = &mut buf[idx * pointer_size..(idx + 1) * pointer_size];
 +
 +    match tcx.data_layout.endian {
 +        rustc_target::abi::Endian::Little => match pointer_size {
 +            4 => target.copy_from_slice(&(num as u32).to_le_bytes()),
 +            8 => target.copy_from_slice(&(num as u64).to_le_bytes()),
 +            _ => todo!("pointer size {} is not yet supported", pointer_size),
 +        },
 +        rustc_target::abi::Endian::Big => match pointer_size {
 +            4 => target.copy_from_slice(&(num as u32).to_be_bytes()),
 +            8 => target.copy_from_slice(&(num as u64).to_be_bytes()),
 +            _ => todo!("pointer size {} is not yet supported", pointer_size),
 +        },
 +    }
 +}
index a1c4d9f28728304bd918579771c84ca54a9045c2,0000000000000000000000000000000000000000..3cdd4119d794cda6c8a045dce1c5a80566986030
mode 100755,000000..100755
--- /dev/null
@@@ -1,119 -1,0 +1,15 @@@
- # Build cg_clif
 +#!/bin/bash
 +set -e
 +
- if [[ "$1" == "--release" ]]; then
-     export CHANNEL='release'
-     cargo build --release
- else
-     export CHANNEL='debug'
-     cargo build --bin cg_clif
- fi
 +export RUSTFLAGS="-Zrun_dsymutil=no"
- # Config
- source scripts/config.sh
- export CG_CLIF_INCR_CACHE_DISABLED=1
- RUSTC=$RUSTC" "$RUSTFLAGS" -L crate=target/out --out-dir target/out -Cdebuginfo=2"
 +
- # Cleanup
++./build.sh --without-sysroot $@
 +
- # Perform all tests
- echo "[BUILD] mini_core"
- $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
 +rm -r target/out || true
 +
- echo "[BUILD] example"
- $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
++scripts/tests.sh no_sysroot
 +
- if [[ "$JIT_SUPPORTED" = "1" ]]; then
-     echo "[JIT] mini_core_hello_world"
-     CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --jit example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
- else
-     echo "[JIT] mini_core_hello_world (skipped)"
- fi
- echo "[AOT] mini_core_hello_world"
- $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
- # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
- echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
- $RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
- echo "[BUILD] sysroot"
- time ./build_sysroot/build_sysroot.sh --release
- echo "[AOT] alloc_example"
- $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/alloc_example
- if [[ "$JIT_SUPPORTED" = "1" ]]; then
-     echo "[JIT] std_example"
-     $RUSTC --jit example/std_example.rs --target $HOST_TRIPLE
- else
-     echo "[JIT] std_example (skipped)"
- fi
- echo "[AOT] dst_field_align"
- # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
- $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
- echo "[AOT] std_example"
- $RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/std_example arg
- echo "[AOT] subslice-patterns-const-eval"
- $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
- echo "[AOT] track-caller-attribute"
- $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/track-caller-attribute
- echo "[AOT] mod_bench"
- $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
- $RUN_WRAPPER ./target/out/mod_bench
- pushd rand
- rm -r ./target || true
- ../cargo.sh test --workspace
- popd
- pushd simple-raytracer
- if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
-     echo "[BENCH COMPILE] ebobby/simple-raytracer"
-     hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "cargo clean" \
-     "RUSTC=rustc RUSTFLAGS='' cargo build" \
-     "../cargo.sh build"
-     echo "[BENCH RUN] ebobby/simple-raytracer"
-     cp ./target/debug/main ./raytracer_cg_clif
-     hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_clif
- else
-     echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
-     echo "[COMPILE] ebobby/simple-raytracer"
-     ../cargo.sh build
-     echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
- fi
- popd
- pushd build_sysroot/sysroot_src/library/core/tests
- echo "[TEST] libcore"
- rm -r ./target || true
- ../../../../../cargo.sh test
- popd
- pushd regex
- echo "[TEST] rust-lang/regex example shootout-regex-dna"
- ../cargo.sh clean
- # Make sure `[codegen mono items] start` doesn't poison the diff
- ../cargo.sh build --example shootout-regex-dna
- cat examples/regexdna-input.txt | ../cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
- diff -u res.txt examples/regexdna-output.txt
- echo "[TEST] rust-lang/regex tests"
- ../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options
- popd
++./build.sh $@
 +
++scripts/tests.sh base_sysroot
++scripts/tests.sh extended_sysroot