git.lizzy.rs Git - rust.git/commitdiff
Merge commit '05677b6bd6c938ed760835d9b1f6514992654ae3' into sync_cg_clif-2021-08-06
author bjorn3 <bjorn3@users.noreply.github.com>
Fri, 6 Aug 2021 14:26:56 +0000 (16:26 +0200)
committer bjorn3 <bjorn3@users.noreply.github.com>
Fri, 6 Aug 2021 14:26:56 +0000 (16:26 +0200)
40 files changed:
compiler/rustc_codegen_cranelift/.github/workflows/main.yml
compiler/rustc_codegen_cranelift/.gitignore
compiler/rustc_codegen_cranelift/Cargo.lock
compiler/rustc_codegen_cranelift/Cargo.toml
compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
compiler/rustc_codegen_cranelift/build_system/build_backend.rs
compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
compiler/rustc_codegen_cranelift/build_system/prepare.rs
compiler/rustc_codegen_cranelift/clean_all.sh
compiler/rustc_codegen_cranelift/docs/usage.md
compiler/rustc_codegen_cranelift/patches/0001-stdsimd-Disable-unsupported-tests.patch
compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch
compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch
compiler/rustc_codegen_cranelift/rust-toolchain
compiler/rustc_codegen_cranelift/scripts/cargo.rs
compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
compiler/rustc_codegen_cranelift/scripts/tests.sh
compiler/rustc_codegen_cranelift/src/abi/mod.rs
compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
compiler/rustc_codegen_cranelift/src/abi/returning.rs
compiler/rustc_codegen_cranelift/src/allocator.rs
compiler/rustc_codegen_cranelift/src/analyze.rs
compiler/rustc_codegen_cranelift/src/base.rs
compiler/rustc_codegen_cranelift/src/cast.rs
compiler/rustc_codegen_cranelift/src/codegen_i128.rs
compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
compiler/rustc_codegen_cranelift/src/lib.rs
compiler/rustc_codegen_cranelift/src/num.rs
compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
compiler/rustc_codegen_cranelift/src/trap.rs
compiler/rustc_codegen_cranelift/src/unsize.rs
compiler/rustc_codegen_cranelift/src/value_and_place.rs
compiler/rustc_codegen_cranelift/src/vtable.rs
compiler/rustc_codegen_cranelift/y.rs

index f81ac87726052c7c16e78869bd4baf2866be1fa1,0000000000000000000000000000000000000000..f524b42c5eecda06098bdec6a7518295e3994013
mode 100644,000000..100644
--- /dev/null
@@@ -1,160 -1,0 +1,162 @@@
 +name: CI
 +
 +on:
 +  - push
 +  - pull_request
 +
 +jobs:
 +  build:
 +    runs-on: ${{ matrix.os }}
 +    timeout-minutes: 60
 +
 +    strategy:
 +      fail-fast: false
 +      matrix:
 +        include:
 +          - os: ubuntu-latest
 +          - os: macos-latest
 +          # cross-compile from Linux to Windows using mingw
 +          - os: ubuntu-latest
 +            env:
 +              TARGET_TRIPLE: x86_64-pc-windows-gnu
 +          - os: ubuntu-latest
 +            env:
 +              TARGET_TRIPLE: aarch64-unknown-linux-gnu
 +
 +    steps:
 +    - uses: actions/checkout@v2
 +
 +    - name: Cache cargo installed crates
 +      uses: actions/cache@v2
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
 +      uses: actions/cache@v2
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
 +      uses: actions/cache@v2
 +      with:
 +        path: target
 +        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Install MinGW toolchain and wine
 +      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
 +      run: |
++        sudo apt-get update
 +        sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
 +        rustup target add x86_64-pc-windows-gnu
 +
 +    - name: Install AArch64 toolchain and qemu
 +      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
 +      run: |
++        sudo apt-get update
 +        sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        ./y.rs prepare
 +
 +    - name: Build
 +      run: ./y.rs build --sysroot none
 +
 +    - name: Test
 +      env:
 +        TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        # Reduce the number of benchmark runs, as they are slow
 +        export COMPILE_RUNS=2
 +        export RUN_RUNS=2
 +
 +        # Enable extra checks
 +        export CG_CLIF_ENABLE_VERIFIER=1
 +
 +        ./test.sh
 +
 +    - name: Package prebuilt cg_clif
 +      run: tar cvfJ cg_clif.tar.xz build
 +
 +    - name: Upload prebuilt cg_clif
 +      if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
 +      uses: actions/upload-artifact@v2
 +      with:
 +        name: cg_clif-${{ runner.os }}
 +        path: cg_clif.tar.xz
 +
 +    - name: Upload prebuilt cg_clif (cross compile)
 +      if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
 +      uses: actions/upload-artifact@v2
 +      with:
 +        name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
 +        path: cg_clif.tar.xz
 +
 +  build_windows:
 +    runs-on: windows-latest
 +    timeout-minutes: 60
 +
 +    steps:
 +    - uses: actions/checkout@v2
 +
 +    #- name: Cache cargo installed crates
 +    #  uses: actions/cache@v2
 +    #  with:
 +    #    path: ~/.cargo/bin
 +    #    key: ${{ runner.os }}-cargo-installed-crates
 +
 +    #- name: Cache cargo registry and index
 +    #  uses: actions/cache@v2
 +    #  with:
 +    #    path: |
 +    #        ~/.cargo/registry
 +    #        ~/.cargo/git
 +    #    key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    #- name: Cache cargo target dir
 +    #  uses: actions/cache@v2
 +    #  with:
 +    #    path: target
 +    #    key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        git config --global core.autocrlf false
 +        rustup set default-host x86_64-pc-windows-gnu
 +        rustc y.rs -o y.exe -g
 +        ./y.exe prepare
 +
 +    - name: Build
 +      #name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        #export RUST_BACKTRACE=1
 +
 +        # Reduce the number of benchmark runs, as they are slow
 +        #export COMPILE_RUNS=2
 +        #export RUN_RUNS=2
 +
 +        # Enable extra checks
 +        #export CG_CLIF_ENABLE_VERIFIER=1
 +
 +        ./y.exe build
 +
 +    #- name: Package prebuilt cg_clif
 +    #  run: tar cvfJ cg_clif.tar.xz build
 +
 +    #- name: Upload prebuilt cg_clif
 +    #  uses: actions/upload-artifact@v2
 +    #  with:
 +    #    name: cg_clif-${{ runner.os }}
 +    #    path: cg_clif.tar.xz
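Only the two cross jobs in the matrix above set `TARGET_TRIPLE`; for the plain Linux and macOS jobs `matrix.env.TARGET_TRIPLE` is undefined, so the `Test` step exports it as an empty string. A minimal sketch of how a test driver could branch on that variable (the real dispatch lives in the shell scripts, which are not part of this hunk, so this is an assumption for illustration):

```rust
use std::env;

fn main() {
    // Assumption: an unset or empty TARGET_TRIPLE means "test for the host",
    // mirroring how the workflow leaves it empty for the non-cross jobs.
    match env::var("TARGET_TRIPLE").ok().filter(|t| !t.is_empty()) {
        Some(triple) => println!("cross-testing for {}", triple),
        None => println!("testing for the host"),
    }
}
```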
index 12e779fe7c7d7b8bb050611e643a6355776ed518,0000000000000000000000000000000000000000..25080488a88b525f30b290caee0f6874851564fb
mode 100644,000000..100644
--- /dev/null
@@@ -1,17 -1,0 +1,18 @@@
 +target
 +**/*.rs.bk
 +*.rlib
 +*.o
 +perf.data
 +perf.data.old
 +*.events
 +*.string*
 +/y.bin
 +/build
 +/build_sysroot/sysroot_src
 +/build_sysroot/compiler-builtins
 +/build_sysroot/rustc_version
 +/rust
 +/rand
 +/regex
 +/simple-raytracer
++/stdsimd
index 56d0974b25371b6e2abef4451c5eefef3879668c,0000000000000000000000000000000000000000..23c1fdc6ee425c7b73404c78a0303fc6aea9e73c
mode 100644,000000..100644
--- /dev/null
@@@ -1,304 -1,0 +1,304 @@@
- version = "1.0.38"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "anyhow"
- checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
++version = "1.0.42"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++checksum = "595d3cfa7a60d4555cb5067b99f07142a08ea778de5cf993f7b75c7d8fabc486"
 +
 +[[package]]
 +name = "ar"
 +version = "0.8.0"
 +source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.0.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 +
 +[[package]]
 +name = "bitflags"
 +version = "1.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "1.0.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 +
 +[[package]]
 +name = "cranelift-bforest"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "cranelift-bforest",
 + "cranelift-codegen-meta",
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 + "gimli",
 + "log",
 + "regalloc",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-meta"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-shared"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +
 +[[package]]
 +name = "cranelift-entity"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +
 +[[package]]
 +name = "cranelift-frontend"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "cranelift-codegen",
 + "log",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-jit"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "cranelift-module",
 + "cranelift-native",
 + "libc",
 + "log",
 + "region",
 + "target-lexicon",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "cranelift-module"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "log",
 +]
 +
 +[[package]]
 +name = "cranelift-native"
 +version = "0.75.0"
- source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "cranelift-codegen",
 + "libc",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-object"
 +version = "0.75.0"
- version = "0.24.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git#5deda279775dca5e37449c829cda1f6276d6542b"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-module",
 + "log",
 + "object",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "crc32fast"
 +version = "1.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "gimli"
- checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
++version = "0.25.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.9.1"
++checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
 +dependencies = [
 + "indexmap",
 +]
 +
 +[[package]]
 +name = "hashbrown"
- checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
++version = "0.11.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.6.1"
++checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
 +
 +[[package]]
 +name = "indexmap"
- checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
++version = "1.7.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.97"
++checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
 +dependencies = [
 + "autocfg",
 + "hashbrown",
 +]
 +
 +[[package]]
 +name = "libc"
- checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
++version = "0.2.98"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.25.3"
++checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
 +
 +[[package]]
 +name = "libloading"
 +version = "0.6.7"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
 +dependencies = [
 + "cfg-if",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "log"
 +version = "0.4.14"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "mach"
 +version = "0.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
 +dependencies = [
 + "libc",
 +]
 +
 +[[package]]
 +name = "memchr"
 +version = "2.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc"
 +
 +[[package]]
 +name = "object"
- checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7"
++version = "0.26.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.12.0"
++checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386"
 +dependencies = [
 + "crc32fast",
 + "indexmap",
 + "memchr",
 +]
 +
 +[[package]]
 +name = "regalloc"
 +version = "0.0.31"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
 +dependencies = [
 + "log",
 + "rustc-hash",
 + "smallvec",
 +]
 +
 +[[package]]
 +name = "region"
 +version = "2.2.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
 +dependencies = [
 + "bitflags",
 + "libc",
 + "mach",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "rustc-hash"
 +version = "1.1.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
 +
 +[[package]]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +dependencies = [
 + "ar",
 + "cranelift-codegen",
 + "cranelift-frontend",
 + "cranelift-jit",
 + "cranelift-module",
 + "cranelift-native",
 + "cranelift-object",
 + "gimli",
 + "indexmap",
 + "libloading",
 + "object",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "smallvec"
 +version = "1.6.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
 +
 +[[package]]
 +name = "target-lexicon"
- checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834"
++version = "0.12.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "b0652da4c4121005e9ed22b79f6c5f2d9e2752906b53a33e9490489ba421a6fb"
 +
 +[[package]]
 +name = "winapi"
 +version = "0.3.9"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 +dependencies = [
 + "winapi-i686-pc-windows-gnu",
 + "winapi-x86_64-pc-windows-gnu",
 +]
 +
 +[[package]]
 +name = "winapi-i686-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 +
 +[[package]]
 +name = "winapi-x86_64-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
index 6593ac738fe8467b2e8463039f9ce85f1ed8ee67,0000000000000000000000000000000000000000..6f40fc0fcb881e77fa67e548191d380321f70550
mode 100644,000000..100644
--- /dev/null
@@@ -1,73 -1,0 +1,74 @@@
- cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", features = ["unwind", "all-arch"] }
- cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
- cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
- cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
- cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", optional = true }
- cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
 +[package]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +edition = "2018"
 +
 +[lib]
 +crate-type = ["dylib"]
 +
 +[dependencies]
 +# These have to be in sync with each other
- gimli = { version = "0.24.0", default-features = false, features = ["write"]}
- object = { version = "0.25.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
++cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", features = ["unwind", "all-arch"] }
++cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git" }
++cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git" }
++cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git" }
++cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", optional = true }
++cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git" }
 +target-lexicon = "0.12.0"
- default = ["jit", "inline_asm"]
++gimli = { version = "0.25.0", default-features = false, features = ["write"]}
++object = { version = "0.26.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
 +
 +ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
 +indexmap = "1.0.2"
 +libloading = { version = "0.6.0", optional = true }
 +smallvec = "1.6.1"
 +
 +# Uncomment to use local checkout of cranelift
 +#[patch."https://github.com/bytecodealliance/wasmtime.git"]
 +#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
 +#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
 +#cranelift-module = { path = "../wasmtime/cranelift/module" }
 +#cranelift-native = { path = "../wasmtime/cranelift/native" }
 +#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
 +#cranelift-object = { path = "../wasmtime/cranelift/object" }
 +
 +#[patch.crates-io]
 +#gimli = { path = "../" }
 +
 +[features]
++# Enable features not ready to be enabled when compiling as part of rustc
++unstable-features = ["jit", "inline_asm"]
 +jit = ["cranelift-jit", "libloading"]
 +inline_asm = []
 +
 +[profile.dev]
 +# Compiling dependencies with optimizations makes running tests much faster.
 +opt-level = 3
 +
 +[profile.dev.package.rustc_codegen_cranelift]
 +# Disabling optimizations for cg_clif itself makes compilation after a change faster.
 +opt-level = 0
 +
 +[profile.release.package.rustc_codegen_cranelift]
 +incremental = true
 +
 +# Disable optimizations and debuginfo of build scripts and some of the heavy build deps; build
 +# scripts run so quickly that optimizing them slows down the total build time.
 +[profile.dev.build-override]
 +opt-level = 0
 +debug = false
 +
 +[profile.release.build-override]
 +opt-level = 0
 +debug = false
 +
 +[profile.dev.package.cranelift-codegen-meta]
 +opt-level = 0
 +debug = false
 +
 +[profile.release.package.cranelift-codegen-meta]
 +opt-level = 0
 +debug = false
 +
 +[package.metadata.rust-analyzer]
 +rustc_private = true
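The `unstable-features` feature declared above lets rustc build cg_clif with JIT support and inline asm disabled, while `./y.rs build` enables them via the `--features unstable-features` argument visible in `build_system/build_backend.rs` further down. As a minimal sketch (a hypothetical crate, not cg_clif's actual module layout), such an optional feature gates code like this:

```rust
// Only compiled when cargo is invoked with `--features jit`
// (or with a feature like `unstable-features` that enables it).
#[cfg(feature = "jit")]
mod jit {
    pub fn run_jit() {
        println!("drive cranelift-jit here");
    }
}

fn main() {
    #[cfg(feature = "jit")]
    jit::run_jit();
    #[cfg(not(feature = "jit"))]
    eprintln!("compiled without the `jit` feature");
}
```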
index 46f661107e73b43ab0b9a2713e51661a60a64708,0000000000000000000000000000000000000000..e068f084234bc734fc2e21c5e218a8089dc10d92
mode 100644,000000..100644
--- /dev/null
@@@ -1,327 -1,0 +1,318 @@@
- version = "1.0.68"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "addr2line"
 +version = "0.14.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
 +dependencies = [
 + "compiler_builtins",
 + "gimli",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "adler"
 +version = "1.0.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "alloc"
 +version = "0.0.0"
 +dependencies = [
 + "compiler_builtins",
 + "core",
 +]
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.0.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 +
 +[[package]]
 +name = "cc"
- checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
++version = "1.0.69"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.97"
++checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "0.1.10"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "compiler_builtins"
 +version = "0.1.46"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "core"
 +version = "0.0.0"
 +
 +[[package]]
 +name = "dlmalloc"
 +version = "0.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "fortanix-sgx-abi"
 +version = "0.3.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "getopts"
 +version = "0.2.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
 +dependencies = [
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 + "unicode-width",
 +]
 +
 +[[package]]
 +name = "gimli"
 +version = "0.23.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.11.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hermit-abi"
 +version = "0.1.19"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "libc"
- checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
++version = "0.2.98"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- [[package]]
- name = "term"
- version = "0.0.0"
- dependencies = [
-  "core",
-  "std",
- ]
++checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "miniz_oxide"
 +version = "0.4.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
 +dependencies = [
 + "adler",
 + "autocfg",
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "object"
 +version = "0.22.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "panic_abort"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "panic_unwind"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 + "unwind",
 +]
 +
 +[[package]]
 +name = "proc_macro"
 +version = "0.0.0"
 +dependencies = [
 + "std",
 +]
 +
 +[[package]]
 +name = "rustc-demangle"
 +version = "0.1.20"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-alloc"
 +version = "1.99.0"
 +dependencies = [
 + "alloc",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-core"
 +version = "1.99.0"
 +dependencies = [
 + "core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-std"
 +version = "1.99.0"
 +dependencies = [
 + "std",
 +]
 +
 +[[package]]
 +name = "std"
 +version = "0.0.0"
 +dependencies = [
 + "addr2line",
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "dlmalloc",
 + "fortanix-sgx-abi",
 + "hashbrown",
 + "hermit-abi",
 + "libc",
 + "miniz_oxide",
 + "object",
 + "panic_abort",
 + "panic_unwind",
 + "rustc-demangle",
 + "std_detect",
 + "unwind",
 + "wasi",
 +]
 +
 +[[package]]
 +name = "std_detect"
 +version = "0.1.5"
 +dependencies = [
 + "cfg-if",
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "sysroot"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "compiler_builtins",
 + "core",
 + "std",
 + "test",
 +]
 +
-  "term",
 +[[package]]
 +name = "test"
 +version = "0.0.0"
 +dependencies = [
 + "cfg-if",
 + "core",
 + "getopts",
 + "libc",
 + "panic_abort",
 + "panic_unwind",
 + "proc_macro",
 + "std",
 +]
 +
 +[[package]]
 +name = "unicode-width"
 +version = "0.1.8"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 +]
 +
 +[[package]]
 +name = "unwind"
 +version = "0.0.0"
 +dependencies = [
 + "cc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "wasi"
 +version = "0.9.0+wasi-snapshot-preview1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
index 1df2bcc4541ca1b9fdbad003df57ea3f6c67d265,0000000000000000000000000000000000000000..150b6d01a6b30858091fec5b0f6cdd1b07a20ad9
mode 100644,000000..100644
--- /dev/null
@@@ -1,40 -1,0 +1,40 @@@
-     cmd.arg("build").arg("--target").arg(host_triple);
 +use std::env;
 +use std::path::{Path, PathBuf};
 +use std::process::Command;
 +
 +pub(crate) fn build_backend(channel: &str, host_triple: &str) -> PathBuf {
 +    let mut cmd = Command::new("cargo");
++    cmd.arg("build").arg("--target").arg(host_triple).arg("--features").arg("unstable-features");
 +
 +    match channel {
 +        "debug" => {}
 +        "release" => {
 +            cmd.arg("--release");
 +        }
 +        _ => unreachable!(),
 +    }
 +
 +    if cfg!(unix) {
 +        if cfg!(target_os = "macos") {
 +            cmd.env(
 +                "RUSTFLAGS",
 +                "-Csplit-debuginfo=unpacked \
 +                -Clink-arg=-Wl,-rpath,@loader_path/../lib \
 +                -Zosx-rpath-install-name"
 +                    .to_string()
 +                    + env::var("RUSTFLAGS").as_deref().unwrap_or(""),
 +            );
 +        } else {
 +            cmd.env(
 +                "RUSTFLAGS",
 +                "-Clink-arg=-Wl,-rpath=$ORIGIN/../lib ".to_string()
 +                    + env::var("RUSTFLAGS").as_deref().unwrap_or(""),
 +            );
 +        }
 +    }
 +
 +    eprintln!("[BUILD] rustc_codegen_cranelift");
 +    crate::utils::spawn_and_wait(cmd);
 +
 +    Path::new("target").join(host_triple).join(channel)
 +}
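The `RUSTFLAGS` handling above prepends linker flags so that the built binaries find the cg_clif dylib in `../lib` at runtime via an rpath (`$ORIGIN` on Linux, `@loader_path` on macOS), while preserving any flags the caller already exported by appending the existing variable. A standalone sketch of that prepend-and-preserve pattern (illustrative only):

```rust
use std::env;

fn main() {
    // Note the trailing space in the literal: it keeps our flags separated
    // from whatever RUSTFLAGS the caller already exported.
    let rustflags = "-Clink-arg=-Wl,-rpath=$ORIGIN/../lib ".to_string()
        + env::var("RUSTFLAGS").as_deref().unwrap_or("");
    println!("RUSTFLAGS={}", rustflags.trim_end());
}
```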
index 9fb88c279613fb85627f262183fe23fe079553de,0000000000000000000000000000000000000000..642abc41f45a7ac2517458c22858ebd05a279a90
mode 100644,000000..100644
--- /dev/null
@@@ -1,216 -1,0 +1,218 @@@
-                 if file_name_str.contains("rustc_")
 +use std::env;
 +use std::fs;
 +use std::path::{Path, PathBuf};
 +use std::process::{self, Command};
 +
 +use crate::rustc_info::{get_file_name, get_rustc_version};
 +use crate::utils::{spawn_and_wait, try_hard_link};
 +use crate::SysrootKind;
 +
 +pub(crate) fn build_sysroot(
 +    channel: &str,
 +    sysroot_kind: SysrootKind,
 +    target_dir: &Path,
 +    cg_clif_build_dir: PathBuf,
 +    host_triple: &str,
 +    target_triple: &str,
 +) {
 +    if target_dir.exists() {
 +        fs::remove_dir_all(target_dir).unwrap();
 +    }
 +    fs::create_dir_all(target_dir.join("bin")).unwrap();
 +    fs::create_dir_all(target_dir.join("lib")).unwrap();
 +
 +    // Copy the backend
 +    for file in ["cg_clif", "cg_clif_build_sysroot"] {
 +        try_hard_link(
 +            cg_clif_build_dir.join(get_file_name(file, "bin")),
 +            target_dir.join("bin").join(get_file_name(file, "bin")),
 +        );
 +    }
 +
 +    let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
 +    try_hard_link(
 +        cg_clif_build_dir.join(&cg_clif_dylib),
 +        target_dir
 +            .join(if cfg!(windows) {
 +                // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
 +                // binaries.
 +                "bin"
 +            } else {
 +                "lib"
 +            })
 +            .join(cg_clif_dylib),
 +    );
 +
 +    // Build and copy cargo wrapper
 +    let mut build_cargo_wrapper_cmd = Command::new("rustc");
 +    build_cargo_wrapper_cmd
 +        .arg("scripts/cargo.rs")
 +        .arg("-o")
 +        .arg(target_dir.join("cargo"))
 +        .arg("-g");
 +    spawn_and_wait(build_cargo_wrapper_cmd);
 +
 +    let default_sysroot = crate::rustc_info::get_default_sysroot();
 +
 +    let rustlib = target_dir.join("lib").join("rustlib");
 +    let host_rustlib_lib = rustlib.join(host_triple).join("lib");
 +    let target_rustlib_lib = rustlib.join(target_triple).join("lib");
 +    fs::create_dir_all(&host_rustlib_lib).unwrap();
 +    fs::create_dir_all(&target_rustlib_lib).unwrap();
 +
 +    if target_triple == "x86_64-pc-windows-gnu" {
 +        if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
 +            eprintln!(
 +                "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
 +                to compile a sysroot for it.",
 +            );
 +            process::exit(1);
 +        }
 +        for file in fs::read_dir(
 +            default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
 +        )
 +        .unwrap()
 +        {
 +            let file = file.unwrap().path();
 +            if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
 +                continue; // only copy object files
 +            }
 +            try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
 +        }
 +    }
 +
 +    match sysroot_kind {
 +        SysrootKind::None => {} // Nothing to do
 +        SysrootKind::Llvm => {
 +            for file in fs::read_dir(
 +                default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
 +            )
 +            .unwrap()
 +            {
 +                let file = file.unwrap().path();
 +                let file_name_str = file.file_name().unwrap().to_str().unwrap();
++                if (file_name_str.contains("rustc_")
++                    && !file_name_str.contains("rustc_std_workspace_")
++                    && !file_name_str.contains("rustc_demangle"))
 +                    || file_name_str.contains("chalk")
 +                    || file_name_str.contains("tracing")
 +                    || file_name_str.contains("regex")
 +                {
 +                    // These are large crates that are part of the rustc-dev component and are not
 +                    // necessary to run regular programs.
 +                    continue;
 +                }
 +                try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
 +            }
 +
 +            if target_triple != host_triple {
 +                for file in fs::read_dir(
 +                    default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
 +                )
 +                .unwrap()
 +                {
 +                    let file = file.unwrap().path();
 +                    try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
 +                }
 +            }
 +        }
 +        SysrootKind::Clif => {
 +            build_clif_sysroot_for_triple(channel, target_dir, host_triple, None);
 +
 +            if host_triple != target_triple {
 +                // When cross-compiling it is often necessary to manually pick the right linker
 +                let linker = if target_triple == "aarch64-unknown-linux-gnu" {
 +                    Some("aarch64-linux-gnu-gcc")
 +                } else {
 +                    None
 +                };
 +                build_clif_sysroot_for_triple(channel, target_dir, target_triple, linker);
 +            }
 +
 +            // Copy std for the host to the lib dir. This is necessary for the jit mode to find
 +            // libstd.
 +            for file in fs::read_dir(host_rustlib_lib).unwrap() {
 +                let file = file.unwrap().path();
 +                if file.file_name().unwrap().to_str().unwrap().contains("std-") {
 +                    try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +fn build_clif_sysroot_for_triple(
 +    channel: &str,
 +    target_dir: &Path,
 +    triple: &str,
 +    linker: Option<&str>,
 +) {
 +    match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
 +        Err(e) => {
 +            eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
 +            eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
 +            process::exit(1);
 +        }
 +        Ok(source_version) => {
 +            let rustc_version = get_rustc_version();
 +            if source_version != rustc_version {
 +                eprintln!("The patched sysroot source is outdated");
 +                eprintln!("Source version: {}", source_version.trim());
 +                eprintln!("Rustc version:  {}", rustc_version.trim());
 +                eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
 +                process::exit(1);
 +            }
 +        }
 +    }
 +
 +    let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
 +
 +    if !crate::config::get_bool("keep_sysroot") {
 +        // Cleanup the target dir with the exception of build scripts and the incremental cache
 +        for dir in ["build", "deps", "examples", "native"] {
 +            if build_dir.join(dir).exists() {
 +                fs::remove_dir_all(build_dir.join(dir)).unwrap();
 +            }
 +        }
 +    }
 +
 +    // Build sysroot
 +    let mut build_cmd = Command::new("cargo");
 +    build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
 +    let mut rustflags = "--clif -Zforce-unstable-if-unmarked".to_string();
 +    if channel == "release" {
 +        build_cmd.arg("--release");
 +        rustflags.push_str(" -Zmir-opt-level=3");
 +    }
 +    if let Some(linker) = linker {
 +        use std::fmt::Write;
 +        write!(rustflags, " -Clinker={}", linker).unwrap();
 +    }
 +    build_cmd.env("RUSTFLAGS", rustflags);
 +    build_cmd.env(
 +        "RUSTC",
 +        env::current_dir().unwrap().join(target_dir).join("bin").join("cg_clif_build_sysroot"),
 +    );
 +    // FIXME Enable incremental again once rust-lang/rust#74946 is fixed
 +    build_cmd.env("CARGO_INCREMENTAL", "0").env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
 +    spawn_and_wait(build_cmd);
 +
 +    // Copy all relevant files to the sysroot
 +    for entry in
 +        fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
 +            .unwrap()
 +    {
 +        let entry = entry.unwrap();
 +        if let Some(ext) = entry.path().extension() {
 +            if ext == "rmeta" || ext == "d" || ext == "dSYM" {
 +                continue;
 +            }
 +        } else {
 +            continue;
 +        };
 +        try_hard_link(
 +            entry.path(),
 +            target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
 +        );
 +    }
 +}
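The final copy loop above skips `rmeta`, `d`, and `dSYM` artifacts as well as extensionless entries, so only usable build products (rlibs, dylibs, object files) land in the sysroot. The same filter as a standalone, testable function (an illustrative restatement, not code from this commit):

```rust
use std::path::Path;

// Returns true for artifacts worth hard-linking into the sysroot,
// mirroring the extension checks in build_clif_sysroot_for_triple.
fn should_copy(path: &Path) -> bool {
    match path.extension().and_then(|ext| ext.to_str()) {
        None | Some("rmeta") | Some("d") | Some("dSYM") => false,
        Some(_) => true,
    }
}

fn main() {
    assert!(should_copy(Path::new("libstd-0123.rlib")));
    assert!(!should_copy(Path::new("std-0123.d")));
    assert!(!should_copy(Path::new("some_dir")));
}
```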
index 401b8271abcc52e63f4b4c493a5ac2ec24a59066,0000000000000000000000000000000000000000..4b2051b605abdd726c99682639f95f5c8a7deeb3
mode 100644,000000..100644
--- /dev/null
@@@ -1,133 -1,0 +1,136 @@@
-     fs::write(
-         Path::new("build_sysroot").join("rustc_version"),
-         &rustc_version,
-     )
-     .unwrap();
 +use std::env;
 +use std::ffi::OsStr;
 +use std::ffi::OsString;
 +use std::fs;
 +use std::path::Path;
 +use std::process::Command;
 +
 +use crate::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
 +use crate::utils::{copy_dir_recursively, spawn_and_wait};
 +
 +pub(crate) fn prepare() {
 +    prepare_sysroot();
 +
 +    eprintln!("[INSTALL] hyperfine");
 +    Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
 +
 +    clone_repo(
 +        "rand",
 +        "https://github.com/rust-random/rand.git",
 +        "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
 +    );
 +    apply_patches("rand", Path::new("rand"));
 +
 +    clone_repo(
 +        "regex",
 +        "https://github.com/rust-lang/regex.git",
 +        "341f207c1071f7290e3f228c710817c280c8dca1",
 +    );
 +
++    clone_repo(
++        "stdsimd",
++        "https://github.com/rust-lang/stdsimd",
++        "be96995d8ddec03fac9a0caf4d4c51c7fbc33507",
++    );
++    apply_patches("stdsimd", Path::new("stdsimd"));
++
 +    clone_repo(
 +        "simple-raytracer",
 +        "https://github.com/ebobby/simple-raytracer",
 +        "804a7a21b9e673a482797aa289a18ed480e4d813",
 +    );
 +
 +    eprintln!("[LLVM BUILD] simple-raytracer");
 +    let mut build_cmd = Command::new("cargo");
 +    build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
 +    spawn_and_wait(build_cmd);
 +    fs::copy(
 +        Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
 +        // FIXME use get_file_name here too once testing is migrated to rust
 +        "simple-raytracer/raytracer_cg_llvm",
 +    )
 +    .unwrap();
 +}
 +
 +fn prepare_sysroot() {
 +    let rustc_path = get_rustc_path();
 +    let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
 +    let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
 +
 +    assert!(sysroot_src_orig.exists());
 +
 +    if sysroot_src.exists() {
 +        fs::remove_dir_all(&sysroot_src).unwrap();
 +    }
 +    fs::create_dir_all(sysroot_src.join("library")).unwrap();
 +    eprintln!("[COPY] sysroot src");
 +    copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
 +
 +    let rustc_version = get_rustc_version();
++    fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
 +
 +    eprintln!("[GIT] init");
 +    let mut git_init_cmd = Command::new("git");
 +    git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
 +    spawn_and_wait(git_init_cmd);
 +
 +    let mut git_add_cmd = Command::new("git");
 +    git_add_cmd.arg("add").arg(".").current_dir(&sysroot_src);
 +    spawn_and_wait(git_add_cmd);
 +
 +    let mut git_commit_cmd = Command::new("git");
 +    git_commit_cmd
 +        .arg("commit")
 +        .arg("-m")
 +        .arg("Initial commit")
 +        .arg("-q")
 +        .current_dir(&sysroot_src);
 +    spawn_and_wait(git_commit_cmd);
 +
 +    apply_patches("sysroot", &sysroot_src);
 +
 +    clone_repo(
 +        "build_sysroot/compiler-builtins",
 +        "https://github.com/rust-lang/compiler-builtins.git",
 +        "0.1.46",
 +    );
 +    apply_patches("compiler-builtins", Path::new("build_sysroot/compiler-builtins"));
 +}
 +
 +fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
 +    eprintln!("[CLONE] {}", repo);
 +    // Ignore exit code as the repo may already have been checked out
 +    Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
 +
 +    let mut clean_cmd = Command::new("git");
 +    clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
 +    spawn_and_wait(clean_cmd);
 +
 +    let mut checkout_cmd = Command::new("git");
 +    checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
 +    spawn_and_wait(checkout_cmd);
 +}
 +
 +fn get_patches(crate_name: &str) -> Vec<OsString> {
 +    let mut patches: Vec<_> = fs::read_dir("patches")
 +        .unwrap()
 +        .map(|entry| entry.unwrap().path())
 +        .filter(|path| path.extension() == Some(OsStr::new("patch")))
 +        .map(|path| path.file_name().unwrap().to_owned())
 +        .filter(|file_name| {
 +            file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
 +        })
 +        .collect();
 +    patches.sort();
 +    patches
 +}
 +
 +fn apply_patches(crate_name: &str, target_dir: &Path) {
 +    for patch in get_patches(crate_name) {
 +        eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
 +        let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
 +        let mut apply_patch_cmd = Command::new("git");
 +        apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
 +        spawn_and_wait(apply_patch_cmd);
 +    }
 +}
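`get_patches` above selects patches by file name: everything after the numeric prefix must start with the crate name, so `0022-sysroot-Disable-not-compiling-tests.patch` is applied to the sysroot source while `0001-stdsimd-Disable-unsupported-tests.patch` is not. The matching rule in isolation (an illustrative restatement):

```rust
// Mirrors the filter in get_patches: drop the "0022-" style prefix and
// check the remainder against the crate name.
fn patch_applies(file_name: &str, crate_name: &str) -> bool {
    file_name
        .split_once('-')
        .map_or(false, |(_prefix, rest)| rest.starts_with(crate_name))
}

fn main() {
    assert!(patch_applies("0022-sysroot-Disable-not-compiling-tests.patch", "sysroot"));
    assert!(!patch_applies("0001-stdsimd-Disable-unsupported-tests.patch", "sysroot"));
}
```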
index f4f8c82d69f10814a052af9e45b7e79bfc61d301,0000000000000000000000000000000000000000..23e5bf2e0a8fd77fd08e877fe1a2d2baa29cecaf
mode 100755,000000..100755
--- /dev/null
@@@ -1,6 -1,0 +1,6 @@@
- rm -rf rand/ regex/ simple-raytracer/
 +#!/usr/bin/env bash
 +set -e
 +
 +rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
 +rm -rf target/ build/ perf.data{,.old}
++rm -rf rand/ regex/ simple-raytracer/ stdsimd/
index 956d5905a97adfbf460676559a73b32728b06b00,0000000000000000000000000000000000000000..87eec0e818bb2407695c4fa6b82ff49eada037a1
mode 100644,000000..100644
--- /dev/null
@@@ -1,65 -1,0 +1,65 @@@
- $ $cg_clif_dir/build/bin/cg_clif -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
 +# Usage
 +
 +rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
 +
 +In the following, `$cg_clif_dir` is the directory into which you cloned this repo, and it is assumed you followed the setup instructions (`y.rs prepare` and `y.rs build`, or `test.sh`).
 +
 +## Cargo
 +
 +In the directory with your project (where you can do the usual `cargo build`), run:
 +
 +```bash
 +$ $cg_clif_dir/build/cargo build
 +```
 +
 +This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
 +
 +## Rustc
 +
 +> You should prefer using the Cargo method.
 +
 +```bash
 +$ $cg_clif_dir/build/bin/cg_clif my_crate.rs
 +```
 +
 +## Jit mode
 +
 +In jit mode, cg_clif will immediately execute your code without creating an executable file.
 +
 +> This requires all dependencies to be available as dynamic libraries.
 +> The jit mode will probably need cargo integration to make this possible.
 +
 +```bash
 +$ $cg_clif_dir/build/cargo jit
 +```
 +
 +or
 +
 +```bash
-     echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Cllvm-args=mode=jit -Cprefer-dynamic
++$ $cg_clif_dir/build/bin/cg_clif -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
 +```
 +
 +There is also an experimental lazy jit mode. In this mode, functions are only compiled when they
 +are first called.
 +
 +```bash
 +$ $cg_clif_dir/build/cargo lazy-jit
 +```
 +
 +## Shell
 +
 +These are a few functions that allow you to easily run Rust code from the shell using cg_clif as a jit.
 +
 +```bash
 +function jit_naked() {
++    echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
 +}
 +
 +function jit() {
 +    jit_naked "fn main() { $@ }"
 +}
 +
 +function jit_calc() {
 +    jit 'println!("0x{:x}", ' $@ ');';
 +}
 +```
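For clarity, `jit 'println!("hello");'` wraps its argument in a `main` function before piping it to cg_clif on stdin, so it JIT-compiles a program equivalent to:

```rust
fn main() { println!("hello"); }
```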
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..731c60fda58d675de31323addb9f71520b51683b
new file mode 100644
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,165 @@@
++From 6bfce5dc2cbf834c74dbccb7538adc08c6eb57e7 Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Sun, 25 Jul 2021 18:39:31 +0200
++Subject: [PATCH] Disable unsupported tests
++
++---
++ crates/core_simd/src/array.rs        |  2 ++
++ crates/core_simd/src/lib.rs          |  2 +-
++ crates/core_simd/src/math.rs         |  4 ++++
++ crates/core_simd/tests/masks.rs      | 12 ------------
++ crates/core_simd/tests/ops_macros.rs |  6 ++++++
++ crates/core_simd/tests/round.rs      |  2 ++
++ 6 files changed, 15 insertions(+), 13 deletions(-)
++
++diff --git a/crates/core_simd/src/array.rs b/crates/core_simd/src/array.rs
++index 25c5309..2b3d819 100644
++--- a/crates/core_simd/src/array.rs
+++++ b/crates/core_simd/src/array.rs
++@@ -22,6 +22,7 @@ where
++     #[must_use]
++     fn splat(val: Self::Scalar) -> Self;
++ 
+++    /*
++     /// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
++     /// If an index is out of bounds, that lane instead selects the value from the "or" vector.
++     /// ```
++@@ -150,6 +151,7 @@ where
++             // Cleared ☢️ *mut T Zone
++         }
++     }
+++    */
++ }
++ 
++ macro_rules! impl_simdarray_for {
++diff --git a/crates/core_simd/src/lib.rs b/crates/core_simd/src/lib.rs
++index a64904d..299eb11 100644
++--- a/crates/core_simd/src/lib.rs
+++++ b/crates/core_simd/src/lib.rs
++@@ -1,7 +1,7 @@
++ #![no_std]
++ #![allow(incomplete_features)]
++ #![feature(
++-    const_generics, 
+++    const_generics,
++     platform_intrinsics,
++     repr_simd,
++     simd_ffi,
++diff --git a/crates/core_simd/src/math.rs b/crates/core_simd/src/math.rs
++index 7290a28..e394730 100644
++--- a/crates/core_simd/src/math.rs
+++++ b/crates/core_simd/src/math.rs
++@@ -2,6 +2,7 @@ macro_rules! impl_uint_arith {
++     ($(($name:ident, $n:ident)),+) => {
++         $( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
++ 
+++            /*
++             /// Lanewise saturating add.
++             ///
++             /// # Examples
++@@ -38,6 +39,7 @@ macro_rules! impl_uint_arith {
++             pub fn saturating_sub(self, second: Self) -> Self {
++                 unsafe { crate::intrinsics::simd_saturating_sub(self, second) }
++             }
+++            */
++         })+
++     }
++ }
++@@ -46,6 +48,7 @@ macro_rules! impl_int_arith {
++     ($(($name:ident, $n:ident)),+) => {
++         $( impl<const LANES: usize> $name<LANES> where Self: crate::LanesAtMost32 {
++ 
+++            /*
++             /// Lanewise saturating add.
++             ///
++             /// # Examples
++@@ -141,6 +144,7 @@ macro_rules! impl_int_arith {
++             pub fn saturating_neg(self) -> Self {
++                 Self::splat(0).saturating_sub(self)
++             }
+++            */
++         })+
++     }
++ }
++diff --git a/crates/core_simd/tests/masks.rs b/crates/core_simd/tests/masks.rs
++index 61d8e44..2bccae2 100644
++--- a/crates/core_simd/tests/masks.rs
+++++ b/crates/core_simd/tests/masks.rs
++@@ -67,18 +67,6 @@ macro_rules! test_mask_api {
++                 assert_eq!(int.to_array(), [-1, 0, 0, -1, 0, 0, -1, 0]);
++                 assert_eq!(core_simd::$name::<8>::from_int(int), mask);
++             }
++-
++-            #[test]
++-            fn roundtrip_bitmask_conversion() {
++-                let values = [
++-                    true, false, false, true, false, false, true, false,
++-                    true, true, false, false, false, false, false, true,
++-                ];
++-                let mask = core_simd::$name::<16>::from_array(values);
++-                let bitmask = mask.to_bitmask();
++-                assert_eq!(bitmask, [0b01001001, 0b10000011]);
++-                assert_eq!(core_simd::$name::<16>::from_bitmask(bitmask), mask);
++-            }
++         }
++     }
++ }
++diff --git a/crates/core_simd/tests/ops_macros.rs b/crates/core_simd/tests/ops_macros.rs
++index cb39e73..fc0ebe1 100644
++--- a/crates/core_simd/tests/ops_macros.rs
+++++ b/crates/core_simd/tests/ops_macros.rs
++@@ -435,6 +435,7 @@ macro_rules! impl_float_tests {
++                     )
++                 }
++ 
+++                /*
++                 fn mul_add<const LANES: usize>() {
++                     test_helpers::test_ternary_elementwise(
++                         &Vector::<LANES>::mul_add,
++@@ -442,6 +443,7 @@ macro_rules! impl_float_tests {
++                         &|_, _, _| true,
++                     )
++                 }
+++                */
++ 
++                 fn sqrt<const LANES: usize>() {
++                     test_helpers::test_unary_elementwise(
++@@ -581,6 +585,7 @@ macro_rules! impl_float_tests {
++                     });
++                 }
++ 
+++                /*
++                 fn horizontal_max<const LANES: usize>() {
++                     test_helpers::test_1(&|x| {
++                         let vmax = Vector::<LANES>::from_array(x).horizontal_max();
++@@ -604,6 +609,7 @@ macro_rules! impl_float_tests {
++                         Ok(())
++                     });
++                 }
+++                */
++             }
++         }
++     }
++diff --git a/crates/core_simd/tests/round.rs b/crates/core_simd/tests/round.rs
++index 37044a7..4cdc6b7 100644
++--- a/crates/core_simd/tests/round.rs
+++++ b/crates/core_simd/tests/round.rs
++@@ -25,6 +25,7 @@ macro_rules! float_rounding_test {
++                     )
++                 }
++ 
+++                /*
++                 fn round<const LANES: usize>() {
++                     test_helpers::test_unary_elementwise(
++                         &Vector::<LANES>::round,
++@@ -32,6 +33,7 @@ macro_rules! float_rounding_test {
++                         &|_| true,
++                     )
++                 }
+++                */
++ 
++                 fn trunc<const LANES: usize>() {
++                     test_helpers::test_unary_elementwise(
++-- 
++2.26.2.7.g19db9cfb68
++
index ba0eaacd82870fd0a12952989c55b70317ef3b3a,0000000000000000000000000000000000000000..25a315f666e27c0906bdcec9e69c25338c2dffcf
mode 100644,000000..100644
--- /dev/null
@@@ -1,83 -1,0 +1,83 @@@
- @@ -289,6 +290,7 @@ fn write_unaligned_drop() {
-      }
-      DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
 +From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
 +From: bjorn3 <bjorn3@users.noreply.github.com>
 +Date: Sun, 24 Nov 2019 15:10:23 +0100
 +Subject: [PATCH] [core] Disable not compiling tests
 +
 +---
 + library/core/tests/Cargo.toml         | 8 ++++++++
 + library/core/tests/num/flt2dec/mod.rs | 1 -
 + library/core/tests/num/int_macros.rs  | 2 ++
 + library/core/tests/num/uint_macros.rs | 2 ++
 + library/core/tests/ptr.rs             | 2 ++
 + library/core/tests/slice.rs           | 2 ++
 + 6 files changed, 16 insertions(+), 1 deletion(-)
 + create mode 100644 library/core/tests/Cargo.toml
 +
 +diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
 +new file mode 100644
 +index 0000000..46fd999
 +--- /dev/null
 ++++ b/library/core/tests/Cargo.toml
 +@@ -0,0 +1,8 @@
 ++[package]
 ++name = "core"
 ++version = "0.0.0"
 ++edition = "2018"
 ++
 ++[lib]
 ++name = "coretests"
 ++path = "lib.rs"
 +diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
 +index a35897e..f0bf645 100644
 +--- a/library/core/tests/num/flt2dec/mod.rs
 ++++ b/library/core/tests/num/flt2dec/mod.rs
 +@@ -13,7 +13,6 @@ mod strategy {
 +     mod dragon;
 +     mod grisu;
 + }
 +-mod random;
 + 
 + pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
 +     match decode(v).1 {
 +diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
 +index 1a6be3a..42dbd59 100644
 +--- a/library/core/tests/ptr.rs
 ++++ b/library/core/tests/ptr.rs
 +@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
 +     assert!(ys == zs);
 + }
 + 
 ++/*
 + #[test]
 + #[allow(warnings)]
 + // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
-  fn align_offset_zst() {
++@@ -277,6 +277,7 @@ pub fn test_variadic_fnptr() {
++     let mut s = SipHasher::new();
++     assert_eq!(p.hash(&mut s), q.hash(&mut s));
 + }
 ++*/
 + 
 + #[test]
++ fn write_unaligned_drop() {
 +diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
 +index 6609bc3..241b497 100644
 +--- a/library/core/tests/slice.rs
 ++++ b/library/core/tests/slice.rs
 +@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
 +     }
 + }
 + 
 ++/*
 + #[test]
 + #[cfg(not(target_arch = "wasm32"))]
 + fn sort_unstable() {
 +@@ -1394,6 +1395,7 @@ fn partition_at_index() {
 +     v.select_nth_unstable(0);
 +     assert!(v == [0xDEADBEEF]);
 + }
 ++*/
 + 
 + #[test]
 + #[should_panic(expected = "index 0 greater than length of slice")]
 +--
 +2.21.0 (Apple Git-122)
index 5d2c3049f60ebfb03d44e5885d14390d1e0371d2,0000000000000000000000000000000000000000..50ef0bd9418c74f8a760ca8b80629edc223be08c
mode 100644,000000..100644
--- /dev/null
@@@ -1,90 -1,0 +1,50 @@@
- diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
- index a17c094..5bb11d2 100644
- --- a/library/core/tests/num/mod.rs
- +++ b/library/core/tests/num/mod.rs
- @@ -651,11 +651,12 @@ macro_rules! test_float {
-                  assert_eq!((9.0 as $fty).min($neginf), $neginf);
-                  assert_eq!(($neginf as $fty).min(-9.0), $neginf);
-                  assert_eq!((-9.0 as $fty).min($neginf), $neginf);
- -                assert_eq!(($nan as $fty).min(9.0), 9.0);
- -                assert_eq!(($nan as $fty).min(-9.0), -9.0);
- -                assert_eq!((9.0 as $fty).min($nan), 9.0);
- -                assert_eq!((-9.0 as $fty).min($nan), -9.0);
- -                assert!(($nan as $fty).min($nan).is_nan());
- +                // Cranelift fmin has NaN propagation
- +                //assert_eq!(($nan as $fty).min(9.0), 9.0);
- +                //assert_eq!(($nan as $fty).min(-9.0), -9.0);
- +                //assert_eq!((9.0 as $fty).min($nan), 9.0);
- +                //assert_eq!((-9.0 as $fty).min($nan), -9.0);
- +                //assert!(($nan as $fty).min($nan).is_nan());
-              }
-              #[test]
-              fn max() {
- @@ -673,11 +674,12 @@ macro_rules! test_float {
-                  assert_eq!((9.0 as $fty).max($neginf), 9.0);
-                  assert_eq!(($neginf as $fty).max(-9.0), -9.0);
-                  assert_eq!((-9.0 as $fty).max($neginf), -9.0);
- -                assert_eq!(($nan as $fty).max(9.0), 9.0);
- -                assert_eq!(($nan as $fty).max(-9.0), -9.0);
- -                assert_eq!((9.0 as $fty).max($nan), 9.0);
- -                assert_eq!((-9.0 as $fty).max($nan), -9.0);
- -                assert!(($nan as $fty).max($nan).is_nan());
- +                // Cranelift fmax has NaN propagation
- +                //assert_eq!(($nan as $fty).max(9.0), 9.0);
- +                //assert_eq!(($nan as $fty).max(-9.0), -9.0);
- +                //assert_eq!((9.0 as $fty).max($nan), 9.0);
- +                //assert_eq!((-9.0 as $fty).max($nan), -9.0);
- +                //assert!(($nan as $fty).max($nan).is_nan());
-              }
-              #[test]
-              fn rem_euclid() {
 +From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
 +From: bjorn3 <bjorn3@users.noreply.github.com>
 +Date: Sun, 24 Nov 2019 15:34:06 +0100
 +Subject: [PATCH] [core] Ignore failing tests
 +
 +---
 + library/core/tests/iter.rs       |  4 ++++
 + library/core/tests/num/bignum.rs | 10 ++++++++++
 + library/core/tests/num/mod.rs    |  5 +++--
 + library/core/tests/time.rs       |  1 +
 + 4 files changed, 18 insertions(+), 2 deletions(-)
 +
 +diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
 +index 4bc44e9..8e3c7a4 100644
 +--- a/library/core/tests/array.rs
 ++++ b/library/core/tests/array.rs
 +@@ -242,6 +242,7 @@ fn iterator_drops() {
 +     assert_eq!(i.get(), 5);
 + }
 + 
 ++/*
 + // This test does not work on targets without panic=unwind support.
 + // To work around this problem, test is marked is should_panic, so it will
 + // be automagically skipped on unsuitable targets, such as
 +@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
 +     assert_eq!(COUNTER.load(Relaxed), 0);
 +     panic!("test succeeded")
 + }
 ++*/
 + 
 + #[test]
 + fn empty_array_is_always_default() {
 +@@ -304,6 +304,7 @@ fn array_map() {
 +     assert_eq!(b, [1, 2, 3]);
 + }
 + 
 ++/*
 + // See note on above test for why `should_panic` is used.
 + #[test]
 + #[should_panic(expected = "test succeeded")]
 +@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
 +     assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
 +     panic!("test succeeded")
 + }
 ++*/
 + 
 + #[test]
 + fn cell_allows_array_cycle() {
 +-- 
 +2.21.0 (Apple Git-122)
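
The two sysroot patches above take tests out of the run by wrapping them in /* ... */ block comments, which is the only option when a test no longer compiles at all. For tests that merely fail at runtime under cg_clif, Rust's built-in #[ignore] attribute would be a lighter-weight alternative; a minimal sketch with a hypothetical test, not taken from the patches:

#[test]
#[ignore] // skipped by default; opt in with `cargo test -- --ignored`
fn needs_unwinding() {
    // Hypothetical body: relies on unwinding, which -Cpanic=abort rules out.
    std::panic::catch_unwind(|| {}).unwrap();
}

Commenting out also covers the non-compiling cases, which #[ignore] cannot.
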
index 32e5930969061f0231ecc6e89c72eb77be7002cb,0000000000000000000000000000000000000000..cda8153083c337fd004cf2e3d5c0ac631156fd19
mode 100644,000000..100644
--- /dev/null
@@@ -1,103 -1,0 +1,103 @@@
- From 894e07dfec2624ba539129b1c1d63e1d7d812bda Mon Sep 17 00:00:00 2001
++From 6a4e6f5dc8c8a529a822eb9b57f9e57519595439 Mon Sep 17 00:00:00 2001
 +From: bjorn3 <bjorn3@users.noreply.github.com>
 +Date: Thu, 18 Feb 2021 18:45:28 +0100
 +Subject: [PATCH] Disable 128bit atomic operations
 +
 +Cranelift doesn't support them yet
 +---
-  library/core/src/sync/atomic.rs | 38 ---------------------------------
-  library/core/tests/atomic.rs    |  4 ----
-  library/std/src/panic.rs        |  6 ------
++ library/core/src/panic/unwind_safe.rs |  6 -----
++ library/core/src/sync/atomic.rs       | 38 ---------------------------
++ library/core/tests/atomic.rs          |  4 ---
 + 3 files changed, 48 deletions(-)
 +
++diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
++index 092b7cf..158cf71 100644
++--- a/library/core/src/panic/unwind_safe.rs
+++++ b/library/core/src/panic/unwind_safe.rs
++@@ -216,9 +216,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicI32 {}
++ #[cfg(target_has_atomic_load_store = "64")]
++ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
++ impl RefUnwindSafe for crate::sync::atomic::AtomicI64 {}
++-#[cfg(target_has_atomic_load_store = "128")]
++-#[unstable(feature = "integer_atomics", issue = "32976")]
++-impl RefUnwindSafe for crate::sync::atomic::AtomicI128 {}
++ 
++ #[cfg(target_has_atomic_load_store = "ptr")]
++ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
++@@ -235,9 +232,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicU32 {}
++ #[cfg(target_has_atomic_load_store = "64")]
++ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
++ impl RefUnwindSafe for crate::sync::atomic::AtomicU64 {}
++-#[cfg(target_has_atomic_load_store = "128")]
++-#[unstable(feature = "integer_atomics", issue = "32976")]
++-impl RefUnwindSafe for crate::sync::atomic::AtomicU128 {}
++ 
++ #[cfg(target_has_atomic_load_store = "8")]
++ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
 +diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
- index 81c9e1d..65c9503 100644
++index 0194c58..25a0038 100644
 +--- a/library/core/src/sync/atomic.rs
 ++++ b/library/core/src/sync/atomic.rs
- @@ -2228,44 +2228,6 @@ atomic_int! {
++@@ -2229,44 +2229,6 @@ atomic_int! {
 +     "AtomicU64::new(0)",
 +     u64 AtomicU64 ATOMIC_U64_INIT
 + }
 +-#[cfg(target_has_atomic_load_store = "128")]
 +-atomic_int! {
 +-    cfg(target_has_atomic = "128"),
 +-    cfg(target_has_atomic_equal_alignment = "128"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    "i128",
 +-    "#![feature(integer_atomics)]\n\n",
 +-    atomic_min, atomic_max,
 +-    16,
 +-    "AtomicI128::new(0)",
 +-    i128 AtomicI128 ATOMIC_I128_INIT
 +-}
 +-#[cfg(target_has_atomic_load_store = "128")]
 +-atomic_int! {
 +-    cfg(target_has_atomic = "128"),
 +-    cfg(target_has_atomic_equal_alignment = "128"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
 +-    unstable(feature = "integer_atomics", issue = "32976"),
 +-    "u128",
 +-    "#![feature(integer_atomics)]\n\n",
 +-    atomic_umin, atomic_umax,
 +-    16,
 +-    "AtomicU128::new(0)",
 +-    u128 AtomicU128 ATOMIC_U128_INIT
 +-}
 + 
 + macro_rules! atomic_int_ptr_sized {
 +     ( $($target_pointer_width:literal $align:literal)* ) => { $(
 +diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
- index 2d1e449..cb6da5d 100644
++index b735957..ea728b6 100644
 +--- a/library/core/tests/atomic.rs
 ++++ b/library/core/tests/atomic.rs
- @@ -145,10 +145,6 @@ fn atomic_alignment() {
++@@ -185,10 +185,6 @@ fn atomic_alignment() {
 +     assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
 +     #[cfg(target_has_atomic = "64")]
 +     assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
 +-    #[cfg(target_has_atomic = "128")]
 +-    assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
 +-    #[cfg(target_has_atomic = "128")]
 +-    assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
 +     #[cfg(target_has_atomic = "ptr")]
 +     assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
 +     #[cfg(target_has_atomic = "ptr")]
- diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
- index 89a822a..779fd88 100644
- --- a/library/std/src/panic.rs
- +++ b/library/std/src/panic.rs
- @@ -279,9 +279,6 @@ impl RefUnwindSafe for atomic::AtomicI32 {}
-  #[cfg(target_has_atomic_load_store = "64")]
-  #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
-  impl RefUnwindSafe for atomic::AtomicI64 {}
- -#[cfg(target_has_atomic_load_store = "128")]
- -#[unstable(feature = "integer_atomics", issue = "32976")]
- -impl RefUnwindSafe for atomic::AtomicI128 {}
-  
-  #[cfg(target_has_atomic_load_store = "ptr")]
-  #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
- @@ -298,9 +295,6 @@ impl RefUnwindSafe for atomic::AtomicU32 {}
-  #[cfg(target_has_atomic_load_store = "64")]
-  #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
-  impl RefUnwindSafe for atomic::AtomicU64 {}
- -#[cfg(target_has_atomic_load_store = "128")]
- -#[unstable(feature = "integer_atomics", issue = "32976")]
- -impl RefUnwindSafe for atomic::AtomicU128 {}
-  
-  #[cfg(target_has_atomic_load_store = "8")]
-  #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
 +-- 
 +2.26.2.7.g19db9cfb68
 +
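The patch above only strips the unstable AtomicI128/AtomicU128 surface that Cranelift cannot lower yet; atomics of 64 bits and below are untouched. A minimal sketch of what keeps working in the patched sysroot:

use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    // 8- to 64-bit atomic loads, stores and read-modify-write ops are unaffected.
    let counter = AtomicU64::new(0);
    counter.fetch_add(1, Ordering::SeqCst);
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}
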
index f806f7bdcd98a72e2c858d5b4f915be04e396bd1,0000000000000000000000000000000000000000..f074ebe7a42e0dabf88c234aadb2ebcf9896ef64
mode 100644,000000..100644
--- /dev/null
@@@ -1,3 -1,0 +1,3 @@@
- channel = "nightly-2021-07-07"
 +[toolchain]
++channel = "nightly-2021-08-05"
 +components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
index b7e8dd44974794294ac0621138ac6bdf95c1ee56,0000000000000000000000000000000000000000..89ec8da77d3ec5f6a1ade1ba67fe7aeb7500bac6
mode 100644,000000..100644
--- /dev/null
@@@ -1,70 -1,0 +1,78 @@@
-                 .chain(["--".to_string(), "-Cllvm-args=mode=jit".to_string()])
 +use std::env;
 +#[cfg(unix)]
 +use std::os::unix::process::CommandExt;
 +use std::path::PathBuf;
 +use std::process::Command;
 +
 +fn main() {
 +    if env::var("RUSTC_WRAPPER").map_or(false, |wrapper| wrapper.contains("sccache")) {
 +        eprintln!(
 +            "\x1b[1;93m=== Warning: Unsetting RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
 +        );
 +        env::remove_var("RUSTC_WRAPPER");
 +    }
 +
 +    let sysroot = PathBuf::from(env::current_exe().unwrap().parent().unwrap());
 +
 +    env::set_var("RUSTC", sysroot.join("bin/cg_clif".to_string() + env::consts::EXE_SUFFIX));
 +
 +    let mut rustdoc_flags = env::var("RUSTDOCFLAGS").unwrap_or(String::new());
 +    rustdoc_flags.push_str(" -Cpanic=abort -Zpanic-abort-tests -Zcodegen-backend=");
 +    rustdoc_flags.push_str(
 +        sysroot
 +            .join(if cfg!(windows) { "bin" } else { "lib" })
 +            .join(
 +                env::consts::DLL_PREFIX.to_string()
 +                    + "rustc_codegen_cranelift"
 +                    + env::consts::DLL_SUFFIX,
 +            )
 +            .to_str()
 +            .unwrap(),
 +    );
 +    rustdoc_flags.push_str(" --sysroot ");
 +    rustdoc_flags.push_str(sysroot.to_str().unwrap());
 +    env::set_var("RUSTDOCFLAGS", rustdoc_flags);
 +
 +    // Ensure that the right toolchain is used
 +    env::set_var("RUSTUP_TOOLCHAIN", env!("RUSTUP_TOOLCHAIN"));
 +
 +    let args: Vec<_> = match env::args().nth(1).as_deref() {
 +        Some("jit") => {
 +            env::set_var(
 +                "RUSTFLAGS",
 +                env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
 +            );
 +            std::array::IntoIter::new(["rustc".to_string()])
 +                .chain(env::args().skip(2))
-                 .chain(["--".to_string(), "-Cllvm-args=mode=jit-lazy".to_string()])
++                .chain([
++                    "--".to_string(),
++                    "-Zunstable-features".to_string(),
++                    "-Cllvm-args=mode=jit".to_string(),
++                ])
 +                .collect()
 +        }
 +        Some("lazy-jit") => {
 +            env::set_var(
 +                "RUSTFLAGS",
 +                env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
 +            );
 +            std::array::IntoIter::new(["rustc".to_string()])
 +                .chain(env::args().skip(2))
++                .chain([
++                    "--".to_string(),
++                    "-Zunstable-features".to_string(),
++                    "-Cllvm-args=mode=jit-lazy".to_string(),
++                ])
 +                .collect()
 +        }
 +        _ => env::args().skip(1).collect(),
 +    };
 +
 +    #[cfg(unix)]
 +    Command::new("cargo").args(args).exec();
 +
 +    #[cfg(not(unix))]
 +    std::process::exit(
 +        Command::new("cargo").args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
 +    );
 +}
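
The cargo wrapper above hands control to the real cargo in two ways: on Unix it replaces itself via CommandExt::exec, so cargo's exit status and signals reach the caller directly, while elsewhere it spawns a child and forwards the exit code by hand. A standalone sketch of that dispatch pattern, assuming a plain `cargo --version` invocation:

use std::process::Command;
#[cfg(unix)]
use std::os::unix::process::CommandExt;

fn main() {
    let mut cmd = Command::new("cargo");
    cmd.arg("--version");

    // exec only returns on failure, so everything after it is the error path.
    #[cfg(unix)]
    {
        let err = cmd.exec();
        eprintln!("failed to exec cargo: {}", err);
        std::process::exit(1);
    }

    // Without exec, spawn the child and propagate its exit code manually.
    #[cfg(not(unix))]
    std::process::exit(cmd.spawn().unwrap().wait().unwrap().code().unwrap_or(1));
}
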
index 9e196afbe4f57c38b36576fb9621f4cee56373b5,0000000000000000000000000000000000000000..c4801a0a87b886feef8df77275caf0113f7c8f72
mode 100755,000000..100755
--- /dev/null
@@@ -1,126 -1,0 +1,126 @@@
- PROFILE=$1 OUTPUT=$2 exec $RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic $0
 +#!/bin/bash
 +#![forbid(unsafe_code)]/* This line is ignored by bash
 +# This block is ignored by rustc
 +pushd $(dirname "$0")/../
 +source scripts/config.sh
 +RUSTC="$(pwd)/build/bin/cg_clif"
 +popd
++PROFILE=$1 OUTPUT=$2 exec $RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic $0
 +#*/
 +
 +//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
 +//! profiles.
 +//!
 +//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
 +//!
 +//! This file is specially crafted to be both a valid bash script and valid rust source file. If
 +//! executed as bash script this will run the rust source using cg_clif in JIT mode.
 +
 +use std::io::Write;
 +
 +fn main() -> Result<(), Box<dyn std::error::Error>> {
 +    let profile_name = std::env::var("PROFILE").unwrap();
 +    let output_name = std::env::var("OUTPUT").unwrap();
 +    if profile_name.is_empty() || output_name.is_empty() {
 +        println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
 +        std::process::exit(1);
 +    }
 +    let profile = std::fs::read_to_string(profile_name)
 +        .map_err(|err| format!("Failed to read profile {}", err))?;
 +    let mut output = std::fs::OpenOptions::new()
 +        .create(true)
 +        .write(true)
 +        .truncate(true)
 +        .open(output_name)?;
 +
 +    for line in profile.lines() {
 +        let mut stack = &line[..line.rfind(" ").unwrap()];
 +        let count = &line[line.rfind(" ").unwrap() + 1..];
 +
 +        // Filter away uninteresting samples
 +        if !stack.contains("rustc_codegen_cranelift") {
 +            continue;
 +        }
 +
 +        if stack.contains("rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items")
 +            || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
 +            || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
 +        {
 +            continue;
 +        }
 +
 +        // Trim start
 +        if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
 +            stack = &stack[index..];
 +        }
 +
 +        if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
 +            stack = &stack[index..];
 +        }
 +
 +        // Trim end
 +        const MALLOC: &str = "malloc";
 +        if let Some(index) = stack.find(MALLOC) {
 +            stack = &stack[..index + MALLOC.len()];
 +        }
 +
 +        const FREE: &str = "free";
 +        if let Some(index) = stack.find(FREE) {
 +            stack = &stack[..index + FREE.len()];
 +        }
 +
 +        const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
 +        if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
 +            stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
 +        }
 +
 +        const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
 +            "rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items";
 +        if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
 +            stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
 +        }
 +
 +        const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
 +        if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
 +            stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
 +        }
 +
 +        const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
 +        if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
 +            stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
 +        }
 +
 +        const ENCODE_METADATA: &str = "rustc_middle::ty::context::TyCtxt::encode_metadata";
 +        if let Some(index) = stack.find(ENCODE_METADATA) {
 +            stack = &stack[..index + ENCODE_METADATA.len()];
 +        }
 +
 +        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
 +        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
 +            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
 +        }
 +
 +        const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
 +        if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
 +            stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
 +        }
 +
 +        const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
 +        if let Some(index) = stack.find(INST_BUILD) {
 +            stack = &stack[..index + INST_BUILD.len()];
 +        }
 +
 +        output.write_all(stack.as_bytes())?;
 +        output.write_all(&*b" ")?;
 +        output.write_all(count.as_bytes())?;
 +        output.write_all(&*b"\n")?;
 +    }
 +
 +    Ok(())
 +}
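
filter_profile.rs works on collapsed stacks of the form "frame1;frame2;... count": samples that never touch rustc_codegen_cranelift are dropped, and the remaining stacks are sliced down to the window between a known entry frame and a known leaf frame. A tiny sketch of the slicing step on a made-up sample line:

fn main() {
    let line = "start;rustc_interface::passes::analysis;rustc_codegen_cranelift::driver;malloc 42";
    let mut stack = &line[..line.rfind(' ').unwrap()];
    let count = &line[line.rfind(' ').unwrap() + 1..];

    // Trim everything before a known entry point ...
    if let Some(index) = stack.find("rustc_interface::passes::analysis") {
        stack = &stack[index..];
    }
    // ... and everything after a known leaf frame.
    const MALLOC: &str = "malloc";
    if let Some(index) = stack.find(MALLOC) {
        stack = &stack[..index + MALLOC.len()];
    }

    // Prints the trimmed stack followed by its sample count.
    println!("{} {}", stack, count);
}
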
index 52adaaa8de673d3661903b9353ed96ec1bbbb5dc,0000000000000000000000000000000000000000..ca83e7096b86db354e50a19eab683b90f1da36a2
mode 100644,000000..100644
--- /dev/null
@@@ -1,57 -1,0 +1,57 @@@
- +compiler_builtins = { version = "0.1.45", features = ['rustc-dep-of-std', 'no-asm'] }
 +#!/bin/bash
 +set -e
 +
 +./y.rs build
 +source scripts/config.sh
 +
 +echo "[SETUP] Rust fork"
 +git clone https://github.com/rust-lang/rust.git || true
 +pushd rust
 +git fetch
 +git checkout -- .
 +git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
 +
 +git apply - <<EOF
 +diff --git a/Cargo.toml b/Cargo.toml
 +index 5bd1147cad5..10d68a2ff14 100644
 +--- a/Cargo.toml
 ++++ b/Cargo.toml
 +@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
 + rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
 + rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
 +
 ++compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
 ++
 + [patch."https://github.com/rust-lang/rust-clippy"]
 + clippy_lints = { path = "src/tools/clippy/clippy_lints" }
 +diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
 +index d95b5b7f17f..00b6f0e3635 100644
 +--- a/library/alloc/Cargo.toml
 ++++ b/library/alloc/Cargo.toml
 +@@ -8,7 +8,7 @@ edition = "2018"
 +
 + [dependencies]
 + core = { path = "../core" }
 +-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
+++compiler_builtins = { version = "0.1.46", features = ['rustc-dep-of-std', 'no-asm'] }
 +
 + [dev-dependencies]
 + rand = "0.7"
 + rand_xorshift = "0.2"
 +EOF
 +
 +cat > config.toml <<EOF
 +[llvm]
 +ninja = false
 +
 +[build]
 +rustc = "$(pwd)/../build/bin/cg_clif"
 +cargo = "$(rustup which cargo)"
 +full-bootstrap = true
 +local-rebuild = true
 +
 +[rust]
 +codegen-backends = ["cranelift"]
 +deny-warnings = false
 +EOF
 +popd
index 2f5c2cf737b056be7560f2caa6d6c7d0d6160a8c,0000000000000000000000000000000000000000..0ac49dd35740f51e36f1a7ba9265a5ef1f63f0cf
mode 100755,000000..100755
--- /dev/null
@@@ -1,94 -1,0 +1,93 @@@
- rm src/test/ui/default-alloc-error-hook.rs
 +#!/bin/bash
 +set -e
 +
 +cd $(dirname "$0")/../
 +
 +source ./scripts/setup_rust_fork.sh
 +
 +echo "[TEST] Test suite of rustc"
 +pushd rust
 +
 +cargo install ripgrep
 +
 +rm -r src/test/ui/{extern/,panics/,unsized-locals/,thinlto/,simd*,*lto*.rs,linkage*,unwind-*.rs} || true
 +for test in $(rg --files-with-matches "asm!|catch_unwind|should_panic|lto" src/test/ui); do
 +  rm $test
 +done
 +
 +for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
 +  rm $test
 +done
 +
 +git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
 +
 +# these all depend on unwinding support
 +rm src/test/ui/backtrace.rs
 +rm src/test/ui/array-slice-vec/box-of-array-of-drop-*.rs
 +rm src/test/ui/array-slice-vec/slice-panic-*.rs
 +rm src/test/ui/array-slice-vec/nested-vec-3.rs
 +rm src/test/ui/cleanup-rvalue-temp-during-incomplete-alloc.rs
 +rm src/test/ui/issues/issue-26655.rs
 +rm src/test/ui/issues/issue-29485.rs
 +rm src/test/ui/issues/issue-30018-panic.rs
 +rm src/test/ui/multi-panic.rs
 +rm src/test/ui/sepcomp/sepcomp-unwind.rs
 +rm src/test/ui/structs-enums/unit-like-struct-drop-run.rs
 +rm src/test/ui/terminate-in-initializer.rs
 +rm src/test/ui/threads-sendsync/task-stderr.rs
 +rm src/test/ui/numbers-arithmetic/int-abs-overflow.rs
 +rm src/test/ui/drop/drop-trait-enum.rs
 +rm src/test/ui/numbers-arithmetic/issue-8460.rs
 +rm src/test/ui/rt-explody-panic-payloads.rs
 +rm src/test/incremental/change_crate_dep_kind.rs
 +
 +rm src/test/ui/issues/issue-28950.rs # depends on stack size optimizations
 +rm src/test/ui/init-large-type.rs # same
 +rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
 +rm src/test/ui/issues/issue-33992.rs # unsupported linkages
 +rm src/test/ui/issues/issue-51947.rs # same
 +rm src/test/ui/numbers-arithmetic/saturating-float-casts.rs # intrinsic gives different but valid result
 +rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
 +rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
 +rm src/test/ui/consts/issue-33537.rs # same
 +rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
 +rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
 +rm src/test/ui/generator/size-moved-locals.rs # same
 +rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
 +rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
 +rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
 +
 +rm src/test/incremental/hashes/inline_asm.rs # inline asm
 +rm src/test/incremental/issue-72386.rs # same
 +rm src/test/incremental/issue-49482.rs # same
 +rm src/test/incremental/issue-54059.rs # same
 +rm src/test/incremental/lto.rs # requires lto
 +
 +rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
 +rm -r src/test/run-make/unstable-flag-required # same
 +rm -r src/test/run-make/emit-named-files # requires full --emit support
 +
 +rm src/test/pretty/asm.rs # inline asm
 +rm src/test/pretty/raw-str-nonexpr.rs # same
 +
 +rm -r src/test/run-pass-valgrind/unsized-locals
 +
 +rm src/test/ui/json-bom-plus-crlf-multifile.rs # differing warning
 +rm src/test/ui/json-bom-plus-crlf.rs # same
 +rm src/test/ui/match/issue-82392.rs # differing error
 +rm src/test/ui/type-alias-impl-trait/cross_crate_ice*.rs # requires removed aux dep
 +
 +rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
 +rm src/test/ui/cfg/cfg-panic.rs
 +rm -r src/test/ui/hygiene/
 +
 +rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
 +rm src/test/codegen-units/polymorphization/unused_type_parameters.rs # same
 +
 +rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
 +rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
 +rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
 +
 +echo "[TEST] rustc test suite"
 +RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui}
 +popd
index 5df04c533a70e38fb009dec4ec62f337e0a87a38,0000000000000000000000000000000000000000..0eef710239bdd456e90df4ce7f69a8f2d5832a6b
mode 100755,000000..100755
--- /dev/null
@@@ -1,154 -1,0 +1,163 @@@
-         CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
 +#!/usr/bin/env bash
 +
 +set -e
 +
 +source scripts/config.sh
 +source scripts/ext_config.sh
 +export RUSTC=false # ensure that cg_llvm isn't accidentally used
 +MY_RUSTC="$(pwd)/build/bin/cg_clif $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
 +
 +function no_sysroot_tests() {
 +    echo "[BUILD] mini_core"
 +    $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
 +
 +    echo "[BUILD] example"
 +    $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
 +
 +    if [[ "$JIT_SUPPORTED" = "1" ]]; then
 +        echo "[JIT] mini_core_hello_world"
-         CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
++        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
 +
 +        echo "[JIT-lazy] mini_core_hello_world"
-         $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
++        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
 +    else
 +        echo "[JIT] mini_core_hello_world (skipped)"
 +    fi
 +
 +    echo "[AOT] mini_core_hello_world"
 +    $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
 +    # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
 +}
 +
 +function base_sysroot_tests() {
 +    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
 +    $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
 +
 +    echo "[AOT] alloc_system"
 +    $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
 +
 +    echo "[AOT] alloc_example"
 +    $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/alloc_example
 +
 +    if [[ "$JIT_SUPPORTED" = "1" ]]; then
 +        echo "[JIT] std_example"
-         $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
++        $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
 +
 +        echo "[JIT-lazy] std_example"
++        $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
 +    else
 +        echo "[JIT] std_example (skipped)"
 +    fi
 +
 +    echo "[AOT] dst_field_align"
 +    # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
 +    $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
 +
 +    echo "[AOT] std_example"
 +    $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/std_example arg
 +
 +    echo "[AOT] subslice-patterns-const-eval"
 +    $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
 +
 +    echo "[AOT] track-caller-attribute"
 +    $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/track-caller-attribute
 +
 +    echo "[AOT] mod_bench"
 +    $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/mod_bench
 +}
 +
 +function extended_sysroot_tests() {
 +    pushd rand
 +    ../build/cargo clean
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[TEST] rust-random/rand"
 +        ../build/cargo test --workspace
 +    else
 +        echo "[AOT] rust-random/rand"
 +        ../build/cargo build --workspace --target $TARGET_TRIPLE --tests
 +    fi
 +    popd
 +
 +    pushd simple-raytracer
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[BENCH COMPILE] ebobby/simple-raytracer"
 +        hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "../build/cargo clean" \
 +        "RUSTC=rustc RUSTFLAGS='' cargo build" \
 +        "../build/cargo build"
 +
 +        echo "[BENCH RUN] ebobby/simple-raytracer"
 +        cp ./target/debug/main ./raytracer_cg_clif
 +        hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
 +    else
 +        ../build/cargo clean
 +        echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
 +        echo "[COMPILE] ebobby/simple-raytracer"
 +        ../build/cargo build --target $TARGET_TRIPLE
 +        echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
 +    fi
 +    popd
 +
 +    pushd build_sysroot/sysroot_src/library/core/tests
 +    echo "[TEST] libcore"
 +    ../../../../../build/cargo clean
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        ../../../../../build/cargo test
 +    else
 +        ../../../../../build/cargo build --target $TARGET_TRIPLE --tests
 +    fi
 +    popd
 +
 +    pushd regex
 +    echo "[TEST] rust-lang/regex example shootout-regex-dna"
 +    ../build/cargo clean
 +    export RUSTFLAGS="$RUSTFLAGS --cap-lints warn" # newer aho_corasick versions throw a deprecation warning
 +    # Make sure `[codegen mono items] start` doesn't poison the diff
 +    ../build/cargo build --example shootout-regex-dna --target $TARGET_TRIPLE
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        cat examples/regexdna-input.txt \
 +            | ../build/cargo run --example shootout-regex-dna --target $TARGET_TRIPLE \
 +            | grep -v "Spawned thread" > res.txt
 +        diff -u res.txt examples/regexdna-output.txt
 +    fi
 +
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[TEST] rust-lang/regex tests"
 +        ../build/cargo test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
 +    else
 +        echo "[AOT] rust-lang/regex tests"
 +        ../build/cargo build --tests --target $TARGET_TRIPLE
 +    fi
 +    popd
++
++    pushd stdsimd
++    echo "[TEST] rust-lang/stdsimd"
++    ../build/cargo clean
++    ../build/cargo build --all-targets --target $TARGET_TRIPLE
++    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
++        ../build/cargo test -q
++    fi
++    popd
 +}
 +
 +case "$1" in
 +    "no_sysroot")
 +        no_sysroot_tests
 +        ;;
 +    "base_sysroot")
 +        base_sysroot_tests
 +        ;;
 +    "extended_sysroot")
 +        extended_sysroot_tests
 +        ;;
 +    *)
 +        echo "unknown test suite"
 +        ;;
 +esac
index 54c8fb0e7b80be401a13d87c8de39e46bd9ebc99,0000000000000000000000000000000000000000..13790409e59f633f28292f8e2c3709f189e37806
mode 100644,000000..100644
--- /dev/null
@@@ -1,555 -1,0 +1,563 @@@
- use cranelift_codegen::ir::AbiParam;
- use smallvec::smallvec;
 +//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
 +
 +mod comments;
 +mod pass_mode;
 +mod returning;
 +
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_middle::ty::layout::FnAbiExt;
 +use rustc_target::abi::call::{Conv, FnAbi};
 +use rustc_target::spec::abi::Abi;
 +
- pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
++use cranelift_codegen::ir::{AbiParam, SigRef};
 +
 +use self::pass_mode::*;
 +use crate::prelude::*;
 +
-                 let local_decl = &fx.mir.local_decls[local];
-                 //                       v this ! is important
-                 let internally_mutable = !val
-                     .layout()
-                     .ty
-                     .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
-                 if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
-                     // We won't mutate this argument, so it is fine to borrow the backing storage
-                     // of this argument, to prevent a copy.
-                     let place = if let Some(meta) = meta {
-                         CPlace::for_ptr_with_extra(addr, meta, val.layout())
-                     } else {
-                         CPlace::for_ptr(addr, val.layout())
-                     };
-                     self::comments::add_local_place_comments(fx, place, local);
-                     assert_eq!(fx.local_map.push(place), local);
-                     continue;
-                 }
++pub(crate) use self::returning::codegen_return;
 +
 +fn clif_sig_from_fn_abi<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    triple: &target_lexicon::Triple,
 +    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
 +) -> Signature {
 +    let call_conv = match fn_abi.conv {
 +        Conv::Rust | Conv::C => CallConv::triple_default(triple),
 +        Conv::X86_64SysV => CallConv::SystemV,
 +        Conv::X86_64Win64 => CallConv::WindowsFastcall,
 +        Conv::ArmAapcs
 +        | Conv::CCmseNonSecureCall
 +        | Conv::Msp430Intr
 +        | Conv::PtxKernel
 +        | Conv::X86Fastcall
 +        | Conv::X86Intr
 +        | Conv::X86Stdcall
 +        | Conv::X86ThisCall
 +        | Conv::X86VectorCall
 +        | Conv::AmdGpuKernel
 +        | Conv::AvrInterrupt
 +        | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
 +    };
 +    let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
 +
 +    let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
 +    // Sometimes the first param is a pointer to the place where the return value needs to be stored.
 +    let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
 +
 +    Signature { params, returns, call_conv }
 +}
 +
 +pub(crate) fn get_function_sig<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    triple: &target_lexicon::Triple,
 +    inst: Instance<'tcx>,
 +) -> Signature {
 +    assert!(!inst.substs.needs_infer());
 +    clif_sig_from_fn_abi(tcx, triple, &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]))
 +}
 +
 +/// Instance must be monomorphized
 +pub(crate) fn import_function<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    module: &mut dyn Module,
 +    inst: Instance<'tcx>,
 +) -> FuncId {
 +    let name = tcx.symbol_name(inst).name;
 +    let sig = get_function_sig(tcx, module.isa().triple(), inst);
 +    module.declare_function(name, Linkage::Import, &sig).unwrap()
 +}
 +
 +impl<'tcx> FunctionCx<'_, '_, 'tcx> {
 +    /// Instance must be monomorphized
 +    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
 +        let func_id = import_function(self.tcx, self.module, inst);
 +        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
 +
 +        if self.clif_comments.enabled() {
 +            self.add_comment(func_ref, format!("{:?}", inst));
 +        }
 +
 +        func_ref
 +    }
 +
 +    pub(crate) fn lib_call(
 +        &mut self,
 +        name: &str,
 +        params: Vec<AbiParam>,
 +        returns: Vec<AbiParam>,
 +        args: &[Value],
 +    ) -> &[Value] {
 +        let sig = Signature { params, returns, call_conv: CallConv::triple_default(self.triple()) };
 +        let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
 +        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
 +        let call_inst = self.bcx.ins().call(func_ref, args);
 +        if self.clif_comments.enabled() {
 +            self.add_comment(call_inst, format!("easy_call {}", name));
 +        }
 +        let results = self.bcx.inst_results(call_inst);
 +        assert!(results.len() <= 2, "{}", results.len());
 +        results
 +    }
 +
 +    pub(crate) fn easy_call(
 +        &mut self,
 +        name: &str,
 +        args: &[CValue<'tcx>],
 +        return_ty: Ty<'tcx>,
 +    ) -> CValue<'tcx> {
 +        let (input_tys, args): (Vec<_>, Vec<_>) = args
 +            .iter()
 +            .map(|arg| {
 +                (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
 +            })
 +            .unzip();
 +        let return_layout = self.layout_of(return_ty);
 +        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
 +            tup.types().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
 +        } else {
 +            vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
 +        };
 +        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
 +        match *ret_vals {
 +            [] => CValue::by_ref(
 +                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
 +                return_layout,
 +            ),
 +            [val] => CValue::by_val(val, return_layout),
 +            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
 +            _ => unreachable!(),
 +        }
 +    }
 +}
 +
 +/// Make a [`CPlace`] capable of holding value of the specified type.
 +fn make_local_place<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    local: Local,
 +    layout: TyAndLayout<'tcx>,
 +    is_ssa: bool,
 +) -> CPlace<'tcx> {
 +    let place = if is_ssa {
 +        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
 +            CPlace::new_var_pair(fx, local, layout)
 +        } else {
 +            CPlace::new_var(fx, local, layout)
 +        }
 +    } else {
 +        CPlace::new_stack_slot(fx, layout)
 +    };
 +
 +    self::comments::add_local_place_comments(fx, place, local);
 +
 +    place
 +}
 +
 +pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
 +    fx.bcx.append_block_params_for_function_params(start_block);
 +
 +    fx.bcx.switch_to_block(start_block);
 +    fx.bcx.ins().nop();
 +
 +    let ssa_analyzed = crate::analyze::analyze(fx);
 +
 +    self::comments::add_args_header_comment(fx);
 +
 +    let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
 +    let ret_place =
 +        self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
 +    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
 +
 +    // None means pass_mode == NoPass
 +    enum ArgKind<'tcx> {
 +        Normal(Option<CValue<'tcx>>),
 +        Spread(Vec<Option<CValue<'tcx>>>),
 +    }
 +
 +    let fn_abi = fx.fn_abi.take().unwrap();
 +    let mut arg_abis_iter = fn_abi.args.iter();
 +
 +    let func_params = fx
 +        .mir
 +        .args_iter()
 +        .map(|local| {
 +            let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
 +
 +            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
 +            if Some(local) == fx.mir.spread_arg {
 +                // This argument (e.g. the last argument in the "rust-call" ABI)
 +                // is a tuple that was spread at the ABI level and now we have
 +                // to reconstruct it into a tuple local variable, from multiple
 +                // individual function arguments.
 +
 +                let tupled_arg_tys = match arg_ty.kind() {
 +                    ty::Tuple(ref tys) => tys,
 +                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
 +                };
 +
 +                let mut params = Vec::new();
 +                for (i, _arg_ty) in tupled_arg_tys.types().enumerate() {
 +                    let arg_abi = arg_abis_iter.next().unwrap();
 +                    let param =
 +                        cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
 +                    params.push(param);
 +                }
 +
 +                (local, ArgKind::Spread(params), arg_ty)
 +            } else {
 +                let arg_abi = arg_abis_iter.next().unwrap();
 +                let param =
 +                    cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
 +                (local, ArgKind::Normal(param), arg_ty)
 +            }
 +        })
 +        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
 +
 +    assert!(fx.caller_location.is_none());
 +    if fx.instance.def.requires_caller_location(fx.tcx) {
 +        // Store caller location for `#[track_caller]`.
 +        let arg_abi = arg_abis_iter.next().unwrap();
 +        fx.caller_location =
 +            Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
 +    }
 +
 +    assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
 +    fx.fn_abi = Some(fn_abi);
 +    assert!(block_params_iter.next().is_none(), "arg_value left behind");
 +
 +    self::comments::add_locals_header_comment(fx);
 +
 +    for (local, arg_kind, ty) in func_params {
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        // While this is normally an optimization to prevent an unnecessary copy when an argument is
 +        // not mutated by the current function, this is necessary to support unsized arguments.
 +        if let ArgKind::Normal(Some(val)) = arg_kind {
 +            if let Some((addr, meta)) = val.try_to_ptr() {
-     let args = if fn_sig.abi == Abi::RustCall {
++                // Ownership of the value at the backing storage for an argument is passed to the
++                // callee per the ABI, so it is fine to borrow the backing storage of this argument
++                // to prevent a copy.
++
++                let place = if let Some(meta) = meta {
++                    CPlace::for_ptr_with_extra(addr, meta, val.layout())
++                } else {
++                    CPlace::for_ptr(addr, val.layout())
++                };
++
++                self::comments::add_local_place_comments(fx, place, local);
++
++                assert_eq!(fx.local_map.push(place), local);
++                continue;
 +            }
 +        }
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +
 +        match arg_kind {
 +            ArgKind::Normal(param) => {
 +                if let Some(param) = param {
 +                    place.write_cvalue(fx, param);
 +                }
 +            }
 +            ArgKind::Spread(params) => {
 +                for (i, param) in params.into_iter().enumerate() {
 +                    if let Some(param) = param {
 +                        place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    for local in fx.mir.vars_and_temps_iter() {
 +        let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +    }
 +
 +    fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
 +}
 +
++struct CallArgument<'tcx> {
++    value: CValue<'tcx>,
++    is_owned: bool,
++}
++
++// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
++fn codegen_call_argument_operand<'tcx>(
++    fx: &mut FunctionCx<'_, '_, 'tcx>,
++    operand: &Operand<'tcx>,
++) -> CallArgument<'tcx> {
++    CallArgument {
++        value: codegen_operand(fx, operand),
++        is_owned: matches!(operand, Operand::Move(_)),
++    }
++}
++
 +pub(crate) fn codegen_terminator_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    span: Span,
 +    func: &Operand<'tcx>,
 +    args: &[Operand<'tcx>],
 +    destination: Option<(Place<'tcx>, BasicBlock)>,
 +) {
 +    let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
 +    let fn_sig =
 +        fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
 +
 +    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
 +
 +    // Handle special calls like intrinsics and empty drop glue.
 +    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
 +        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
 +            .unwrap()
 +            .unwrap()
 +            .polymorphize(fx.tcx);
 +
 +        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
 +            crate::intrinsics::codegen_llvm_intrinsic_call(
 +                fx,
 +                &fx.tcx.symbol_name(instance).name,
 +                substs,
 +                args,
 +                destination,
 +            );
 +            return;
 +        }
 +
 +        match instance.def {
 +            InstanceDef::Intrinsic(_) => {
 +                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
 +                return;
 +            }
 +            InstanceDef::DropGlue(_, None) => {
 +                // empty drop glue - a nop.
 +                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
 +                let ret_block = fx.get_block(dest);
 +                fx.bcx.ins().jump(ret_block, &[]);
 +                return;
 +            }
 +            _ => Some(instance),
 +        }
 +    } else {
 +        None
 +    };
 +
 +    let extra_args = &args[fn_sig.inputs().len()..];
 +    let extra_args = extra_args
 +        .iter()
 +        .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
 +        .collect::<Vec<_>>();
 +    let fn_abi = if let Some(instance) = instance {
 +        FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
 +    } else {
 +        FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
 +    };
 +
 +    let is_cold = instance
 +        .map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
 +        .unwrap_or(false);
 +    if is_cold {
 +        // FIXME Mark current_block block as cold once Cranelift supports it
 +    }
 +
 +    // Unpack arguments tuple for closures
-         let self_arg = codegen_operand(fx, &args[0]);
-         let pack_arg = codegen_operand(fx, &args[1]);
++    let mut args = if fn_sig.abi == Abi::RustCall {
 +        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
-         let tupled_arguments = match pack_arg.layout().ty.kind() {
++        let self_arg = codegen_call_argument_operand(fx, &args[0]);
++        let pack_arg = codegen_call_argument_operand(fx, &args[1]);
 +
-             args.push(pack_arg.value_field(fx, mir::Field::new(i)));
++        let tupled_arguments = match pack_arg.value.layout().ty.kind() {
 +            ty::Tuple(ref tupled_arguments) => tupled_arguments,
 +            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
 +        };
 +
 +        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
 +        args.push(self_arg);
 +        for i in 0..tupled_arguments.len() {
-         args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
++            args.push(CallArgument {
++                value: pack_arg.value.value_field(fx, mir::Field::new(i)),
++                is_owned: pack_arg.is_owned,
++            });
 +        }
 +        args
 +    } else {
-     //   | indirect call target
-     //   |         | the first argument to be passed
-     //   v         v
-     let (func_ref, first_arg) = match instance {
++        args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
 +    };
 +
-                     format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0],),
++    // Pass the caller location for `#[track_caller]`.
++    if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
++        let caller_location = fx.get_caller_location(span);
++        args.push(CallArgument { value: caller_location, is_owned: false });
++    }
++
++    let args = args;
++    assert_eq!(fn_abi.args.len(), args.len());
++
++    enum CallTarget {
++        Direct(FuncRef),
++        Indirect(SigRef, Value),
++    }
++
++    let (func_ref, first_arg_override) = match instance {
 +        // Trait object call
 +        Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
 +            if fx.clif_comments.enabled() {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(
 +                    nop_inst,
-             let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
-             (Some(method), smallvec![ptr])
++                    format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
 +                );
 +            }
-         Some(_) => (
-             None,
-             args.get(0)
-                 .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
-                 .unwrap_or(smallvec![]),
-         ),
++
++            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
++            let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
++            let sig = fx.bcx.import_signature(sig);
++
++            (CallTarget::Indirect(sig, method), Some(ptr))
 +        }
 +
 +        // Normal call
-             (
-                 Some(func),
-                 args.get(0)
-                     .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
-                     .unwrap_or(smallvec![]),
-             )
++        Some(instance) => {
++            let func_ref = fx.get_function_ref(instance);
++            (CallTarget::Direct(func_ref), None)
++        }
 +
 +        // Indirect call
 +        None => {
 +            if fx.clif_comments.enabled() {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(nop_inst, "indirect call");
 +            }
++
 +            let func = codegen_operand(fx, func).load_scalar(fx);
-     let (call_inst, call_args) = self::returning::codegen_with_call_return_arg(
-         fx,
-         &fn_abi.ret,
-         ret_place,
-         |fx, return_ptr| {
-             let regular_args_count = args.len();
-             let mut call_args: Vec<Value> = return_ptr
-                 .into_iter()
-                 .chain(first_arg.into_iter())
-                 .chain(
-                     args.into_iter()
-                         .enumerate()
-                         .skip(1)
-                         .map(|(i, arg)| adjust_arg_for_abi(fx, arg, &fn_abi.args[i]).into_iter())
-                         .flatten(),
-                 )
-                 .collect::<Vec<_>>();
-             if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
-                 // Pass the caller location for `#[track_caller]`.
-                 let caller_location = fx.get_caller_location(span);
-                 call_args.extend(
-                     adjust_arg_for_abi(fx, caller_location, &fn_abi.args[regular_args_count])
-                         .into_iter(),
-                 );
-                 assert_eq!(fn_abi.args.len(), regular_args_count + 1);
-             } else {
-                 assert_eq!(fn_abi.args.len(), regular_args_count);
++            let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
++            let sig = fx.bcx.import_signature(sig);
++
++            (CallTarget::Indirect(sig, func), None)
 +        }
 +    };
 +
 +    let ret_place = destination.map(|(place, _)| place);
-             let call_inst = if let Some(func_ref) = func_ref {
-                 let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
-                 let sig = fx.bcx.import_signature(sig);
-                 fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
-             } else {
-                 let func_ref =
-                     fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
-                 fx.bcx.ins().call(func_ref, &call_args)
-             };
-             (call_inst, call_args)
-         },
-     );
-     // FIXME find a cleaner way to support varargs
-     if fn_sig.c_variadic {
-         if !matches!(fn_sig.abi, Abi::C { .. }) {
-             fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
++    self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
++        let call_args = return_ptr
++            .into_iter()
++            .chain(first_arg_override.into_iter())
++            .chain(
++                args.into_iter()
++                    .enumerate()
++                    .skip(if first_arg_override.is_some() { 1 } else { 0 })
++                    .map(|(i, arg)| {
++                        adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
++                    })
++                    .flatten(),
++            )
++            .collect::<Vec<Value>>();
++
++        let call_inst = match func_ref {
++            CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
++            CallTarget::Indirect(sig, func_ptr) => {
++                fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
 +            }
++        };
 +
-         let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
-         let abi_params = call_args
-             .into_iter()
-             .map(|arg| {
-                 let ty = fx.bcx.func.dfg.value_type(arg);
-                 if !ty.is_int() {
-                     // FIXME set %al to upperbound on float args once floats are supported
-                     fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
-                 }
-                 AbiParam::new(ty)
-             })
-             .collect::<Vec<AbiParam>>();
-         fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
-     }
++        // FIXME find a cleaner way to support varargs
++        if fn_sig.c_variadic {
++            if !matches!(fn_sig.abi, Abi::C { .. }) {
++                fx.tcx
++                    .sess
++                    .span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
++            }
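++            // Cranelift has no native notion of variadic signatures, so patch the signature of
++            // this call after the fact to match the types of the values actually passed.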
++            let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
++            let abi_params = call_args
++                .into_iter()
++                .map(|arg| {
++                    let ty = fx.bcx.func.dfg.value_type(arg);
++                    if !ty.is_int() {
++                        // FIXME set %al to upper bound on float args once floats are supported
++                        fx.tcx
++                            .sess
++                            .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
++                    }
++                    AbiParam::new(ty)
++                })
++                .collect::<Vec<AbiParam>>();
++            fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
 +        }
-                 let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);
++
++        call_inst
++    });
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging function returned");
 +    }
 +}
 +
 +pub(crate) fn codegen_drop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    span: Span,
 +    drop_place: CPlace<'tcx>,
 +) {
 +    let ty = drop_place.layout().ty;
 +    let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
 +
 +    if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
 +        // we don't actually need to drop anything
 +    } else {
 +        match ty.kind() {
 +            ty::Dynamic(..) => {
 +                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
 +                let ptr = ptr.get_addr(fx);
 +                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
 +
 +                // FIXME(eddyb) perhaps move some of this logic into
 +                // `Instance::resolve_drop_in_place`?
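 +                // The first vtable entry is the drop-in-place function; the synthetic `Virtual`
 +                // instance below is only used to compute its call ABI.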
 +                let virtual_drop = Instance {
 +                    def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
 +                    substs: drop_instance.substs,
 +                };
 +                let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), virtual_drop, &[]);
 +
 +                let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
 +                let sig = fx.bcx.import_signature(sig);
 +                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
 +            }
 +            _ => {
 +                assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
 +
 +                let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), drop_instance, &[]);
 +
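 +                // `drop_in_place` takes a `*mut T`, modeled here as `&mut T` for ABI purposes,
 +                // so pass a reference to the place being dropped.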
 +                let arg_value = drop_place.place_ref(
 +                    fx,
 +                    fx.layout_of(fx.tcx.mk_ref(
 +                        &ty::RegionKind::ReErased,
 +                        TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
 +                    )),
 +                );
-                         adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1]).into_iter(),
++                let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
 +
 +                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
 +
 +                if drop_instance.def.requires_caller_location(fx.tcx) {
 +                    // Pass the caller location for `#[track_caller]`.
 +                    let caller_location = fx.get_caller_location(span);
 +                    call_args.extend(
++                        adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
 +                    );
 +                }
 +
 +                let func_ref = fx.get_function_ref(drop_instance);
 +                fx.bcx.ins().call(func_ref, &call_args);
 +            }
 +        }
 +    }
 +}
index 7c275965199e056567512a0b71118943b80838cc,0000000000000000000000000000000000000000..44eae706ea8f6df56448214f8afab5383bc67ade
mode 100644,000000..100644
--- /dev/null
@@@ -1,300 -1,0 +1,312 @@@
-         PassMode::Indirect { .. } => match arg.force_stack(fx) {
-             (ptr, None) => smallvec![ptr.get_addr(fx)],
-             (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
-         },
 +//! Argument passing
 +
 +use crate::prelude::*;
 +use crate::value_and_place::assert_assignable;
 +
 +use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
 +use rustc_target::abi::call::{
 +    ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
 +};
 +use smallvec::{smallvec, SmallVec};
 +
 +pub(super) trait ArgAbiExt<'tcx> {
 +    fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
 +    fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
 +}
 +
 +fn reg_to_abi_param(reg: Reg) -> AbiParam {
 +    let clif_ty = match (reg.kind, reg.size.bytes()) {
 +        (RegKind::Integer, 1) => types::I8,
 +        (RegKind::Integer, 2) => types::I16,
 +        (RegKind::Integer, 4) => types::I32,
 +        (RegKind::Integer, 8) => types::I64,
 +        (RegKind::Integer, 16) => types::I128,
 +        (RegKind::Float, 4) => types::F32,
 +        (RegKind::Float, 8) => types::F64,
 +        (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
 +        _ => unreachable!("{:?}", reg),
 +    };
 +    AbiParam::new(clif_ty)
 +}
 +
 +fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
 +    match arg_attrs.arg_ext {
 +        RustcArgExtension::None => {}
 +        RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
 +        RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
 +    }
 +    param
 +}
 +
 +fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
 +    let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
 +        (0, 0)
 +    } else {
 +        (
 +            cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
 +            cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
 +        )
 +    };
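 +    // For example a 20 byte cast with an 8 byte integer unit gives `rest_count == 2` and
 +    // `rem_bytes == 4`: two `i64` parameters followed by one `i32`.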
 +
 +    if cast.prefix.iter().all(|x| x.is_none()) {
 +        // Simplify to a single unit when there is no prefix and size <= unit size
 +        if cast.rest.total <= cast.rest.unit.size {
 +            let clif_ty = match (cast.rest.unit.kind, cast.rest.unit.size.bytes()) {
 +                (RegKind::Integer, 1) => types::I8,
 +                (RegKind::Integer, 2) => types::I16,
 +                (RegKind::Integer, 3..=4) => types::I32,
 +                (RegKind::Integer, 5..=8) => types::I64,
 +                (RegKind::Integer, 9..=16) => types::I128,
 +                (RegKind::Float, 4) => types::F32,
 +                (RegKind::Float, 8) => types::F64,
 +                (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
 +                _ => unreachable!("{:?}", cast.rest.unit),
 +            };
 +            return smallvec![AbiParam::new(clif_ty)];
 +        }
 +    }
 +
 +    // Create list of fields in the main structure
 +    let mut args = cast
 +        .prefix
 +        .iter()
 +        .flatten()
 +        .map(|&kind| reg_to_abi_param(Reg { kind, size: cast.prefix_chunk_size }))
 +        .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
 +        .collect::<SmallVec<_>>();
 +
 +    // Append final integer
 +    if rem_bytes != 0 {
 +        // Only integers can really be split further.
 +        assert_eq!(cast.rest.unit.kind, RegKind::Integer);
 +        args.push(reg_to_abi_param(Reg {
 +            kind: RegKind::Integer,
 +            size: Size::from_bytes(rem_bytes),
 +        }));
 +    }
 +
 +    args
 +}
 +
 +impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 +    fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
 +        match self.mode {
 +            PassMode::Ignore => smallvec![],
 +            PassMode::Direct(attrs) => match &self.layout.abi {
 +                Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
 +                    AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
 +                    attrs
 +                )],
 +                Abi::Vector { .. } => {
 +                    let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
 +                    smallvec![AbiParam::new(vector_ty)]
 +                }
 +                _ => unreachable!("{:?}", self.layout.abi),
 +            },
 +            PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
 +                Abi::ScalarPair(a, b) => {
 +                    let a = scalar_to_clif_type(tcx, a.clone());
 +                    let b = scalar_to_clif_type(tcx, b.clone());
 +                    smallvec![
 +                        apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
 +                        apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
 +                    ]
 +                }
 +                _ => unreachable!("{:?}", self.layout.abi),
 +            },
 +            PassMode::Cast(cast) => cast_target_to_abi_params(cast),
 +            PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
 +                if on_stack {
 +                    let size = u32::try_from(self.layout.size.bytes()).unwrap();
 +                    smallvec![apply_arg_attrs_to_abi_param(
 +                        AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size)),
 +                        attrs
 +                    )]
 +                } else {
 +                    smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
 +                }
 +            }
 +            PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
 +                assert!(!on_stack);
 +                smallvec![
 +                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
 +                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
 +                ]
 +            }
 +        }
 +    }
 +
 +    fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
 +        match self.mode {
 +            PassMode::Ignore => (None, vec![]),
 +            PassMode::Direct(_) => match &self.layout.abi {
 +                Abi::Scalar(scalar) => {
 +                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
 +                }
 +                Abi::Vector { .. } => {
 +                    let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
 +                    (None, vec![AbiParam::new(vector_ty)])
 +                }
 +                _ => unreachable!("{:?}", self.layout.abi),
 +            },
 +            PassMode::Pair(_, _) => match &self.layout.abi {
 +                Abi::ScalarPair(a, b) => {
 +                    let a = scalar_to_clif_type(tcx, a.clone());
 +                    let b = scalar_to_clif_type(tcx, b.clone());
 +                    (None, vec![AbiParam::new(a), AbiParam::new(b)])
 +                }
 +                _ => unreachable!("{:?}", self.layout.abi),
 +            },
 +            PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
 +            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
 +                assert!(!on_stack);
 +                (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
 +            }
 +            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +                unreachable!("unsized return value")
 +            }
 +        }
 +    }
 +}
 +
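 +/// Split a value into the sequence of scalar values demanded by a `PassMode::Cast`, by spilling
 +/// it to the stack and loading each ABI parameter back at the right offset.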
 +pub(super) fn to_casted_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    arg: CValue<'tcx>,
 +    cast: CastTarget,
 +) -> SmallVec<[Value; 2]> {
 +    let (ptr, meta) = arg.force_stack(fx);
 +    assert!(meta.is_none());
 +    let mut offset = 0;
 +    cast_target_to_abi_params(cast)
 +        .into_iter()
 +        .map(|param| {
 +            let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
 +            offset += i64::from(param.value_type.bytes());
 +            val
 +        })
 +        .collect()
 +}
 +
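 +/// Reassemble a value of type `layout` from the scalar block params of a `PassMode::Cast`
 +/// return, by storing them into a stack slot and reading the result back by-ref.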
 +pub(super) fn from_casted_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    block_params: &[Value],
 +    layout: TyAndLayout<'tcx>,
 +    cast: CastTarget,
 +) -> CValue<'tcx> {
 +    let abi_params = cast_target_to_abi_params(cast);
 +    let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
 +    let layout_size = u32::try_from(layout.size.bytes()).unwrap();
 +    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +        kind: StackSlotKind::ExplicitSlot,
 +        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +        // specify stack slot alignment.
 +        // Stack slot size may be bigger, for example for `[u8; 3]`, which is packed into an `i32`.
 +        // It may also be smaller, for example when the type is a wrapper around an integer with a
 +        // larger alignment than the integer.
 +        size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
 +        offset: None,
 +    });
 +    let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
 +    let mut offset = 0;
 +    let mut block_params_iter = block_params.iter().copied();
 +    for param in abi_params {
 +        ptr.offset_i64(fx, offset).store(fx, block_params_iter.next().unwrap(), MemFlags::new());
 +        offset += i64::from(param.value_type.bytes());
 +    }
 +    assert_eq!(block_params_iter.next(), None, "Leftover block param");
 +    CValue::by_ref(ptr, layout)
 +}
 +
 +/// Get a set of values to be passed as function arguments.
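 +///
 +/// For `PassMode::Indirect` arguments `is_owned` signals that the argument local is moved into
 +/// the call, so its existing backing storage can be handed to the callee without a copy.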
 +pub(super) fn adjust_arg_for_abi<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    arg: CValue<'tcx>,
 +    arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
++    is_owned: bool,
 +) -> SmallVec<[Value; 2]> {
 +    assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
 +    match arg_abi.mode {
 +        PassMode::Ignore => smallvec![],
 +        PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
 +        PassMode::Pair(_, _) => {
 +            let (a, b) = arg.load_scalar_pair(fx);
 +            smallvec![a, b]
 +        }
 +        PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
++        PassMode::Indirect { .. } => {
++            if is_owned {
++                match arg.force_stack(fx) {
++                    (ptr, None) => smallvec![ptr.get_addr(fx)],
++                    (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
++                }
++            } else {
++                // Per the ABI, ownership of the value at the argument's backing storage passes
++                // to the callee, so a copy of the argument must be made unless the argument
++                // local is moved into the call.
++                let place = CPlace::new_stack_slot(fx, arg.layout());
++                place.write_cvalue(fx, arg);
++                smallvec![place.to_ptr().get_addr(fx)]
++            }
++        }
 +    }
 +}
 +
 +/// Create a [`CValue`] containing the value of a function parameter adding clif function parameters
 +/// as necessary.
 +pub(super) fn cvalue_for_param<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    local: Option<mir::Local>,
 +    local_field: Option<usize>,
 +    arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
 +    block_params_iter: &mut impl Iterator<Item = Value>,
 +) -> Option<CValue<'tcx>> {
 +    let block_params = arg_abi
 +        .get_abi_param(fx.tcx)
 +        .into_iter()
 +        .map(|abi_param| {
 +            let block_param = block_params_iter.next().unwrap();
 +            assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
 +            block_param
 +        })
 +        .collect::<SmallVec<[_; 2]>>();
 +
 +    crate::abi::comments::add_arg_comment(
 +        fx,
 +        "arg",
 +        local,
 +        local_field,
 +        &block_params,
 +        arg_abi.mode,
 +        arg_abi.layout,
 +    );
 +
 +    match arg_abi.mode {
 +        PassMode::Ignore => None,
 +        PassMode::Direct(_) => {
 +            assert_eq!(block_params.len(), 1, "{:?}", block_params);
 +            Some(CValue::by_val(block_params[0], arg_abi.layout))
 +        }
 +        PassMode::Pair(_, _) => {
 +            assert_eq!(block_params.len(), 2, "{:?}", block_params);
 +            Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
 +        }
 +        PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
 +        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
 +            assert_eq!(block_params.len(), 1, "{:?}", block_params);
 +            Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
 +        }
 +        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +            assert_eq!(block_params.len(), 2, "{:?}", block_params);
 +            Some(CValue::by_ref_unsized(
 +                Pointer::new(block_params[0]),
 +                block_params[1],
 +                arg_abi.layout,
 +            ))
 +        }
 +    }
 +}
index e1c53224b4f841751bba96f80a37b99fbcdd9e93,0000000000000000000000000000000000000000..c1bdba43e6ccb4e65e622d66bbf358729c701aff
mode 100644,000000..100644
--- /dev/null
@@@ -1,188 -1,0 +1,156 @@@
- use rustc_middle::ty::layout::FnAbiExt;
- use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
 +//! Return value handling
 +
 +use crate::prelude::*;
 +
- /// Can the given type be returned into an ssa var or does it need to be returned on the stack.
- pub(crate) fn can_return_to_ssa_var<'tcx>(
-     fx: &FunctionCx<'_, '_, 'tcx>,
-     func: &mir::Operand<'tcx>,
-     args: &[mir::Operand<'tcx>],
- ) -> bool {
-     let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
-     let fn_sig =
-         fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
-     // Handle special calls like intrinsics and empty drop glue.
-     let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
-         let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
-             .unwrap()
-             .unwrap()
-             .polymorphize(fx.tcx);
-         match instance.def {
-             InstanceDef::Intrinsic(_) | InstanceDef::DropGlue(_, _) => {
-                 return true;
-             }
-             _ => Some(instance),
-         }
-     } else {
-         None
-     };
-     let extra_args = &args[fn_sig.inputs().len()..];
-     let extra_args = extra_args
-         .iter()
-         .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
-         .collect::<Vec<_>>();
-     let fn_abi = if let Some(instance) = instance {
-         FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
-     } else {
-         FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
-     };
-     match fn_abi.ret.mode {
-         PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => true,
-         // FIXME Make it possible to return Cast and Indirect to an ssa var.
-         PassMode::Cast(_) | PassMode::Indirect { .. } => false,
-     }
- }
++use rustc_target::abi::call::{ArgAbi, PassMode};
 +use smallvec::{smallvec, SmallVec};
 +
-         PassMode::Ignore => (CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout), smallvec![]),
-         PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
 +/// Return a place where the return value of the current function can be written to. If necessary
 +/// this adds an extra parameter pointing to where the return value needs to be stored.
 +pub(super) fn codegen_return_param<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
 +    block_params_iter: &mut impl Iterator<Item = Value>,
 +) -> CPlace<'tcx> {
 +    let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
-             assert_eq!(fx.bcx.func.dfg.value_type(ret_param), pointer_ty(fx.tcx));
++        PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
 +            let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
 +            (
 +                super::make_local_place(
 +                    fx,
 +                    RETURN_PLACE,
 +                    fx.fn_abi.as_ref().unwrap().ret.layout,
 +                    is_ssa,
 +                ),
 +                smallvec![],
 +            )
 +        }
 +        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
 +            let ret_param = block_params_iter.next().unwrap();
- pub(super) fn codegen_with_call_return_arg<'tcx, T>(
++            assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
 +            (
 +                CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
 +                smallvec![ret_param],
 +            )
 +        }
 +        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +            unreachable!("unsized return value")
 +        }
 +    };
 +
 +    crate::abi::comments::add_arg_comment(
 +        fx,
 +        "ret",
 +        Some(RETURN_PLACE),
 +        None,
 +        &ret_param,
 +        fx.fn_abi.as_ref().unwrap().ret.mode,
 +        fx.fn_abi.as_ref().unwrap().ret.layout,
 +    );
 +
 +    ret_place
 +}
 +
 +/// Invokes the closure, passing a value representing the return pointer when necessary. When the
 +/// closure returns, the call's return value(s), if any, are written to the correct place.
-     f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> (Inst, T),
- ) -> (Inst, T) {
-     let return_ptr = match ret_arg_abi.mode {
-         PassMode::Ignore => None,
++pub(super) fn codegen_with_call_return_arg<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
 +    ret_place: Option<CPlace<'tcx>>,
-             Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
-             None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
++    f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
++) {
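++    // For indirect returns, prefer handing the callee the final return place directly; fall
++    // back to a temporary stack slot that is copied into place after the call.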
++    let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
++        PassMode::Ignore => (None, None),
 +        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
-         PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => None,
++            Some(ret_place) if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) => {
++                // This is an optimization to prevent unnecessary copies of the return value when
++                // the return place is already a memory place as opposed to a register.
++                // Removing this match arm would still be correct, but slower.
++                (None, Some(ret_place.to_ptr().get_addr(fx)))
++            }
++            _ => {
++                let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
++                (Some(place), Some(place.to_ptr().get_addr(fx)))
++            }
 +        },
 +        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +            unreachable!("unsized return value")
 +        }
-     let (call_inst, meta) = f(fx, return_ptr);
++        PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
 +    };
 +
-         PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {}
++    let call_inst = f(fx, return_ptr);
 +
 +    match ret_arg_abi.mode {
 +        PassMode::Ignore => {}
 +        PassMode::Direct(_) => {
 +            if let Some(ret_place) = ret_place {
 +                let ret_val = fx.bcx.inst_results(call_inst)[0];
 +                ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
 +            }
 +        }
 +        PassMode::Pair(_, _) => {
 +            if let Some(ret_place) = ret_place {
 +                let ret_val_a = fx.bcx.inst_results(call_inst)[0];
 +                let ret_val_b = fx.bcx.inst_results(call_inst)[1];
 +                ret_place.write_cvalue(
 +                    fx,
 +                    CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout),
 +                );
 +            }
 +        }
 +        PassMode::Cast(cast) => {
 +            if let Some(ret_place) = ret_place {
 +                let results = fx
 +                    .bcx
 +                    .inst_results(call_inst)
 +                    .iter()
 +                    .copied()
 +                    .collect::<SmallVec<[Value; 2]>>();
 +                let result =
 +                    super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
 +                ret_place.write_cvalue(fx, result);
 +            }
 +        }
-     (call_inst, meta)
++        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
++            if let (Some(ret_place), Some(ret_temp_place)) = (ret_place, ret_temp_place) {
++                // The return value only needs to be copied when both are Some: a None ret_place
++                // means the call does not return, and a None ret_temp_place means the callee
++                // already wrote directly into the final return place.
++                let ret_temp_value = ret_temp_place.to_cvalue(fx);
++                ret_place.write_cvalue(fx, ret_temp_value);
++            }
++        }
 +        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +            unreachable!("unsized return value")
 +        }
 +    }
 +}
 +
 +/// Codegen a return instruction with the right return value(s) if any.
 +pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
 +    match fx.fn_abi.as_ref().unwrap().ret.mode {
 +        PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
 +            fx.bcx.ins().return_(&[]);
 +        }
 +        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
 +            unreachable!("unsized return value")
 +        }
 +        PassMode::Direct(_) => {
 +            let place = fx.get_local_place(RETURN_PLACE);
 +            let ret_val = place.to_cvalue(fx).load_scalar(fx);
 +            fx.bcx.ins().return_(&[ret_val]);
 +        }
 +        PassMode::Pair(_, _) => {
 +            let place = fx.get_local_place(RETURN_PLACE);
 +            let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
 +            fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
 +        }
 +        PassMode::Cast(cast) => {
 +            let place = fx.get_local_place(RETURN_PLACE);
 +            let ret_val = place.to_cvalue(fx);
 +            let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
 +            fx.bcx.ins().return_(&ret_vals);
 +        }
 +    }
 +}
index d39486c2f1002e485f849fb0c27fb3564c384e46,0000000000000000000000000000000000000000..637d30f9344f98b6f7c77a0fc46cb2a910ff8b49
mode 100644,000000..100644
--- /dev/null
@@@ -1,139 -1,0 +1,137 @@@
- use rustc_span::symbol::sym;
 +//! Allocator shim
 +// Adapted from rustc
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
 +use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
-         codegen_inner(module, unwind_context, kind);
 +
 +/// Returns whether an allocator shim was created
 +pub(crate) fn codegen(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +) -> bool {
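 +    // When linking against a dylib, that dylib already contains the allocator shim, so none
 +    // needs to be generated here.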
 +    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
 +        use rustc_middle::middle::dependency_format::Linkage;
 +        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
 +    });
 +    if any_dynamic_crate {
 +        false
 +    } else if let Some(kind) = tcx.allocator_kind(()) {
-         //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
++        codegen_inner(module, unwind_context, kind, tcx.lang_items().oom().is_some());
 +        true
 +    } else {
 +        false
 +    }
 +}
 +
 +fn codegen_inner(
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +    kind: AllocatorKind,
++    has_alloc_error_handler: bool,
 +) {
 +    let usize_ty = module.target_config().pointer_type();
 +
 +    for method in ALLOCATOR_METHODS {
 +        let mut arg_tys = Vec::with_capacity(method.inputs.len());
 +        for ty in method.inputs.iter() {
 +            match *ty {
 +                AllocatorTy::Layout => {
 +                    arg_tys.push(usize_ty); // size
 +                    arg_tys.push(usize_ty); // align
 +                }
 +                AllocatorTy::Ptr => arg_tys.push(usize_ty),
 +                AllocatorTy::Usize => arg_tys.push(usize_ty),
 +
 +                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
 +            }
 +        }
 +        let output = match method.output {
 +            AllocatorTy::ResultPtr => Some(usize_ty),
 +            AllocatorTy::Unit => None,
 +
 +            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
 +                panic!("invalid allocator output")
 +            }
 +        };
 +
 +        let sig = Signature {
 +            call_conv: CallConv::triple_default(module.isa().triple()),
 +            params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
 +            returns: output.into_iter().map(AbiParam::new).collect(),
 +        };
 +
 +        let caller_name = format!("__rust_{}", method.name);
 +        let callee_name = kind.fn_name(method.name);
-     let callee_name = kind.fn_name(sym::oom);
-     //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
 +
 +        let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
 +
 +        let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
 +
 +        let mut ctx = Context::new();
 +        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let args = arg_tys
 +                .into_iter()
 +                .map(|ty| bcx.append_block_param(block, ty))
 +                .collect::<Vec<Value>>();
 +
 +            let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +            let call_inst = bcx.ins().call(callee_func_ref, &args);
 +            let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
 +
 +            bcx.ins().return_(&results);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        module
 +            .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
 +            .unwrap();
 +        unwind_context.add_function(func_id, &ctx, module.isa());
 +    }
 +
 +    let sig = Signature {
 +        call_conv: CallConv::triple_default(module.isa().triple()),
 +        params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
 +        returns: vec![],
 +    };
 +
-     let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
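++    // `__rg_oom` is the symbol rustc generates for a user-provided `#[alloc_error_handler]`;
++    // `__rdl_oom` is the default handler from the standard library.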
++    let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
 +
 +    let func_id =
 +        module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
 +
++    let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
 +
 +    let mut ctx = Context::new();
 +    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
 +    {
 +        let mut func_ctx = FunctionBuilderContext::new();
 +        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +        let block = bcx.create_block();
 +        bcx.switch_to_block(block);
 +        let args = (&[usize_ty, usize_ty])
 +            .iter()
 +            .map(|&ty| bcx.append_block_param(block, ty))
 +            .collect::<Vec<Value>>();
 +
 +        let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +        bcx.ins().call(callee_func_ref, &args);
 +
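 +        // The error handler diverges, so anything after the call is unreachable.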
 +        bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +        bcx.seal_all_blocks();
 +        bcx.finalize();
 +    }
 +    module
 +        .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
 +        .unwrap();
 +    unwind_context.add_function(func_id, &ctx, module.isa());
 +}
index efead25552f4d1ca203e35b49a2c4912ab253c4b,0000000000000000000000000000000000000000..35b89358b1984ee40a2c184a38753987ffbd46f7
mode 100644,000000..100644
--- /dev/null
@@@ -1,59 -1,0 +1,48 @@@
-         match &bb.terminator().kind {
-             TerminatorKind::Call { destination, func, args, .. } => {
-                 if let Some((dest_place, _dest_bb)) = destination {
-                     if !crate::abi::can_return_to_ssa_var(fx, func, args) {
-                         not_ssa(&mut flag_map, dest_place.local)
-                     }
-                 }
-             }
-             _ => {}
-         }
 +//! SSA analysis
 +
 +use crate::prelude::*;
 +
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::mir::StatementKind::*;
 +
 +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 +pub(crate) enum SsaKind {
 +    NotSsa,
 +    Ssa,
 +}
 +
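 +/// Determine for each local whether it can be kept in an SSA value: it must fit in one or two
 +/// Cranelift values and must never have its address taken.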
 +pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
 +    let mut flag_map = fx
 +        .mir
 +        .local_decls
 +        .iter()
 +        .map(|local_decl| {
 +            let ty = fx.monomorphize(local_decl.ty);
 +            if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
 +                SsaKind::Ssa
 +            } else {
 +                SsaKind::NotSsa
 +            }
 +        })
 +        .collect::<IndexVec<Local, SsaKind>>();
 +
 +    for bb in fx.mir.basic_blocks().iter() {
 +        for stmt in bb.statements.iter() {
 +            match &stmt.kind {
 +                Assign(place_and_rval) => match &place_and_rval.1 {
 +                    Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
 +                        not_ssa(&mut flag_map, place.local)
 +                    }
 +                    _ => {}
 +                },
 +                _ => {}
 +            }
 +        }
 +    }
 +
 +    flag_map
 +}
 +
 +fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
 +    flag_map[local] = SsaKind::NotSsa;
 +}
index 3d78eed77b94c373cfe533920501dbabb6f99470,0000000000000000000000000000000000000000..e99a227a3a6eafc49b7156bc0fd0582e00ab8ff8
mode 100644,000000..100644
--- /dev/null
@@@ -1,920 -1,0 +1,918 @@@
-                     let discr =
-                         crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
 +//! Codegen of a single function
 +
 +use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::adjustment::PointerCast;
 +use rustc_middle::ty::layout::FnAbiExt;
 +use rustc_target::abi::call::FnAbi;
 +
 +use crate::constant::ConstantCx;
 +use crate::prelude::*;
 +
 +pub(crate) fn codegen_fn<'tcx>(
 +    cx: &mut crate::CodegenCx<'tcx>,
 +    module: &mut dyn Module,
 +    instance: Instance<'tcx>,
 +) {
 +    let tcx = cx.tcx;
 +
 +    let _inst_guard =
 +        crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
 +    debug_assert!(!instance.substs.needs_infer());
 +
 +    let mir = tcx.instance_mir(instance.def);
 +    let _mir_guard = crate::PrintOnPanic(|| {
 +        let mut buf = Vec::new();
 +        rustc_mir::util::write_mir_pretty(tcx, Some(instance.def_id()), &mut buf).unwrap();
 +        String::from_utf8_lossy(&buf).into_owned()
 +    });
 +
 +    // Declare function
 +    let symbol_name = tcx.symbol_name(instance);
 +    let sig = get_function_sig(tcx, module.isa().triple(), instance);
 +    let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
 +
 +    cx.cached_context.clear();
 +
 +    // Make the FunctionBuilder
 +    let mut func_ctx = FunctionBuilderContext::new();
 +    let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
 +    func.name = ExternalName::user(0, func_id.as_u32());
 +    func.signature = sig;
 +    func.collect_debug_info();
 +
 +    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
 +
 +    // Predefine blocks
 +    let start_block = bcx.create_block();
 +    let block_map: IndexVec<BasicBlock, Block> =
 +        (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
 +
 +    // Make FunctionCx
 +    let pointer_type = module.target_config().pointer_type();
 +    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
 +
 +    let mut fx = FunctionCx {
 +        cx,
 +        module,
 +        tcx,
 +        pointer_type,
 +        constants_cx: ConstantCx::new(),
 +
 +        instance,
 +        symbol_name,
 +        mir,
 +        fn_abi: Some(FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
 +
 +        bcx,
 +        block_map,
 +        local_map: IndexVec::with_capacity(mir.local_decls.len()),
 +        caller_location: None, // set by `codegen_fn_prelude`
 +
 +        clif_comments,
 +        source_info_set: indexmap::IndexSet::new(),
 +        next_ssa_var: 0,
 +
 +        inline_asm_index: 0,
 +    };
 +
 +    let arg_uninhabited = fx
 +        .mir
 +        .args_iter()
 +        .any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
 +
 +    if !crate::constant::check_constants(&mut fx) {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
 +    } else if arg_uninhabited {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
 +    } else {
 +        tcx.sess.time("codegen clif ir", || {
 +            tcx.sess
 +                .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
 +            codegen_fn_content(&mut fx);
 +        });
 +    }
 +
 +    // Recover all necessary data from fx now; once func is moved back into the context, fx can
 +    // no longer be accessed.
 +    let instance = fx.instance;
 +    let mut clif_comments = fx.clif_comments;
 +    let source_info_set = fx.source_info_set;
 +    let local_map = fx.local_map;
 +
 +    fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
 +
 +    // Store function in context
 +    let context = &mut cx.cached_context;
 +    context.func = func;
 +
 +    crate::pretty_clif::write_clif_file(
 +        tcx,
 +        "unopt",
 +        module.isa(),
 +        instance,
 +        &context,
 +        &clif_comments,
 +    );
 +
 +    // Verify function
 +    verify_func(tcx, &clif_comments, &context.func);
 +
 +    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
 +    // instruction, which doesn't have an encoding.
 +    context.compute_cfg();
 +    context.compute_domtree();
 +    context.eliminate_unreachable_code(module.isa()).unwrap();
 +    context.dce(module.isa()).unwrap();
 +    // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
 +    // invalidate it when it would change.
 +    context.domtree.clear();
 +
 +    // Perform rust specific optimizations
 +    tcx.sess.time("optimize clif ir", || {
 +        crate::optimize::optimize_function(
 +            tcx,
 +            module.isa(),
 +            instance,
 +            context,
 +            &mut clif_comments,
 +        );
 +    });
 +
 +    // Define function
 +    tcx.sess.time("define function", || {
 +        context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
 +        module
 +            .define_function(func_id, context, &mut NullTrapSink {}, &mut NullStackMapSink {})
 +            .unwrap()
 +    });
 +
 +    // Write optimized function to file for debugging
 +    crate::pretty_clif::write_clif_file(
 +        tcx,
 +        "opt",
 +        module.isa(),
 +        instance,
 +        &context,
 +        &clif_comments,
 +    );
 +
 +    if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
 +        crate::pretty_clif::write_ir_file(
 +            tcx,
 +            || format!("{}.vcode", tcx.symbol_name(instance).name),
 +            |file| file.write_all(disasm.as_bytes()),
 +        )
 +    }
 +
 +    // Define debuginfo for function
 +    let isa = module.isa();
 +    let debug_context = &mut cx.debug_context;
 +    let unwind_context = &mut cx.unwind_context;
 +    tcx.sess.time("generate debug info", || {
 +        if let Some(debug_context) = debug_context {
 +            debug_context.define_function(
 +                instance,
 +                func_id,
 +                symbol_name.name,
 +                isa,
 +                context,
 +                &source_info_set,
 +                local_map,
 +            );
 +        }
 +        unwind_context.add_function(func_id, &context, isa);
 +    });
 +
 +    // Clear context to make it usable for the next function
 +    context.clear();
 +}
 +
 +pub(crate) fn verify_func(
 +    tcx: TyCtxt<'_>,
 +    writer: &crate::pretty_clif::CommentWriter,
 +    func: &Function,
 +) {
 +    tcx.sess.time("verify clif ir", || {
 +        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
 +        match cranelift_codegen::verify_function(&func, &flags) {
 +            Ok(_) => {}
 +            Err(err) => {
 +                tcx.sess.err(&format!("{:?}", err));
 +                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
 +                    &func,
 +                    None,
 +                    Some(Box::new(writer)),
 +                    err,
 +                );
 +                tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
 +            }
 +        }
 +    });
 +}
 +
 +fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
 +    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
 +        let block = fx.get_block(bb);
 +        fx.bcx.switch_to_block(block);
 +
 +        if bb_data.is_cleanup {
 +            // Unwinding after panicking is not supported
 +            continue;
 +
 +            // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
 +            // so for cleanup blocks.
 +        }
 +
 +        fx.bcx.ins().nop();
 +        for stmt in &bb_data.statements {
 +            fx.set_debug_loc(stmt.source_info);
 +            codegen_stmt(fx, block, stmt);
 +        }
 +
 +        if fx.clif_comments.enabled() {
 +            let mut terminator_head = "\n".to_string();
 +            bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
 +            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
 +            fx.add_comment(inst, terminator_head);
 +        }
 +
 +        fx.set_debug_loc(bb_data.terminator().source_info);
 +
 +        match &bb_data.terminator().kind {
 +            TerminatorKind::Goto { target } => {
 +                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
 +                    let mut can_immediately_return = true;
 +                    for stmt in &fx.mir[*target].statements {
 +                        if let StatementKind::StorageDead(_) = stmt.kind {
 +                        } else {
 +                            // FIXME Can sometimes happen, see rust-lang/rust#70531
 +                            can_immediately_return = false;
 +                            break;
 +                        }
 +                    }
 +
 +                    if can_immediately_return {
 +                        crate::abi::codegen_return(fx);
 +                        continue;
 +                    }
 +                }
 +
 +                let block = fx.get_block(*target);
 +                fx.bcx.ins().jump(block, &[]);
 +            }
 +            TerminatorKind::Return => {
 +                crate::abi::codegen_return(fx);
 +            }
 +            TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
 +                if !fx.tcx.sess.overflow_checks() {
 +                    if let mir::AssertKind::OverflowNeg(_) = *msg {
 +                        let target = fx.get_block(*target);
 +                        fx.bcx.ins().jump(target, &[]);
 +                        continue;
 +                    }
 +                }
 +                let cond = codegen_operand(fx, cond).load_scalar(fx);
 +
 +                let target = fx.get_block(*target);
 +                let failure = fx.bcx.create_block();
 +                // FIXME Mark failure block as cold once Cranelift supports it
 +
 +                if *expected {
 +                    fx.bcx.ins().brz(cond, failure, &[]);
 +                } else {
 +                    fx.bcx.ins().brnz(cond, failure, &[]);
 +                };
 +                fx.bcx.ins().jump(target, &[]);
 +
 +                fx.bcx.switch_to_block(failure);
 +                fx.bcx.ins().nop();
 +
 +                match msg {
 +                    AssertKind::BoundsCheck { ref len, ref index } => {
 +                        let len = codegen_operand(fx, len).load_scalar(fx);
 +                        let index = codegen_operand(fx, index).load_scalar(fx);
 +                        let location = fx
 +                            .get_caller_location(bb_data.terminator().source_info.span)
 +                            .load_scalar(fx);
 +
 +                        codegen_panic_inner(
 +                            fx,
 +                            rustc_hir::LangItem::PanicBoundsCheck,
 +                            &[index, len, location],
 +                            bb_data.terminator().source_info.span,
 +                        );
 +                    }
 +                    _ => {
 +                        let msg_str = msg.description();
 +                        codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
 +                    }
 +                }
 +            }
 +
 +            TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
 +                let discr = codegen_operand(fx, discr).load_scalar(fx);
 +
 +                let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
 +                    || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
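 +                // A boolean switch, or one whose single non-default target tests the value 0,
 +                // can be lowered to a conditional branch (`brz`/`brnz`) instead of a jump table.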
 +                if use_bool_opt {
 +                    assert_eq!(targets.iter().count(), 1);
 +                    let (then_value, then_block) = targets.iter().next().unwrap();
 +                    let then_block = fx.get_block(then_block);
 +                    let else_block = fx.get_block(targets.otherwise());
 +                    let test_zero = match then_value {
 +                        0 => true,
 +                        1 => false,
 +                        _ => unreachable!("{:?}", targets),
 +                    };
 +
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    let (discr, is_inverted) =
 +                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
 +                    let test_zero = if is_inverted { !test_zero } else { test_zero };
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
 +                        &fx.bcx, discr, test_zero,
 +                    ) {
 +                        if taken {
 +                            fx.bcx.ins().jump(then_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    } else {
 +                        if test_zero {
 +                            fx.bcx.ins().brz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().brnz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    }
 +                } else {
 +                    let mut switch = ::cranelift_frontend::Switch::new();
 +                    for (value, block) in targets.iter() {
 +                        let block = fx.get_block(block);
 +                        switch.set_entry(value, block);
 +                    }
 +                    let otherwise_block = fx.get_block(targets.otherwise());
 +                    switch.emit(&mut fx.bcx, discr, otherwise_block);
 +                }
 +            }
 +            TerminatorKind::Call {
 +                func,
 +                args,
 +                destination,
 +                fn_span,
 +                cleanup: _,
 +                from_hir_call: _,
 +            } => {
 +                fx.tcx.sess.time("codegen call", || {
 +                    crate::abi::codegen_terminator_call(fx, *fn_span, func, args, *destination)
 +                });
 +            }
 +            TerminatorKind::InlineAsm {
 +                template,
 +                operands,
 +                options,
 +                destination,
 +                line_spans: _,
 +            } => {
 +                crate::inline_asm::codegen_inline_asm(
 +                    fx,
 +                    bb_data.terminator().source_info.span,
 +                    template,
 +                    operands,
 +                    *options,
 +                );
 +
 +                match *destination {
 +                    Some(destination) => {
 +                        let destination_block = fx.get_block(destination);
 +                        fx.bcx.ins().jump(destination_block, &[]);
 +                    }
 +                    None => {
 +                        crate::trap::trap_unreachable(
 +                            fx,
 +                            "[corruption] Returned from noreturn inline asm",
 +                        );
 +                    }
 +                }
 +            }
 +            TerminatorKind::Resume | TerminatorKind::Abort => {
 +                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
 +            }
 +            TerminatorKind::Unreachable => {
 +                trap_unreachable(fx, "[corruption] Hit unreachable code.");
 +            }
 +            TerminatorKind::Yield { .. }
 +            | TerminatorKind::FalseEdge { .. }
 +            | TerminatorKind::FalseUnwind { .. }
 +            | TerminatorKind::DropAndReplace { .. }
 +            | TerminatorKind::GeneratorDrop => {
 +                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
 +            }
 +            TerminatorKind::Drop { place, target, unwind: _ } => {
 +                let drop_place = codegen_place(fx, *place);
 +                crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
 +
 +                let target_block = fx.get_block(*target);
 +                fx.bcx.ins().jump(target_block, &[]);
 +            }
 +        };
 +    }
 +
 +    fx.bcx.seal_all_blocks();
 +    fx.bcx.finalize();
 +}
 +
 +fn codegen_stmt<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    #[allow(unused_variables)] cur_block: Block,
 +    stmt: &Statement<'tcx>,
 +) {
 +    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
 +
 +    fx.set_debug_loc(stmt.source_info);
 +
 +    #[cfg(disabled)]
 +    match &stmt.kind {
 +        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
 +        _ => {
 +            if fx.clif_comments.enabled() {
 +                let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
 +                fx.add_comment(inst, format!("{:?}", stmt));
 +            }
 +        }
 +    }
 +
 +    match &stmt.kind {
 +        StatementKind::SetDiscriminant { place, variant_index } => {
 +            let place = codegen_place(fx, **place);
 +            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
 +        }
 +        StatementKind::Assign(to_place_and_rval) => {
 +            let lval = codegen_place(fx, to_place_and_rval.0);
 +            let dest_layout = lval.layout();
 +            match to_place_and_rval.1 {
 +                Rvalue::Use(ref operand) => {
 +                    let val = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
 +                    let place = codegen_place(fx, place);
 +                    let ref_ = place.place_ref(fx, lval.layout());
 +                    lval.write_cvalue(fx, ref_);
 +                }
 +                Rvalue::ThreadLocalRef(def_id) => {
 +                    let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = if !fx.tcx.sess.overflow_checks() {
 +                        let val =
 +                            crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
 +                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
 +                        CValue::by_val_pair(val, is_overflow, lval.layout())
 +                    } else {
 +                        crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
 +                    };
 +
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::UnaryOp(un_op, ref operand) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let layout = operand.layout();
 +                    let val = operand.load_scalar(fx);
 +                    let res = match un_op {
 +                        UnOp::Not => match layout.ty.kind() {
 +                            ty::Bool => {
 +                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
 +                                CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
 +                            }
 +                            ty::Uint(_) | ty::Int(_) => {
 +                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
 +                            }
 +                            _ => unreachable!("un op Not for {:?}", layout.ty),
 +                        },
 +                        UnOp::Neg => match layout.ty.kind() {
 +                            ty::Int(IntTy::I128) => {
 +                                // FIXME remove this case once ineg.i128 works
 +                                let zero =
 +                                    CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
 +                                crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
 +                            }
 +                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
 +                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
 +                            _ => unreachable!("un op Neg for {:?}", layout.ty),
 +                        },
 +                    };
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ReifyFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    match *from_ty.kind() {
 +                        ty::FnDef(def_id, substs) => {
 +                            let func_ref = fx.get_function_ref(
 +                                Instance::resolve_for_fn_ptr(
 +                                    fx.tcx,
 +                                    ParamEnv::reveal_all(),
 +                                    def_id,
 +                                    substs,
 +                                )
 +                                .unwrap()
 +                                .polymorphize(fx.tcx),
 +                            );
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
 +                        }
 +                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::UnsafeFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::MutToConstPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ArrayToPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    let operand = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
 +                }
 +                Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let from_ty = operand.layout().ty;
 +                    let to_ty = fx.monomorphize(to_ty);
 +
 +                    fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
 +                        ty.builtin_deref(true)
 +                            .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
 +                                has_ptr_meta(fx.tcx, pointee_ty)
 +                            })
 +                            .unwrap_or(false)
 +                    }
 +
 +                    if is_fat_ptr(fx, from_ty) {
 +                        if is_fat_ptr(fx, to_ty) {
 +                            // fat-ptr -> fat-ptr
 +                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
 +                        } else {
 +                            // fat-ptr -> thin-ptr
 +                            let (ptr, _extra) = operand.load_scalar_pair(fx);
 +                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
 +                        }
 +                    } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
 +                        // enum -> discriminant value
 +                        assert!(adt_def.is_enum());
 +                        match to_ty.kind() {
 +                            ty::Uint(_) | ty::Int(_) => {}
 +                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
 +                        }
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +
 +                        let discriminant = crate::discriminant::codegen_get_discriminant(
 +                            fx,
 +                            operand,
 +                            fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
 +                        )
 +                        .load_scalar(fx);
 +
 +                        let res = crate::cast::clif_intcast(
 +                            fx,
 +                            discriminant,
 +                            to_clif_ty,
 +                            to_ty.is_signed(),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    } else {
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +                        let from = operand.load_scalar(fx);
 +
 +                        let res = clif_int_or_float_cast(
 +                            fx,
 +                            from,
 +                            type_sign(from_ty),
 +                            to_clif_ty,
 +                            type_sign(to_ty),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
 +                    ref operand,
 +                    _to_ty,
 +                ) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    match *operand.layout().ty.kind() {
 +                        ty::Closure(def_id, substs) => {
 +                            let instance = Instance::resolve_closure(
 +                                fx.tcx,
 +                                def_id,
 +                                substs,
 +                                ty::ClosureKind::FnOnce,
 +                            )
 +                            .polymorphize(fx.tcx);
 +                            let func_ref = fx.get_function_ref(instance);
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
 +                        }
 +                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
 +                    }
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    operand.unsize_value(fx, lval);
 +                }
 +                Rvalue::Discriminant(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let value = place.to_cvalue(fx);
 +                    let discr =
 +                        crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
 +                    lval.write_cvalue(fx, discr);
 +                }
 +                Rvalue::Repeat(ref operand, times) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let times = fx
 +                        .monomorphize(times)
 +                        .eval(fx.tcx, ParamEnv::reveal_all())
 +                        .val
 +                        .try_to_bits(fx.tcx.data_layout.pointer_size)
 +                        .unwrap();
 +                    if operand.layout().size.bytes() == 0 {
 +                        // Do nothing for ZSTs
 +                    } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
 +                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
 +                        // FIXME use emit_small_memset where possible
 +                        let addr = lval.to_ptr().get_addr(fx);
 +                        let val = operand.load_scalar(fx);
 +                        fx.bcx.call_memset(fx.module.target_config(), addr, val, times);
 +                    } else {
 +                        let loop_block = fx.bcx.create_block();
 +                        let loop_block2 = fx.bcx.create_block();
 +                        let done_block = fx.bcx.create_block();
 +                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
 +                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                        fx.bcx.ins().jump(loop_block, &[zero]);
 +
 +                        fx.bcx.switch_to_block(loop_block);
 +                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
 +                        fx.bcx.ins().brnz(done, done_block, &[]);
 +                        fx.bcx.ins().jump(loop_block2, &[]);
 +
 +                        fx.bcx.switch_to_block(loop_block2);
 +                        let to = lval.place_index(fx, index);
 +                        to.write_cvalue(fx, operand);
 +                        let index = fx.bcx.ins().iadd_imm(index, 1);
 +                        fx.bcx.ins().jump(loop_block, &[index]);
 +
 +                        fx.bcx.switch_to_block(done_block);
 +                        fx.bcx.ins().nop();
 +                    }
 +                }
 +                Rvalue::Len(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +                    let len = codegen_array_len(fx, place);
 +                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
 +                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
 +                    let content_ty = fx.monomorphize(content_ty);
 +                    let layout = fx.layout_of(content_ty);
 +                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
 +                    let llalign = fx.bcx.ins().iconst(usize_type, layout.align.abi.bytes() as i64);
 +                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
 +
 +                    // Allocate space:
 +                    let def_id =
 +                        match fx.tcx.lang_items().require(rustc_hir::LangItem::ExchangeMalloc) {
 +                            Ok(id) => id,
 +                            Err(s) => {
 +                                fx.tcx
 +                                    .sess
 +                                    .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
 +                            }
 +                        };
 +                    let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +                    let func_ref = fx.get_function_ref(instance);
 +                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
 +                    let ptr = fx.bcx.inst_results(call)[0];
 +                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
 +                    assert!(
 +                        lval.layout()
 +                            .ty
 +                            .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
 +                    );
 +                    let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
 +                    let val =
 +                        CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
 +                    AggregateKind::Array(_ty) => {
 +                        for (i, operand) in operands.iter().enumerate() {
 +                            let operand = codegen_operand(fx, operand);
 +                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
 +                            let to = lval.place_index(fx, index);
 +                            to.write_cvalue(fx, operand);
 +                        }
 +                    }
 +                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
 +                },
 +            }
 +        }
 +        StatementKind::StorageLive(_)
 +        | StatementKind::StorageDead(_)
 +        | StatementKind::Nop
 +        | StatementKind::FakeRead(..)
 +        | StatementKind::Retag { .. }
 +        | StatementKind::AscribeUserType(..) => {}
 +
 +        StatementKind::LlvmInlineAsm(asm) => {
 +            match asm.asm.asm.as_str().trim() {
 +                "" => {
 +                    // An empty asm string is used as a black box (optimization barrier); nothing to emit
 +                }
 +                _ => fx.tcx.sess.span_fatal(
 +                    stmt.source_info.span,
 +                    "Legacy `llvm_asm!` inline assembly is not supported. \
 +                    Try using the new `asm!` instead.",
 +                ),
 +            }
 +        }
 +        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
 +        StatementKind::CopyNonOverlapping(inner) => {
 +            let dst = codegen_operand(fx, &inner.dst);
 +            let pointee = dst
 +                .layout()
 +                .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
 +                .expect("Expected pointer");
 +            let dst = dst.load_scalar(fx);
 +            let src = codegen_operand(fx, &inner.src).load_scalar(fx);
 +            let count = codegen_operand(fx, &inner.count).load_scalar(fx);
 +            let elem_size: u64 = pointee.size.bytes();
 +            let bytes =
 +                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 +            fx.bcx.call_memcpy(fx.module.target_config(), dst, src, bytes);
 +        }
 +    }
 +}
 +
 +fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
 +    match *place.layout().ty.kind() {
 +        ty::Array(_elem_ty, len) => {
 +            let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
 +            fx.bcx.ins().iconst(fx.pointer_type, len)
 +        }
 +        ty::Slice(_elem_ty) => {
 +            place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
 +        }
 +        _ => bug!("Rvalue::Len({:?})", place),
 +    }
 +}
 +
 +pub(crate) fn codegen_place<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    place: Place<'tcx>,
 +) -> CPlace<'tcx> {
 +    let mut cplace = fx.get_local_place(place.local);
 +
 +    for elem in place.projection {
 +        match elem {
 +            PlaceElem::Deref => {
 +                cplace = cplace.place_deref(fx);
 +            }
 +            PlaceElem::Field(field, _ty) => {
 +                cplace = cplace.place_field(fx, field);
 +            }
 +            PlaceElem::Index(local) => {
 +                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
 +                let offset: u64 = offset;
 +                let index = if !from_end {
 +                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
 +                } else {
 +                    let len = codegen_array_len(fx, cplace);
 +                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
 +                };
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::Subslice { from, to, from_end } => {
 +                // These indices are generated by slice patterns.
 +                // slice[from:-to] in Python terms.
 +
 +                let from: u64 = from;
 +                let to: u64 = to;
 +
 +                match cplace.layout().ty.kind() {
 +                    ty::Array(elem_ty, _len) => {
 +                        assert!(!from_end, "array subslices are never `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let ptr = cplace.to_ptr();
 +                        cplace = CPlace::for_ptr(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
 +                        );
 +                    }
 +                    ty::Slice(elem_ty) => {
 +                        assert!(from_end, "slice subslices should be `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
 +                        let len = len.unwrap();
 +                        cplace = CPlace::for_ptr_with_extra(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
 +                            cplace.layout(),
 +                        );
 +                    }
 +                    _ => unreachable!(),
 +                }
 +            }
 +            PlaceElem::Downcast(_adt_def, variant) => {
 +                cplace = cplace.downcast_variant(fx, variant);
 +            }
 +        }
 +    }
 +
 +    cplace
 +}
 +
 +pub(crate) fn codegen_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> CValue<'tcx> {
 +    match operand {
 +        Operand::Move(place) | Operand::Copy(place) => {
 +            let cplace = codegen_place(fx, *place);
 +            cplace.to_cvalue(fx)
 +        }
 +        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
 +    }
 +}
 +
 +pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
 +    let location = fx.get_caller_location(span).load_scalar(fx);
 +
 +    let msg_ptr = fx.anonymous_str(msg_str);
 +    let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
 +    let args = [msg_ptr, msg_len, location];
 +
 +    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
 +}
 +
 +pub(crate) fn codegen_panic_inner<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    lang_item: rustc_hir::LangItem,
 +    args: &[Value],
 +    span: Span,
 +) {
 +    let def_id =
 +        fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
 +
 +    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +    let symbol_name = fx.tcx.symbol_name(instance).name;
 +
 +    fx.lib_call(
 +        &*symbol_name,
 +        vec![
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +        ],
 +        vec![],
 +        args,
 +    );
 +
 +    crate::trap::trap_unreachable(fx, "panic lang item returned");
 +}
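
The `Rvalue::Repeat` arm above lowers array repetition either to a `memset` (for byte-sized elements) or to a counted loop built from `loop_block`/`loop_block2`/`done_block`. A minimal sketch of what that loop computes, in plain Rust; the names `repeat_in_place`, `dst`, `elem` and `times` are stand-ins for illustration, not part of the patch:

    // Sketch only: mirrors the block structure emitted above.
    fn repeat_in_place<T: Copy>(dst: &mut [T], elem: T, times: usize) {
        let mut index = 0; // the block parameter of loop_block
        while index != times { // icmp_imm Equal + brnz to done_block
            dst[index] = elem; // loop_block2: place_index + write_cvalue
            index += 1; // iadd_imm, then jump back to loop_block
        }
    }
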
index 74c5e09f08da008decbad6a70710b106adb4b0bf,0000000000000000000000000000000000000000..e7e6afeb865bb005bf436b7ec6e2802930dc7fe2
mode 100644,000000..100644
--- /dev/null
@@@ -1,179 -1,0 +1,160 @@@
-         (_, types::I128) => {
-             let lo = if from == types::I64 {
-                 val
-             } else if signed {
-                 fx.bcx.ins().sextend(types::I64, val)
-             } else {
-                 fx.bcx.ins().uextend(types::I64, val)
-             };
-             let hi = if signed {
-                 fx.bcx.ins().sshr_imm(lo, 63)
-             } else {
-                 fx.bcx.ins().iconst(types::I64, 0)
-             };
-             fx.bcx.ins().iconcat(lo, hi)
-         }
 +//! Various number casting functions
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn clif_intcast(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    val: Value,
 +    to: Type,
 +    signed: bool,
 +) -> Value {
 +    let from = fx.bcx.func.dfg.value_type(val);
 +    match (from, to) {
 +        // equal
 +        (_, _) if from == to => val,
 +
 +        // extend
-         (types::I128, _) => {
-             let (lsb, _msb) = fx.bcx.ins().isplit(val);
-             if to == types::I64 { lsb } else { fx.bcx.ins().ireduce(to, lsb) }
-         }
 +        (_, _) if to.wider_or_equal(from) => {
 +            if signed {
 +                fx.bcx.ins().sextend(to, val)
 +            } else {
 +                fx.bcx.ins().uextend(to, val)
 +            }
 +        }
 +
 +        // reduce
 +        (_, _) => fx.bcx.ins().ireduce(to, val),
 +    }
 +}
 +
 +pub(crate) fn clif_int_or_float_cast(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    from: Value,
 +    from_signed: bool,
 +    to_ty: Type,
 +    to_signed: bool,
 +) -> Value {
 +    let from_ty = fx.bcx.func.dfg.value_type(from);
 +
 +    if from_ty.is_int() && to_ty.is_int() {
 +        // int-like -> int-like
 +        clif_intcast(
 +            fx,
 +            from,
 +            to_ty,
 +            // This is correct: either from_signed == to_signed (trivially correct),
 +            // or from_clif_ty == to_clif_ty, in which case the cast is a no-op.
 +            from_signed,
 +        )
 +    } else if from_ty.is_int() && to_ty.is_float() {
 +        if from_ty == types::I128 {
 +            // _______ss__f_
 +            // __float  tisf: i128 -> f32
 +            // __float  tidf: i128 -> f64
 +            // __floatuntisf: u128 -> f32
 +            // __floatuntidf: u128 -> f64
 +
 +            let name = format!(
 +                "__float{sign}ti{flt}f",
 +                sign = if from_signed { "" } else { "un" },
 +                flt = match to_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", to_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
 +
 +            let to_rust_ty = match to_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            return fx
 +                .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
 +                .load_scalar(fx);
 +        }
 +
 +        // int-like -> float
 +        if from_signed {
 +            fx.bcx.ins().fcvt_from_sint(to_ty, from)
 +        } else {
 +            fx.bcx.ins().fcvt_from_uint(to_ty, from)
 +        }
 +    } else if from_ty.is_float() && to_ty.is_int() {
 +        if to_ty == types::I128 {
 +            // _____sssf___
 +            // __fix   sfti: f32 -> i128
 +            // __fix   dfti: f64 -> i128
 +            // __fixunssfti: f32 -> u128
 +            // __fixunsdfti: f64 -> u128
 +
 +            let name = format!(
 +                "__fix{sign}{flt}fti",
 +                sign = if to_signed { "" } else { "uns" },
 +                flt = match from_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", from_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = match from_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
 +
 +            return fx
 +                .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
 +                .load_scalar(fx);
 +        }
 +
 +        // float -> int-like
 +        if to_ty == types::I8 || to_ty == types::I16 {
 +            // FIXME implement fcvt_to_*int_sat.i8/i16
 +            let val = if to_signed {
 +                fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
 +            } else {
 +                fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
 +            };
 +            let (min, max) = match (to_ty, to_signed) {
 +                (types::I8, false) => (0, i64::from(u8::MAX)),
 +                (types::I16, false) => (0, i64::from(u16::MAX)),
 +                (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
 +                (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
 +                _ => unreachable!(),
 +            };
 +            let min_val = fx.bcx.ins().iconst(types::I32, min);
 +            let max_val = fx.bcx.ins().iconst(types::I32, max);
 +
 +            let val = if to_signed {
 +                let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
 +                let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
 +                fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
 +            } else {
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
 +                fx.bcx.ins().select(has_overflow, max_val, val)
 +            };
 +            fx.bcx.ins().ireduce(to_ty, val)
 +        } else if to_signed {
 +            fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
 +        } else {
 +            fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
 +        }
 +    } else if from_ty.is_float() && to_ty.is_float() {
 +        // float -> float
 +        match (from_ty, to_ty) {
 +            (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
 +            (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
 +            _ => from,
 +        }
 +    } else {
 +        unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
 +    }
 +}
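
Because Cranelift lacks `fcvt_to_{s,u}int_sat` for `i8`/`i16` (see the FIXME above), the float-to-small-int path saturates into `i32` first and then clamps with two `select`s before reducing to the target width. A sketch of the resulting semantics in plain Rust, for the signed `f32 -> i8` case (the name `f32_to_i8_sat` is hypothetical; Rust's `as` casts are already saturating, with NaN mapping to 0, matching `fcvt_to_sint_sat`):

    // Sketch: what the emitted clamp computes for f32 -> i8.
    fn f32_to_i8_sat(from: f32) -> i8 {
        let val = from as i32; // fcvt_to_sint_sat into the wider type
        let clamped = val.clamp(i8::MIN as i32, i8::MAX as i32); // the two selects
        clamped as i8 // ireduce to the target width
    }
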
index ffe1922ab9056dc3c82a303e21d8d147b7243a2c,0000000000000000000000000000000000000000..638b2d573b5ddbe00ae715e12417cde097f12072
mode 100644,000000..100644
--- /dev/null
@@@ -1,167 -1,0 +1,153 @@@
-     let lhs_val = lhs.load_scalar(fx);
-     let rhs_val = rhs.load_scalar(fx);
 +//! Replaces 128-bit operators with lang item calls where necessary
 +
 +use cranelift_codegen::ir::ArgumentPurpose;
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn maybe_codegen<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    checked: bool,
 +    lhs: CValue<'tcx>,
 +    rhs: CValue<'tcx>,
 +) -> Option<CValue<'tcx>> {
 +    if lhs.layout().ty != fx.tcx.types.u128
 +        && lhs.layout().ty != fx.tcx.types.i128
 +        && rhs.layout().ty != fx.tcx.types.u128
 +        && rhs.layout().ty != fx.tcx.types.i128
 +    {
 +        return None;
 +    }
 +
-         BinOp::Mul if !checked => {
-             let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
-             if fx.tcx.sess.target.is_like_windows {
-                 let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
-                 let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
-                 let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
-                 assert!(lhs_extra.is_none());
-                 assert!(rhs_extra.is_none());
-                 let args =
-                     [ret_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
-                 fx.lib_call(
-                     "__multi3",
 +    let is_signed = type_sign(lhs.layout().ty);
 +
 +    match bin_op {
 +        BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
 +            assert!(!checked);
 +            None
 +        }
 +        BinOp::Add | BinOp::Sub if !checked => None,
-                         AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
-                         AbiParam::new(pointer_ty(fx.tcx)),
-                         AbiParam::new(pointer_ty(fx.tcx)),
++        BinOp::Mul if !checked || is_signed => {
++            if !checked {
++                let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
++                if fx.tcx.sess.target.is_like_windows {
++                    let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
++                    let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
++                    let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
++                    assert!(lhs_extra.is_none());
++                    assert!(rhs_extra.is_none());
++                    let args = [
++                        ret_place.to_ptr().get_addr(fx),
++                        lhs_ptr.get_addr(fx),
++                        rhs_ptr.get_addr(fx),
++                    ];
++                    fx.lib_call(
++                        "__multi3",
++                        vec![
++                            AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
++                            AbiParam::new(fx.pointer_type),
++                            AbiParam::new(fx.pointer_type),
++                        ],
++                        vec![],
++                        &args,
++                    );
++                    Some(ret_place.to_cvalue(fx))
++                } else {
++                    Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
++                }
++            } else {
++                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
++                let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
++                let lhs = lhs.load_scalar(fx);
++                let rhs = rhs.load_scalar(fx);
++                let oflow_ptr = oflow.to_ptr().get_addr(fx);
++                let res = fx.lib_call(
++                    "__muloti4",
 +                    vec![
-                     vec![],
-                     &args,
-                 );
-                 Some(ret_place.to_cvalue(fx))
-             } else {
-                 Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
++                        AbiParam::new(types::I128),
++                        AbiParam::new(types::I128),
++                        AbiParam::new(fx.pointer_type),
 +                    ],
-                         AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
-                         AbiParam::new(pointer_ty(fx.tcx)),
-                         AbiParam::new(pointer_ty(fx.tcx)),
++                    vec![AbiParam::new(types::I128)],
++                    &[lhs, rhs, oflow_ptr],
++                )[0];
++                let oflow = oflow.to_cvalue(fx).load_scalar(fx);
++                let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
++                Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
 +            }
 +        }
 +        BinOp::Add | BinOp::Sub | BinOp::Mul => {
 +            assert!(checked);
 +            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
 +            let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
 +            let (param_types, args) = if fx.tcx.sess.target.is_like_windows {
 +                let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
 +                let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
 +                assert!(lhs_extra.is_none());
 +                assert!(rhs_extra.is_none());
 +                (
 +                    vec![
-                         AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
++                        AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
++                        AbiParam::new(fx.pointer_type),
++                        AbiParam::new(fx.pointer_type),
 +                    ],
 +                    [out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
 +                )
 +            } else {
 +                (
 +                    vec![
-                 (BinOp::Mul, true) => "__rust_i128_mulo",
++                        AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
 +                        AbiParam::new(types::I128),
 +                        AbiParam::new(types::I128),
 +                    ],
 +                    [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)],
 +                )
 +            };
 +            let name = match (bin_op, is_signed) {
 +                (BinOp::Add, false) => "__rust_u128_addo",
 +                (BinOp::Add, true) => "__rust_i128_addo",
 +                (BinOp::Sub, false) => "__rust_u128_subo",
 +                (BinOp::Sub, true) => "__rust_i128_subo",
 +                (BinOp::Mul, false) => "__rust_u128_mulo",
-                     vec![AbiParam::new(pointer_ty(fx.tcx)), AbiParam::new(pointer_ty(fx.tcx))],
 +                _ => unreachable!(),
 +            };
 +            fx.lib_call(name, param_types, vec![], &args);
 +            Some(out_place.to_cvalue(fx))
 +        }
 +        BinOp::Offset => unreachable!("offset should only be used on pointers, not 128-bit ints"),
 +        BinOp::Div | BinOp::Rem => {
 +            assert!(!checked);
 +            let name = match (bin_op, is_signed) {
 +                (BinOp::Div, false) => "__udivti3",
 +                (BinOp::Div, true) => "__divti3",
 +                (BinOp::Rem, false) => "__umodti3",
 +                (BinOp::Rem, true) => "__modti3",
 +                _ => unreachable!(),
 +            };
 +            if fx.tcx.sess.target.is_like_windows {
 +                let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
 +                let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
 +                assert!(lhs_extra.is_none());
 +                assert!(rhs_extra.is_none());
 +                let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
 +                let ret = fx.lib_call(
 +                    name,
-         BinOp::Shl | BinOp::Shr => {
-             let is_overflow = if checked {
-                 // rhs >= 128
-                 // FIXME support non 128bit rhs
-                 /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
-                 let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
-                 let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
-                 let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
-                 let is_overflow = fx.bcx.ins().bconst(types::B1, false);
-                 Some(fx.bcx.ins().bint(types::I8, is_overflow))
-             } else {
-                 None
-             };
-             let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
-             let val = match bin_op {
-                 BinOp::Shl => fx.bcx.ins().ishl(lhs_val, truncated_rhs),
-                 BinOp::Shr => {
-                     if is_signed {
-                         fx.bcx.ins().sshr(lhs_val, truncated_rhs)
-                     } else {
-                         fx.bcx.ins().ushr(lhs_val, truncated_rhs)
-                     }
-                 }
-                 _ => unreachable!(),
-             };
-             if let Some(is_overflow) = is_overflow {
-                 let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
-                 Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
-             } else {
-                 Some(CValue::by_val(val, lhs.layout()))
-             }
-         }
++                    vec![AbiParam::new(fx.pointer_type), AbiParam::new(fx.pointer_type)],
 +                    vec![AbiParam::new(types::I64X2)],
 +                    &args,
 +                )[0];
 +                // FIXME use bitcast instead of store to get from i64x2 to i128
 +                let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
 +                ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
 +                Some(ret_place.to_cvalue(fx))
 +            } else {
 +                Some(fx.easy_call(name, &[lhs, rhs], lhs.layout().ty))
 +            }
 +        }
 +        BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
 +            assert!(!checked);
 +            None
 +        }
++        BinOp::Shl | BinOp::Shr => None,
 +    }
 +}
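
The new signed checked-multiply path above calls compiler-builtins' `__muloti4`, which returns the wrapped 128-bit product and reports overflow through an out-pointer; the resulting `(res, oflow)` pair has the same meaning as `i128::overflowing_mul`. A sketch of that contract (the name `checked_mul_sketch` is illustrative only):

    // Sketch: the value pair built from the __muloti4 call above.
    fn checked_mul_sketch(lhs: i128, rhs: i128) -> (i128, bool) {
        lhs.overflowing_mul(rhs)
    }

    // e.g. checked_mul_sketch(i128::MAX, 2) == (-2, true)
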
index 6018eefcd42fb4ae4b196729bed6ecbe19d33dc8,0000000000000000000000000000000000000000..fb6ccd7c535845b85d9582de3d82f4638bc1cbe4
mode 100644,000000..100644
--- /dev/null
@@@ -1,192 -1,0 +1,192 @@@
-                         // TODO: better handling of sign
 +//! Write the debuginfo into an object file.
 +
 +use rustc_data_structures::fx::FxHashMap;
 +
 +use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
 +use gimli::{RunTimeEndian, SectionId};
 +
 +use crate::backend::WriteDebugInfo;
 +
 +use super::DebugContext;
 +
 +impl DebugContext<'_> {
 +    pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
 +        let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
 +        let root = self.dwarf.unit.root();
 +        let root = self.dwarf.unit.get_mut(root);
 +        root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
 +
 +        let mut sections = Sections::new(WriterRelocate::new(self.endian));
 +        self.dwarf.write(&mut sections).unwrap();
 +
 +        let mut section_map = FxHashMap::default();
 +        let _: Result<()> = sections.for_each_mut(|id, section| {
 +            if !section.writer.slice().is_empty() {
 +                let section_id = product.add_debug_section(id, section.writer.take());
 +                section_map.insert(id, section_id);
 +            }
 +            Ok(())
 +        });
 +
 +        let _: Result<()> = sections.for_each(|id, section| {
 +            if let Some(section_id) = section_map.get(&id) {
 +                for reloc in &section.relocs {
 +                    product.add_debug_reloc(&section_map, section_id, reloc);
 +                }
 +            }
 +            Ok(())
 +        });
 +    }
 +}
 +
 +#[derive(Clone)]
 +pub(crate) struct DebugReloc {
 +    pub(crate) offset: u32,
 +    pub(crate) size: u8,
 +    pub(crate) name: DebugRelocName,
 +    pub(crate) addend: i64,
 +    pub(crate) kind: object::RelocationKind,
 +}
 +
 +#[derive(Clone)]
 +pub(crate) enum DebugRelocName {
 +    Section(SectionId),
 +    Symbol(usize),
 +}
 +
 +/// A [`Writer`] that collects all necessary relocations.
 +#[derive(Clone)]
 +pub(super) struct WriterRelocate {
 +    pub(super) relocs: Vec<DebugReloc>,
 +    pub(super) writer: EndianVec<RunTimeEndian>,
 +}
 +
 +impl WriterRelocate {
 +    pub(super) fn new(endian: RunTimeEndian) -> Self {
 +        WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
 +    }
 +
 +    /// Apply the collected relocations so the result is usable for JIT execution.
 +    #[cfg(feature = "jit")]
 +    pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
 +        use std::convert::TryInto;
 +
 +        for reloc in self.relocs.drain(..) {
 +            match reloc.name {
 +                super::DebugRelocName::Section(_) => unreachable!(),
 +                super::DebugRelocName::Symbol(sym) => {
 +                    let addr = jit_module.get_finalized_function(
 +                        cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
 +                    );
 +                    let val = (addr as u64 as i64 + reloc.addend) as u64;
 +                    self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
 +                }
 +            }
 +        }
 +        self.writer.into_vec()
 +    }
 +}
 +
 +impl Writer for WriterRelocate {
 +    type Endian = RunTimeEndian;
 +
 +    fn endian(&self) -> Self::Endian {
 +        self.writer.endian()
 +    }
 +
 +    fn len(&self) -> usize {
 +        self.writer.len()
 +    }
 +
 +    fn write(&mut self, bytes: &[u8]) -> Result<()> {
 +        self.writer.write(bytes)
 +    }
 +
 +    fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
 +        self.writer.write_at(offset, bytes)
 +    }
 +
 +    fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
 +        match address {
 +            Address::Constant(val) => self.write_udata(val, size),
 +            Address::Symbol { symbol, addend } => {
 +                let offset = self.len() as u64;
 +                self.relocs.push(DebugReloc {
 +                    offset: offset as u32,
 +                    size,
 +                    name: DebugRelocName::Symbol(symbol),
 +                    addend: addend as i64,
 +                    kind: object::RelocationKind::Absolute,
 +                });
 +                self.write_udata(0, size)
 +            }
 +        }
 +    }
 +
 +    fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
 +        let offset = self.len() as u32;
 +        self.relocs.push(DebugReloc {
 +            offset,
 +            size,
 +            name: DebugRelocName::Section(section),
 +            addend: val as i64,
 +            kind: object::RelocationKind::Absolute,
 +        });
 +        self.write_udata(0, size)
 +    }
 +
 +    fn write_offset_at(
 +        &mut self,
 +        offset: usize,
 +        val: usize,
 +        section: SectionId,
 +        size: u8,
 +    ) -> Result<()> {
 +        self.relocs.push(DebugReloc {
 +            offset: offset as u32,
 +            size,
 +            name: DebugRelocName::Section(section),
 +            addend: val as i64,
 +            kind: object::RelocationKind::Absolute,
 +        });
 +        self.write_udata_at(offset, 0, size)
 +    }
 +
 +    fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
 +        match address {
 +            // Address::Constant arm copied from gimli
 +            Address::Constant(val) => {
 +                // Indirect doesn't matter here.
 +                let val = match eh_pe.application() {
 +                    gimli::DW_EH_PE_absptr => val,
 +                    gimli::DW_EH_PE_pcrel => {
++                        // FIXME better handling of sign
 +                        let offset = self.len() as u64;
 +                        offset.wrapping_sub(val)
 +                    }
 +                    _ => {
 +                        return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
 +                    }
 +                };
 +                self.write_eh_pointer_data(val, eh_pe.format(), size)
 +            }
 +            Address::Symbol { symbol, addend } => match eh_pe.application() {
 +                gimli::DW_EH_PE_pcrel => {
 +                    let size = match eh_pe.format() {
 +                        gimli::DW_EH_PE_sdata4 => 4,
 +                        _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
 +                    };
 +                    self.relocs.push(DebugReloc {
 +                        offset: self.len() as u32,
 +                        size,
 +                        name: DebugRelocName::Symbol(symbol),
 +                        addend,
 +                        kind: object::RelocationKind::Relative,
 +                    });
 +                    self.write_udata(0, size)
 +                }
 +                _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
 +            },
 +        }
 +    }
 +}
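
`relocate_for_jit` resolves each recorded `DebugReloc` by looking up the symbol's finalized function address and writing address-plus-addend back into the buffer at the recorded offset. Schematically, with stand-in parameters (a sketch only; the real writer goes through `write_udata_at` and honors the target's `RunTimeEndian` rather than hard-coding little-endian):

    // Sketch: resolving one absolute relocation in place.
    fn resolve_reloc(buf: &mut [u8], offset: usize, sym_addr: u64, addend: i64, size: u8) {
        let val = (sym_addr as i64 + addend) as u64;
        let n = size as usize; // 1, 2, 4 or 8 bytes
        buf[offset..offset + n].copy_from_slice(&val.to_le_bytes()[..n]);
    }
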
index c67336eb3f2c3125ee0427511991fbd3d01d2e09,0000000000000000000000000000000000000000..ceef65d54785fc92338045dd7e40cd9af627fa72
mode 100644,000000..100644
--- /dev/null
@@@ -1,384 -1,0 +1,384 @@@
-             // TODO: this should be configurable
 +//! Handling of everything related to debuginfo.
 +
 +mod emit;
 +mod line_info;
 +mod unwind;
 +
 +use crate::prelude::*;
 +
 +use rustc_index::vec::IndexVec;
 +
 +use cranelift_codegen::entity::EntityRef;
 +use cranelift_codegen::ir::{LabelValueLoc, StackSlots, ValueLabel, ValueLoc};
 +use cranelift_codegen::isa::TargetIsa;
 +use cranelift_codegen::ValueLocRange;
 +
 +use gimli::write::{
 +    Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
 +    LocationList, Range, RangeList, UnitEntryId,
 +};
 +use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
 +
 +pub(crate) use emit::{DebugReloc, DebugRelocName};
 +pub(crate) use unwind::UnwindContext;
 +
 +fn target_endian(tcx: TyCtxt<'_>) -> RunTimeEndian {
 +    use rustc_target::abi::Endian;
 +
 +    match tcx.data_layout.endian {
 +        Endian::Big => RunTimeEndian::Big,
 +        Endian::Little => RunTimeEndian::Little,
 +    }
 +}
 +
 +pub(crate) struct DebugContext<'tcx> {
 +    tcx: TyCtxt<'tcx>,
 +
 +    endian: RunTimeEndian,
 +
 +    dwarf: DwarfUnit,
 +    unit_range_list: RangeList,
 +
 +    types: FxHashMap<Ty<'tcx>, UnitEntryId>,
 +}
 +
 +impl<'tcx> DebugContext<'tcx> {
 +    pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
 +        let encoding = Encoding {
 +            format: Format::Dwarf32,
++            // FIXME this should be configurable
 +            // macOS doesn't seem to support DWARF > 3
 +            // DWARF version 5 is required for the md5 file hash
 +            version: if tcx.sess.target.is_like_osx {
 +                3
 +            } else {
 +                // FIXME change to version 5 once the gdb and lldb shipping with the latest debian
 +                // support it.
 +                4
 +            },
 +            address_size: isa.frontend_config().pointer_bytes(),
 +        };
 +
 +        let mut dwarf = DwarfUnit::new(encoding);
 +
 +        let producer = format!(
 +            "cg_clif (rustc {}, cranelift {})",
 +            rustc_interface::util::version_str().unwrap_or("unknown version"),
 +            cranelift_codegen::VERSION,
 +        );
 +        let comp_dir = tcx.sess.working_dir.to_string_lossy(false).into_owned();
 +        let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
 +            Some(path) => {
 +                let name = path.to_string_lossy().into_owned();
 +                (name, None)
 +            }
 +            None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
 +        };
 +
 +        let mut line_program = LineProgram::new(
 +            encoding,
 +            LineEncoding::default(),
 +            LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
 +            LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
 +            file_info,
 +        );
 +        line_program.file_has_md5 = file_info.is_some();
 +
 +        dwarf.unit.line_program = line_program;
 +
 +        {
 +            let name = dwarf.strings.add(name);
 +            let comp_dir = dwarf.strings.add(comp_dir);
 +
 +            let root = dwarf.unit.root();
 +            let root = dwarf.unit.get_mut(root);
 +            root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
 +            root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
 +            root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
 +            root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
 +            root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
 +        }
 +
 +        DebugContext {
 +            tcx,
 +
 +            endian: target_endian(tcx),
 +
 +            dwarf,
 +            unit_range_list: RangeList(Vec::new()),
 +
 +            types: FxHashMap::default(),
 +        }
 +    }
 +
 +    fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
 +        if let Some(type_id) = self.types.get(ty) {
 +            return *type_id;
 +        }
 +
 +        let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
 +
 +        let primitive = |dwarf: &mut DwarfUnit, ate| {
 +            let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
 +            let type_entry = dwarf.unit.get_mut(type_id);
 +            type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
 +            type_id
 +        };
 +
 +        let name = format!("{}", ty);
 +        let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
 +
 +        let type_id = match ty.kind() {
 +            ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
 +            ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
 +            ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
 +            ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
 +            ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
 +            ty::Ref(_, pointee_ty, _mutbl)
 +            | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
 +                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
 +
 +                // Ensure that the type is inserted before recursing, to avoid duplicates
 +                self.types.insert(ty, type_id);
 +
 +                let pointee = self.dwarf_ty(pointee_ty);
 +
 +                let type_entry = self.dwarf.unit.get_mut(type_id);
 +
 +                //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
 +                type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
 +
 +                type_id
 +            }
 +            ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
 +                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
 +
 +                // Ensure that the type is inserted before recursing, to avoid duplicates
 +                self.types.insert(ty, type_id);
 +
 +                let variant = adt_def.non_enum_variant();
 +
 +                for (field_idx, field_def) in variant.fields.iter().enumerate() {
 +                    let field_offset = layout.fields.offset(field_idx);
 +                    let field_layout = layout
 +                        .field(
 +                            &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
 +                            field_idx,
 +                        )
 +                        .unwrap();
 +
 +                    let field_type = self.dwarf_ty(field_layout.ty);
 +
 +                    let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
 +                    let field_entry = self.dwarf.unit.get_mut(field_id);
 +
 +                    field_entry.set(
 +                        gimli::DW_AT_name,
 +                        AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
 +                    );
 +                    field_entry.set(
 +                        gimli::DW_AT_data_member_location,
 +                        AttributeValue::Udata(field_offset.bytes()),
 +                    );
 +                    field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
 +                }
 +
 +                type_id
 +            }
 +            _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
 +        };
 +
 +        let type_entry = self.dwarf.unit.get_mut(type_id);
 +
 +        type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
 +        type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
 +
 +        self.types.insert(ty, type_id);
 +
 +        type_id
 +    }
 +
 +    fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
 +        let dw_ty = self.dwarf_ty(ty);
 +
 +        let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
 +        let var_entry = self.dwarf.unit.get_mut(var_id);
 +
 +        var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
 +        var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
 +
 +        var_id
 +    }
 +
 +    pub(crate) fn define_function(
 +        &mut self,
 +        instance: Instance<'tcx>,
 +        func_id: FuncId,
 +        name: &str,
 +        isa: &dyn TargetIsa,
 +        context: &Context,
 +        source_info_set: &indexmap::IndexSet<SourceInfo>,
 +        local_map: IndexVec<mir::Local, CPlace<'tcx>>,
 +    ) {
 +        let symbol = func_id.as_u32() as usize;
 +        let mir = self.tcx.instance_mir(instance.def);
 +
 +        // FIXME: add to appropriate scope instead of root
 +        let scope = self.dwarf.unit.root();
 +
 +        let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
 +        let entry = self.dwarf.unit.get_mut(entry_id);
 +        let name_id = self.dwarf.strings.add(name);
 +        // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
 +        entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
 +        entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
 +
 +        let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
 +
 +        self.unit_range_list.0.push(Range::StartLength {
 +            begin: Address::Symbol { symbol, addend: 0 },
 +            length: u64::from(end),
 +        });
 +
 +        let func_entry = self.dwarf.unit.get_mut(entry_id);
 +        // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
 +        func_entry.set(
 +            gimli::DW_AT_low_pc,
 +            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
 +        );
 +        // Using Udata for DW_AT_high_pc requires at least DWARF4
 +        func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
 +
 +        // FIXME make it more reliable and implement scopes before re-enabling this.
 +        if false {
 +            let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
 +
 +            for (local, _local_decl) in mir.local_decls.iter_enumerated() {
 +                let ty = self.tcx.subst_and_normalize_erasing_regions(
 +                    instance.substs,
 +                    ty::ParamEnv::reveal_all(),
 +                    mir.local_decls[local].ty,
 +                );
 +                let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
 +
 +                let location = place_location(
 +                    self,
 +                    isa,
 +                    symbol,
 +                    context,
 +                    &local_map,
 +                    &value_labels_ranges,
 +                    Place { local, projection: ty::List::empty() },
 +                );
 +
 +                let var_entry = self.dwarf.unit.get_mut(var_id);
 +                var_entry.set(gimli::DW_AT_location, location);
 +            }
 +        }
 +
 +        // FIXME create locals for all entries in mir.var_debug_info
 +    }
 +}
 +
 +fn place_location<'tcx>(
 +    debug_context: &mut DebugContext<'tcx>,
 +    isa: &dyn TargetIsa,
 +    symbol: usize,
 +    context: &Context,
 +    local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
 +    #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
 +        ValueLabel,
 +        Vec<ValueLocRange>,
 +    >,
 +    place: Place<'tcx>,
 +) -> AttributeValue {
 +    assert!(place.projection.is_empty()); // FIXME implement projection support
 +
 +    match local_map[place.local].inner() {
 +        CPlaceInner::Var(_local, var) => {
 +            let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
 +            if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
 +                let loc_list = LocationList(
 +                    value_loc_ranges
 +                        .iter()
 +                        .map(|value_loc_range| Location::StartEnd {
 +                            begin: Address::Symbol {
 +                                symbol,
 +                                addend: i64::from(value_loc_range.start),
 +                            },
 +                            end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
 +                            data: translate_loc(
 +                                isa,
 +                                value_loc_range.loc,
 +                                &context.func.stack_slots,
 +                            )
 +                            .unwrap(),
 +                        })
 +                        .collect(),
 +                );
 +                let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
 +
 +                AttributeValue::LocationListRef(loc_list_id)
 +            } else {
 +                // FIXME set value labels for unused locals
 +
 +                AttributeValue::Exprloc(Expression::new())
 +            }
 +        }
 +        CPlaceInner::VarPair(_, _, _) => {
 +            // FIXME implement this
 +
 +            AttributeValue::Exprloc(Expression::new())
 +        }
 +        CPlaceInner::VarLane(_, _, _) => {
 +            // FIXME implement this
 +
 +            AttributeValue::Exprloc(Expression::new())
 +        }
 +        CPlaceInner::Addr(_, _) => {
 +            // FIXME implement this (used by arguments and returns)
 +
 +            AttributeValue::Exprloc(Expression::new())
 +
 +            // For PointerBase::Stack:
 +            //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot), &context.func.stack_slots).unwrap())
 +        }
 +    }
 +}
 +
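 +// Translate a Cranelift value location into a DWARF expression: registers become
 +// `DW_OP_reg*` through the ISA's DWARF register mapping, while stack slots and SP offsets
 +// become `DW_OP_breg*` relative to RBP/RSP (hardcoded to x86_64 for now).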
 +// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
 +fn translate_loc(
 +    isa: &dyn TargetIsa,
 +    loc: LabelValueLoc,
 +    stack_slots: &StackSlots,
 +) -> Option<Expression> {
 +    match loc {
 +        LabelValueLoc::ValueLoc(ValueLoc::Reg(reg)) => {
 +            let machine_reg = isa.map_dwarf_register(reg).unwrap();
 +            let mut expr = Expression::new();
 +            expr.op_reg(gimli::Register(machine_reg));
 +            Some(expr)
 +        }
 +        LabelValueLoc::ValueLoc(ValueLoc::Stack(ss)) => {
 +            if let Some(ss_offset) = stack_slots[ss].offset {
 +                let mut expr = Expression::new();
 +                expr.op_breg(X86_64::RBP, i64::from(ss_offset) + 16);
 +                Some(expr)
 +            } else {
 +                None
 +            }
 +        }
 +        LabelValueLoc::ValueLoc(ValueLoc::Unassigned) => unreachable!(),
 +        LabelValueLoc::Reg(reg) => {
 +            let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
 +            let mut expr = Expression::new();
 +            expr.op_reg(gimli::Register(machine_reg));
 +            Some(expr)
 +        }
 +        LabelValueLoc::SPOffset(offset) => {
 +            let mut expr = Expression::new();
 +            expr.op_breg(X86_64::RSP, offset);
 +            Some(expr)
 +        }
 +    }
 +}
index 3979886e10cfc17997d1a30362860cdb85adf6c1,0000000000000000000000000000000000000000..86698460747494f7a95711fe58bfada5da0a34e0
mode 100644,000000..100644
--- /dev/null
@@@ -1,1160 -1,0 +1,1147 @@@
-         let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
-         let lane = val.value_field(fx, lane_idx).load_scalar(fx);
 +//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 +//! and LLVM intrinsics that have symbol names starting with `llvm.`.
 +
 +mod cpuid;
 +mod llvm;
 +mod simd;
 +
 +pub(crate) use cpuid::codegen_cpuid_call;
 +pub(crate) use llvm::codegen_llvm_intrinsic_call;
 +
 +use rustc_middle::ty::print::with_no_trimmed_paths;
 +use rustc_span::symbol::{kw, sym};
 +
 +use crate::prelude::*;
 +use cranelift_codegen::ir::AtomicRmwOp;
 +
 +macro intrinsic_pat {
 +    (_) => {
 +        _
 +    },
 +    ($name:ident) => {
 +        sym::$name
 +    },
 +    (kw.$name:ident) => {
 +        kw::$name
 +    },
 +    ($name:literal) => {
 +        $name
 +    },
 +}
 +
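 +// Argument binding modes used by `intrinsic_match!`: `o` passes the raw `mir::Operand`
 +// through unchanged, `c` lowers it to a `CValue` via `codegen_operand` and `v` additionally
 +// loads it as a scalar `Value`.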
 +macro intrinsic_arg {
 +    (o $fx:expr, $arg:ident) => {
 +        $arg
 +    },
 +    (c $fx:expr, $arg:ident) => {
 +        codegen_operand($fx, $arg)
 +    },
 +    (v $fx:expr, $arg:ident) => {
 +        codegen_operand($fx, $arg).load_scalar($fx)
 +    }
 +}
 +
 +macro intrinsic_substs {
 +    ($substs:expr, $index:expr,) => {},
 +    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
 +        let $first = $substs.type_at($index);
 +        intrinsic_substs!($substs, $index+1, $($rest),*);
 +    }
 +}
 +
 +macro intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
 +    _ => $unknown:block;
 +    $(
 +        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
 +    )*) => {
 +        let _ = $substs; // Silence warning when substs is unused.
 +        match $intrinsic {
 +            $(
 +                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
 +                    #[allow(unused_parens, non_snake_case)]
 +                    {
 +                        $(
 +                            intrinsic_substs!($substs, 0, $($subst),*);
 +                        )?
 +                        if let [$($arg),*] = $args {
 +                            let ($($arg,)*) = (
 +                                $(intrinsic_arg!($a $fx, $arg),)*
 +                            );
 +                            #[warn(unused_parens, non_snake_case)]
 +                            {
 +                                $content
 +                            }
 +                        } else {
 +                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                        }
 +                    }
 +                }
 +            )*
 +            _ => $unknown,
 +        }
 +    }
 +}
 +
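 +// Example use of `intrinsic_match!`, taken from `codegen_intrinsic_call` below; the `c`
 +// before `a` selects the `CValue` binding mode:
 +//
 +//     intrinsic_match! {
 +//         fx, intrinsic, substs, args,
 +//         _ => {
 +//             fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
 +//         };
 +//
 +//         likely | unlikely, (c a) {
 +//             ret.write_cvalue(fx, a);
 +//         };
 +//     }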
 +macro call_intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
 +        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
 +    )*) => {
 +        match $intrinsic {
 +            $(
 +                sym::$name => {
 +                    assert!($substs.is_noop());
 +                    if let [$(ref $arg),*] = *$args {
 +                        let ($($arg,)*) = (
 +                            $(codegen_operand($fx, $arg),)*
 +                        );
 +                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
 +                        $ret.write_cvalue($fx, res);
 +
 +                        if let Some((_, dest)) = $destination {
 +                            let ret_block = $fx.get_block(dest);
 +                            $fx.bcx.ins().jump(ret_block, &[]);
 +                            return;
 +                        } else {
 +                            unreachable!();
 +                        }
 +                    } else {
 +                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                    }
 +                }
 +            )*
 +            _ => {}
 +        }
 +    }
 +}
 +
 +macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    match $ty.kind() {
 +        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +        _ => {
 +            $fx.tcx.sess.span_err(
 +                $span,
 +                &format!(
 +                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
 +                    $intrinsic, $ty
 +                ),
 +            );
 +            // Prevent verifier error
 +            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +            return;
 +        }
 +    }
 +}
 +
 +macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    if !$ty.is_simd() {
 +        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
 +        // Prevent verifier error
 +        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +        return;
 +    }
 +}
 +
 +pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
 +    let (element, count) = match &layout.abi {
 +        Abi::Vector { element, count } => (element.clone(), *count),
 +        _ => unreachable!(),
 +    };
 +
 +    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
 +        // Cranelift currently only implements icmp for 128-bit vectors.
 +        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
 +        _ => None,
 +    }
 +}
 +
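 +// Apply `f` to every lane of `val` and write each result lane into `ret`. Input and output
 +// must have the same lane count, but the lane types may differ (as in `simd_cast`).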
 +fn simd_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, '_, 'tcx>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    let layout = val.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
-         ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
++        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
 +
-     for lane in 0..lane_count {
-         let lane = mir::Field::new(lane.try_into().unwrap());
-         let x_lane = x.value_field(fx, lane).load_scalar(fx);
-         let y_lane = y.value_field(fx, lane).load_scalar(fx);
++        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_pair_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    x: CValue<'tcx>,
 +    y: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, '_, 'tcx>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    assert_eq!(x.layout(), y.layout());
 +    let layout = x.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
-         ret.place_field(fx, lane).write_cvalue(fx, res_lane);
++    for lane_idx in 0..lane_count {
++        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
++        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
 +
-     let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
-     for lane_idx in 1..lane_count {
-         let lane =
-             val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
++        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
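 +// Fold all lanes of `val` into a single scalar using `f`. When an accumulator `acc` is
 +// provided the fold starts there and visits every lane; otherwise lane 0 seeds the fold
 +// and the remaining lanes are folded in.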
 +fn simd_reduce<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
++    acc: Option<Value>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    assert_eq!(lane_layout, ret.layout());
 +
-     let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
++    let (mut res_val, start_lane) =
++        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
++    for lane_idx in start_lane..lane_count {
++        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +        res_val = f(fx, lane_layout, res_val, lane);
 +    }
 +    let res = CValue::by_val(res_val, lane_layout);
 +    ret.write_cvalue(fx, res);
 +}
 +
++// FIXME move all uses to `simd_reduce`
 +fn simd_reduce_bool<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    assert!(ret.layout().ty.is_bool());
 +
-         let lane =
-             val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
++    let res_val = val.value_lane(fx, 0).load_scalar(fx);
 +    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
 +    for lane_idx in 1..lane_count {
-             let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
++        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
 +        res_val = f(fx, res_val, lane);
 +    }
++    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
++        fx.bcx.ins().ireduce(types::I8, res_val)
++    } else {
++        res_val
++    };
 +    let res = CValue::by_val(res_val, ret.layout());
 +    ret.write_cvalue(fx, res);
 +}
 +
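 +// Expand a Cranelift boolean into 0 or !0 (all ones) of the given lane type: `bint`
 +// yields 0/1, negating 1 gives all ones, and float lanes are produced by bitcasting the
 +// integer result back to the float type.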
 +fn bool_to_zero_or_max_uint<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    layout: TyAndLayout<'tcx>,
 +    val: Value,
 +) -> CValue<'tcx> {
 +    let ty = fx.clif_type(layout.ty).unwrap();
 +
 +    let int_ty = match ty {
 +        types::F32 => types::I32,
 +        types::F64 => types::I64,
 +        ty => ty,
 +    };
 +
 +    let val = fx.bcx.ins().bint(int_ty, val);
 +    let mut res = fx.bcx.ins().ineg(val);
 +
 +    if ty.is_float() {
 +        res = fx.bcx.ins().bitcast(ty, res);
 +    }
 +
 +    CValue::by_val(res, layout)
 +}
 +
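 +// Lane-wise SIMD comparison, e.g. `simd_cmp!(fx, Equal|Equal(x, y) -> ret)`. If the operand
 +// layout maps to a native 128-bit Cranelift vector type, a single vector icmp/fcmp is
 +// emitted; otherwise (and always for the unsigned|signed|float form) the lanes are compared
 +// one by one through `simd_pair_for_each_lane`.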
 +macro simd_cmp {
 +    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
 +
 +        if let Some(vector_ty) = vector_ty {
 +            let x = $x.load_scalar($fx);
 +            let y = $y.load_scalar($fx);
-             // `select.i8` is not implemented by Cranelift.
-             let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
++            let val = if vector_ty.lane_type().is_float() {
++                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
++            } else {
++                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
++            };
 +
 +            // HACK: This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
 +            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
 +
 +            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
 +        } else {
 +            simd_pair_for_each_lane(
 +                $fx,
 +                $x,
 +                $y,
 +                $ret,
 +                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                    let res_lane = match lane_layout.ty.kind() {
 +                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
 +                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
 +                        _ => unreachable!("{:?}", lane_layout.ty),
 +                    };
 +                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +                },
 +            );
 +        }
 +    },
 +    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        // FIXME use vector icmp when possible
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
 +                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_binop {
 +    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_flt_binop {
 +    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
 +    simd_pair_for_each_lane(
 +        $fx,
 +        $x,
 +        $y,
 +        $ret,
 +        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +            let res_lane = match lane_layout.ty.kind() {
 +                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
 +                _ => unreachable!("{:?}", lane_layout.ty),
 +            };
 +            CValue::by_val(res_lane, ret_lane_layout)
 +        },
 +    );
 +}
 +
 +pub(crate) fn codegen_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    destination: Option<(CPlace<'tcx>, BasicBlock)>,
 +    span: Span,
 +) {
 +    let def_id = instance.def_id();
 +    let substs = instance.substs;
 +
 +    let intrinsic = fx.tcx.item_name(def_id);
 +
 +    let ret = match destination {
 +        Some((place, _)) => place,
 +        None => {
 +            // Insert non-returning intrinsics here
 +            match intrinsic {
 +                sym::abort => {
 +                    trap_abort(fx, "Called intrinsic::abort.");
 +                }
 +                sym::transmute => {
 +                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
 +                }
 +                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
 +            }
 +            return;
 +        }
 +    };
 +
 +    if intrinsic.as_str().starts_with("simd_") {
 +        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
 +        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +        return;
 +    }
 +
 +    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +
 +    call_intrinsic_match! {
 +        fx, intrinsic, substs, ret, destination, args,
 +        expf32(flt) -> f32 => expf,
 +        expf64(flt) -> f64 => exp,
 +        exp2f32(flt) -> f32 => exp2f,
 +        exp2f64(flt) -> f64 => exp2,
 +        sqrtf32(flt) -> f32 => sqrtf,
 +        sqrtf64(flt) -> f64 => sqrt,
 +        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
 +        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
 +        powf32(a, x) -> f32 => powf,
 +        powf64(a, x) -> f64 => pow,
 +        logf32(flt) -> f32 => logf,
 +        logf64(flt) -> f64 => log,
 +        log2f32(flt) -> f32 => log2f,
 +        log2f64(flt) -> f64 => log2,
 +        log10f32(flt) -> f32 => log10f,
 +        log10f64(flt) -> f64 => log10,
 +        fabsf32(flt) -> f32 => fabsf,
 +        fabsf64(flt) -> f64 => fabs,
 +        fmaf32(x, y, z) -> f32 => fmaf,
 +        fmaf64(x, y, z) -> f64 => fma,
 +        copysignf32(x, y) -> f32 => copysignf,
 +        copysignf64(x, y) -> f64 => copysign,
 +
 +        // rounding variants
 +        // FIXME use clif insts
 +        floorf32(flt) -> f32 => floorf,
 +        floorf64(flt) -> f64 => floor,
 +        ceilf32(flt) -> f32 => ceilf,
 +        ceilf64(flt) -> f64 => ceil,
 +        truncf32(flt) -> f32 => truncf,
 +        truncf64(flt) -> f64 => trunc,
 +        roundf32(flt) -> f32 => roundf,
 +        roundf64(flt) -> f64 => round,
 +
 +        // trigonometry
 +        sinf32(flt) -> f32 => sinf,
 +        sinf64(flt) -> f64 => sin,
 +        cosf32(flt) -> f32 => cosf,
 +        cosf64(flt) -> f64 => cos,
 +    }
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
 +        };
 +
 +        assume, (c _a) {};
 +        likely | unlikely, (c a) {
 +            ret.write_cvalue(fx, a);
 +        };
 +        breakpoint, () {
 +            fx.bcx.ins().debugtrap();
 +        };
 +        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = if elem_size != 1 {
 +                fx.bcx.ins().imul_imm(count, elem_size as i64)
 +            } else {
 +                count
 +            };
 +
 +            if intrinsic == sym::copy_nonoverlapping {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        // NOTE: the volatile variants have src and dst swapped
 +        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = if elem_size != 1 {
 +                fx.bcx.ins().imul_imm(count, elem_size as i64)
 +            } else {
 +                count
 +            };
 +
 +            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
 +            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        size_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let size = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                size
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
 +        };
 +        min_align_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let align = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                align
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
 +        };
 +
 +        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
 +        | unchecked_shl | unchecked_shr, (c x, c y) {
 +            // FIXME trap on overflow
 +            let bin_op = match intrinsic {
 +                sym::unchecked_add => BinOp::Add,
 +                sym::unchecked_sub => BinOp::Sub,
 +                sym::unchecked_div | sym::exact_div => BinOp::Div,
 +                sym::unchecked_rem => BinOp::Rem,
 +                sym::unchecked_shl => BinOp::Shl,
 +                sym::unchecked_shr => BinOp::Shr,
 +                _ => unreachable!(),
 +            };
 +            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
 +            assert_eq!(x.layout().ty, y.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::add_with_overflow => BinOp::Add,
 +                sym::sub_with_overflow => BinOp::Sub,
 +                sym::mul_with_overflow => BinOp::Mul,
 +                _ => unreachable!(),
 +            };
 +
 +            let res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                x,
 +                y,
 +            );
 +            ret.write_cvalue(fx, res);
 +        };
 +        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
 +            assert_eq!(lhs.layout().ty, rhs.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::saturating_add => BinOp::Add,
 +                sym::saturating_sub => BinOp::Sub,
 +                _ => unreachable!(),
 +            };
 +
 +            let signed = type_sign(T);
 +
 +            let checked_res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                lhs,
 +                rhs,
 +            );
 +
 +            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
 +            let clif_ty = fx.clif_type(T).unwrap();
 +
-             let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
-                 fx.bcx.ins().ireduce(types::I64, y)
-             } else {
-                 y
-             };
 +            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 +
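 +            // On overflow, clamp to the type's bounds: unsigned saturates to `max` on add
 +            // and to `min` on sub, while signed picks the bound based on the sign of `rhs`.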
 +            let val = match (intrinsic, signed) {
 +                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
 +                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
 +                (sym::saturating_add, true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                (sym::saturating_sub, true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                _ => unreachable!(),
 +            };
 +
 +            let res = CValue::by_val(val, fx.layout_of(T));
 +
 +            ret.write_cvalue(fx, res);
 +        };
 +        rotate_left, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
-             let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
-                 fx.bcx.ins().ireduce(types::I64, y)
-             } else {
-                 y
-             };
 +            let res = fx.bcx.ins().rotl(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +        rotate_right, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
-             let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
-                 // FIXME verify this algorithm is correct
-                 let (lsb, msb) = fx.bcx.ins().isplit(arg);
-                 let lsb_lz = fx.bcx.ins().clz(lsb);
-                 let msb_lz = fx.bcx.ins().clz(msb);
-                 let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
-                 let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
-                 let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
-                 fx.bcx.ins().uextend(types::I128, res)
-             } else {
-                 fx.bcx.ins().clz(arg)
-             };
 +            let res = fx.bcx.ins().rotr(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +
 +        // The only difference between offset and arith_offset is their UB requirements.
 +        // Because Cranelift doesn't have a concept of UB, both are codegen'ed the same way.
 +        offset | arith_offset, (c base, v offset) {
 +            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let ptr_diff = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
 +            } else {
 +                offset
 +            };
 +            let base_val = base.load_scalar(fx);
 +            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
 +        };
 +
 +        transmute, (c from) {
 +            ret.write_cvalue_transmute(fx, from);
 +        };
 +        write_bytes | volatile_set_memory, (c dst, v val, v count) {
 +            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let count = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(count, pointee_size as i64)
 +            } else {
 +                count
 +            };
 +            let dst_ptr = dst.load_scalar(fx);
 +            // FIXME make the memset actually volatile when switching to emit_small_memset
 +            // FIXME use emit_small_memset
 +            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
 +        };
 +        ctlz | ctlz_nonzero, <T> (v arg) {
 +            // FIXME trap on `ctlz_nonzero` with zero arg.
-             let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
-                 // FIXME verify this algorithm is correct
-                 let (lsb, msb) = fx.bcx.ins().isplit(arg);
-                 let lsb_tz = fx.bcx.ins().ctz(lsb);
-                 let msb_tz = fx.bcx.ins().ctz(msb);
-                 let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
-                 let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
-                 let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
-                 fx.bcx.ins().uextend(types::I128, res)
-             } else {
-                 fx.bcx.ins().ctz(arg)
-             };
++            let res = fx.bcx.ins().clz(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        cttz | cttz_nonzero, <T> (v arg) {
 +            // FIXME trap on `cttz_nonzero` with zero arg.
-         // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
++            let res = fx.bcx.ins().ctz(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        ctpop, <T> (v arg) {
 +            let res = fx.bcx.ins().popcnt(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bitreverse, <T> (v arg) {
 +            let res = fx.bcx.ins().bitrev(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bswap, <T> (v arg) {
 +            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
 +            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 +                match bcx.func.dfg.value_type(v) {
 +                    types::I8 => v,
 +
 +                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
 +                    types::I16 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 8);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
 +
 +                        let tmp2 = bcx.ins().ushr_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
 +
 +                        bcx.ins().bor(n1, n2)
 +                    }
 +                    types::I32 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 24);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
 +
 +                        let tmp3 = bcx.ins().ushr_imm(v, 8);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
 +
 +                        let tmp4 = bcx.ins().ushr_imm(v, 24);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        bcx.ins().bor(or_tmp1, or_tmp2)
 +                    }
 +                    types::I64 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 56);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 40);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
 +
 +                        let tmp3 = bcx.ins().ishl_imm(v, 24);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
 +
 +                        let tmp4 = bcx.ins().ishl_imm(v, 8);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
 +
 +                        let tmp5 = bcx.ins().ushr_imm(v, 8);
 +                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
 +
 +                        let tmp6 = bcx.ins().ushr_imm(v, 24);
 +                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
 +
 +                        let tmp7 = bcx.ins().ushr_imm(v, 40);
 +                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
 +
 +                        let tmp8 = bcx.ins().ushr_imm(v, 56);
 +                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        let or_tmp3 = bcx.ins().bor(n5, n6);
 +                        let or_tmp4 = bcx.ins().bor(n7, n8);
 +
 +                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
 +                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
 +                        bcx.ins().bor(or_tmp5, or_tmp6)
 +                    }
 +                    types::I128 => {
 +                        let (lo, hi) = bcx.ins().isplit(v);
 +                        let lo = swap(bcx, lo);
 +                        let hi = swap(bcx, hi);
 +                        bcx.ins().iconcat(hi, lo)
 +                    }
 +                    ty => unreachable!("bswap {}", ty),
 +                }
 +            }
 +            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
 +            let layout = fx.layout_of(T);
 +            if layout.abi.is_uninhabited() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to instantiate uninhabited type `{}`", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +        };
 +
 +        volatile_load | unaligned_volatile_load, (c ptr) {
 +            // Cranelift treats loads as volatile by default
 +            // FIXME correctly handle unaligned_volatile_load
 +            let inner_layout =
 +                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
 +            ret.write_cvalue(fx, val);
 +        };
 +        volatile_store | unaligned_volatile_store, (v ptr, c val) {
 +            // Cranelift treats stores as volatile by default
 +            // FIXME correctly handle unaligned_volatile_store
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
 +            dest.write_cvalue(fx, val);
 +        };
 +
 +        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
 +            let const_val =
 +                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
 +            let val = crate::constant::codegen_const_value(
 +                fx,
 +                const_val,
 +                ret.layout().ty,
 +            );
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_offset_from, <T> (v ptr, v base) {
 +            let isize_layout = fx.layout_of(fx.tcx.types.isize);
 +
 +            let pointee_size: u64 = fx.layout_of(T).size.bytes();
 +            let diff = fx.bcx.ins().isub(ptr, base);
 +            // FIXME this can be an exact division.
 +            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_guaranteed_eq, (c a, c b) {
 +            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_guaranteed_ne, (c a, c b) {
 +            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        caller_location, () {
 +            let caller_location = fx.get_caller_location(span);
 +            ret.write_cvalue(fx, caller_location);
 +        };
 +
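 +        // The `atomic_*` intrinsics lower to Cranelift's atomic instructions: fences,
 +        // `atomic_load`/`atomic_store`, `atomic_cas` for the compare-exchange family and
 +        // `atomic_rmw` with the matching `AtomicRmwOp` for the read-modify-write family.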
 +        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
 +            fx.bcx.ins().fence();
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
 +            // FIXME use a compiler fence once Cranelift supports it
 +            fx.bcx.ins().fence();
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
 +            validate_atomic_type!(fx, intrinsic, span, T);
 +            let ty = fx.clif_type(T).unwrap();
 +
 +            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
 +
 +            let val = CValue::by_val(val, fx.layout_of(T));
 +            ret.write_cvalue(fx, val);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
 +            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
 +
 +            let val = val.load_scalar(fx);
 +
 +            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
 +            let layout = new.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
 +            let layout = new.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +
 +            let test_old = test_old.load_scalar(fx);
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
 +            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 +
 +            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
 +            ret.write_cvalue(fx, ret_val)
 +        };
 +
 +        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
 +            let layout = amount.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
 +            let layout = amount.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
-             let val = fx.bcx.ins().fmin(a, b);
 +        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +
++        // In Rust, floating point min and max don't propagate NaN; in Cranelift they do.
++        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
++        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. The NaN check compares a float
++        // against itself: only NaN is not equal to itself.
 +        minnumf32, (v a, v b) {
-             let val = fx.bcx.ins().fmin(a, b);
++            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
++            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
++            let temp = fx.bcx.ins().select(a_ge_b, b, a);
++            let val = fx.bcx.ins().select(a_is_nan, b, temp);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        minnumf64, (v a, v b) {
-             let val = fx.bcx.ins().fmax(a, b);
++            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
++            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
++            let temp = fx.bcx.ins().select(a_ge_b, b, a);
++            let val = fx.bcx.ins().select(a_is_nan, b, temp);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf32, (v a, v b) {
-             let val = fx.bcx.ins().fmax(a, b);
++            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
++            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
++            let temp = fx.bcx.ins().select(a_le_b, b, a);
++            let val = fx.bcx.ins().select(a_is_nan, b, temp);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf64, (v a, v b) {
-                     let ptr_ty = pointer_ty(fx.tcx);
++            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
++            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
++            let temp = fx.bcx.ins().select(a_le_b, b, a);
++            let val = fx.bcx.ins().select(a_is_nan, b, temp);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        kw.Try, (v f, v data, v _catch_fn) {
 +            // FIXME once unwinding is supported, change this to actually catch panics
 +            let f_sig = fx.bcx.func.import_signature(Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
 +                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
 +                returns: vec![],
 +            });
 +
 +            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 +
 +            let layout = ret.layout();
 +            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
 +            ret.write_cvalue(fx, ret_val);
 +        };
 +
 +        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
 +            let res = crate::num::codegen_float_binop(fx, match intrinsic {
 +                sym::fadd_fast => BinOp::Add,
 +                sym::fsub_fast => BinOp::Sub,
 +                sym::fmul_fast => BinOp::Mul,
 +                sym::fdiv_fast => BinOp::Div,
 +                sym::frem_fast => BinOp::Rem,
 +                _ => unreachable!(),
 +            }, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        float_to_int_unchecked, (v f) {
 +            let res = crate::cast::clif_int_or_float_cast(
 +                fx,
 +                f,
 +                false,
 +                fx.clif_type(ret.layout().ty).unwrap(),
 +                type_sign(ret.layout().ty),
 +            );
 +            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
 +        };
 +
 +        raw_eq, <T>(v lhs_ref, v rhs_ref) {
 +            fn type_by_size(size: Size) -> Option<Type> {
 +                Type::int(size.bits().try_into().ok()?)
 +            }
 +
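 +            // Strategy: zero-sized values are trivially equal; values that fit a single
 +            // integer type are loaded and compared with a plain `icmp`; everything else
 +            // goes through a `memcmp` libcall.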
 +            let size = fx.layout_of(T).layout.size;
++            // FIXME add and use emit_small_memcmp
 +            let is_eq_value =
 +                if size == Size::ZERO {
 +                    // No bytes means they're trivially equal
 +                    fx.bcx.ins().iconst(types::I8, 1)
 +                } else if let Some(clty) = type_by_size(size) {
 +                    // Can't use `trusted` for these loads; they could be unaligned.
 +                    let mut flags = MemFlags::new();
 +                    flags.set_notrap();
 +                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
 +                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
 +                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
 +                    fx.bcx.ins().bint(types::I8, eq)
 +                } else {
 +                    // Just call `memcmp` (like slices do in core) when the
 +                    // size is too large or it's not a power-of-two.
-                     let bytes_val = fx.bcx.ins().iconst(ptr_ty, signed_bytes);
-                     let params = vec![AbiParam::new(ptr_ty); 3];
 +                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
++                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
++                    let params = vec![AbiParam::new(fx.pointer_type); 3];
 +                    let returns = vec![AbiParam::new(types::I32)];
 +                    let args = &[lhs_ref, rhs_ref, bytes_val];
 +                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
 +                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
 +                    fx.bcx.ins().bint(types::I8, eq)
 +                };
 +            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
 +        };
 +    }
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
 +    }
 +}
index c2f469fa021e15ab0703f65b6bc1596de4c80a20,0000000000000000000000000000000000000000..43e68b4afa9eac0140264ebf9259a90b77eb9599
mode 100644,000000..100644
--- /dev/null
@@@ -1,281 -1,0 +1,431 @@@
-                     x.value_field(fx, mir::Field::new(in_idx.into()))
 +//! Codegen `extern "platform-intrinsic"` intrinsics.
 +
 +use super::*;
 +use crate::prelude::*;
 +
 +pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +    span: Span,
 +) {
 +    let def_id = instance.def_id();
 +    let substs = instance.substs;
 +
 +    let intrinsic = fx.tcx.item_name(def_id);
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
 +        };
 +
 +        simd_cast, (c a) {
 +            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
 +            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
 +                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
 +
 +                let from_signed = type_sign(lane_layout.ty);
 +                let to_signed = type_sign(ret_lane_layout.ty);
 +
 +                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
 +                CValue::by_val(ret_lane, ret_lane_layout)
 +            });
 +        };
 +
 +        simd_eq, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, Equal|Equal(x, y) -> ret);
 +        };
 +        simd_ne, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
 +        };
 +        simd_lt, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
 +        };
 +        simd_le, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
 +        };
 +        simd_gt, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
 +        };
 +        simd_ge, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_cmp!(
 +                fx,
 +                UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
 +                (x, y) -> ret
 +            );
 +        };
 +
 +        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
 +        _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +
 +            let n: u16 = intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap();
 +
 +            assert_eq!(x.layout(), y.layout());
 +            let layout = x.layout();
 +
 +            let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +
 +            assert_eq!(lane_ty, ret_lane_ty);
 +            assert_eq!(u64::from(n), ret_lane_count);
 +
 +            let total_len = lane_count * 2;
 +
 +            let indexes = {
 +                use rustc_middle::mir::interpret::*;
 +                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
 +
 +                let idx_bytes = match idx_const {
 +                    ConstValue::ByRef { alloc, offset } => {
 +                        let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
 +                        alloc.get_bytes(fx, alloc_range(offset, size)).unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", idx_const),
 +                };
 +
 +                (0..ret_lane_count).map(|i| {
 +                    let i = usize::try_from(i).unwrap();
 +                    let idx = rustc_middle::mir::interpret::read_target_uint(
 +                        fx.tcx.data_layout.endian,
 +                        &idx_bytes[4*i.. 4*i + 4],
 +                    ).expect("read_target_uint");
 +                    u16::try_from(idx).expect("try_from u32")
 +                }).collect::<Vec<u16>>()
 +            };
 +
 +            for &idx in &indexes {
 +                assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
 +            }
 +
 +            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
 +                let in_lane = if u64::from(in_idx) < lane_count {
-                     y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
++                    x.value_lane(fx, in_idx.into())
 +                } else {
-                 let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
++                    y.value_lane(fx, u64::from(in_idx) - lane_count)
 +                };
-             let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
++                let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
 +                out_lane.write_cvalue(fx, in_lane);
 +            }
 +        };
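 +
 +        // A minimal sketch of the index semantics above, assuming hypothetical
 +        // 4-lane vectors: indexes below `lane_count` select from `x`, the rest
 +        // from `y`.
 +        //
 +        //     let x = [1, 2, 3, 4];
 +        //     let y = [5, 6, 7, 8];
 +        //     let idx = [0, 4, 1, 5];
 +        //     let out: Vec<i32> = idx.iter()
 +        //         .map(|&i| if i < 4 { x[i] } else { y[i - 4] })
 +        //         .collect();
 +        //     assert_eq!(out, [1, 5, 2, 6]);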
 +
 +        simd_insert, (c base, o idx, c val) {
 +            // FIXME validate
 +            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_fatal(
 +                    span,
 +                    "Index argument for `simd_insert` is not a constant",
 +                );
 +            };
 +
 +            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
 +            }
 +
 +            ret.write_cvalue(fx, base);
 +            let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
 +            ret_lane.write_cvalue(fx, val);
 +        };
 +
 +        simd_extract, (c v, o idx) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
 +            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_warn(
 +                    span,
 +                    "Index argument for `simd_extract` is not a constant",
 +                );
 +                let res = crate::trap::trap_unimplemented_ret_value(
 +                    fx,
 +                    ret.layout(),
 +                    "Index argument for `simd_extract` is not a constant",
 +                );
 +                ret.write_cvalue(fx, res);
 +                return;
 +            };
 +
 +            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
 +            }
 +
-                 let lane = mir::Field::new(lane.try_into().unwrap());
-                 let a_lane = a.value_field(fx, lane).load_scalar(fx);
-                 let b_lane = b.value_field(fx, lane).load_scalar(fx);
-                 let c_lane = c.value_field(fx, lane).load_scalar(fx);
++            let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
 +            ret.write_cvalue(fx, ret_lane);
 +        };
 +
++        simd_neg, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
++                let ret_lane = match lane_layout.ty.kind() {
++                    ty::Int(_) => fx.bcx.ins().ineg(lane),
++                    ty::Float(_) => fx.bcx.ins().fneg(lane),
++                    _ => unreachable!(),
++                };
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++
++        simd_fabs, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
++                let ret_lane = fx.bcx.ins().fabs(lane);
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++
++        simd_fsqrt, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
++                let ret_lane = fx.bcx.ins().sqrt(lane);
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++
 +        simd_add, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
 +        };
 +        simd_sub, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
 +        };
 +        simd_mul, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
 +        };
 +        simd_div, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
 +        };
++        simd_rem, (c x, c y) {
++            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
++            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
++                let res_lane = match lane_layout.ty.kind() {
++                    ty::Uint(_) => fx.bcx.ins().urem(x_lane, y_lane),
++                    ty::Int(_) => fx.bcx.ins().srem(x_lane, y_lane),
++                    ty::Float(FloatTy::F32) => fx.lib_call(
++                        "fmodf",
++                        vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
++                        vec![AbiParam::new(types::F32)],
++                        &[x_lane, y_lane],
++                    )[0],
++                    ty::Float(FloatTy::F64) => fx.lib_call(
++                        "fmod",
++                        vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
++                        vec![AbiParam::new(types::F64)],
++                        &[x_lane, y_lane],
++                    )[0],
++                    _ => unreachable!("{:?}", lane_layout.ty),
++                };
++                CValue::by_val(res_lane, ret_lane_layout)
++            });
++        };
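++
++        // A small sketch of the float path above: lane-wise `%` on floats has
++        // libm `fmod` semantics, which is what the `lib_call` fallback computes.
++        //
++        //     assert_eq!(5.5f32 % 2.0, 1.5); // lowered to a call like fmodf(5.5, 2.0)
++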
 +        simd_shl, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, ishl(x, y) -> ret);
 +        };
 +        simd_shr, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
 +        };
 +        simd_and, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, band(x, y) -> ret);
 +        };
 +        simd_or, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, bor(x, y) -> ret);
 +        };
 +        simd_xor, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_int_binop!(fx, bxor(x, y) -> ret);
 +        };
 +
 +        simd_fma, (c a, c b, c c) {
 +            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
 +            assert_eq!(a.layout(), b.layout());
 +            assert_eq!(a.layout(), c.layout());
 +            let layout = a.layout();
 +
 +            let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +            assert_eq!(lane_count, ret_lane_count);
 +            let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +
 +            for lane in 0..lane_count {
-                 ret.place_field(fx, lane).write_cvalue(fx, res_lane);
++                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
++                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
++                let c_lane = c.value_lane(fx, lane).load_scalar(fx);
 +
 +                let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
 +                let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
 +
-         simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
++                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
 +            }
 +        };
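 +
 +        // Note that the lowering above is an unfused `a * b + c` (separate `fmul`
 +        // and `fadd`, so two roundings), not a single fused multiply-add. A sketch
 +        // of the per-lane semantics it implements:
 +        //
 +        //     let (a, b, c) = (2.0f32, 3.0f32, 4.0f32);
 +        //     assert_eq!(a * b + c, 10.0);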
 +
 +        simd_fmin, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_flt_binop!(fx, fmin(x, y) -> ret);
 +        };
 +        simd_fmax, (c x, c y) {
 +            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
 +            simd_flt_binop!(fx, fmax(x, y) -> ret);
 +        };
 +
-             simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
++        simd_round, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
++                let res_lane = match lane_layout.ty.kind() {
++                    ty::Float(FloatTy::F32) => fx.lib_call(
++                        "roundf",
++                        vec![AbiParam::new(types::F32)],
++                        vec![AbiParam::new(types::F32)],
++                        &[lane],
++                    )[0],
++                    ty::Float(FloatTy::F64) => fx.lib_call(
++                        "round",
++                        vec![AbiParam::new(types::F64)],
++                        vec![AbiParam::new(types::F64)],
++                        &[lane],
++                    )[0],
++                    _ => unreachable!("{:?}", lane_layout.ty),
++                };
++                CValue::by_val(res_lane, ret_lane_layout)
++            });
++        };
++        simd_ceil, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
++                let ret_lane = fx.bcx.ins().ceil(lane);
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++        simd_floor, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
++                let ret_lane = fx.bcx.ins().floor(lane);
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++        simd_trunc, (c a) {
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
++                let ret_lane = fx.bcx.ins().trunc(lane);
++                CValue::by_val(ret_lane, ret_lane_layout)
++            });
++        };
++
++        simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-         simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
++            simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
 +                if lane_layout.ty.is_floating_point() {
 +                    fx.bcx.ins().fadd(a, b)
 +                } else {
 +                    fx.bcx.ins().iadd(a, b)
 +                }
 +            });
 +        };
 +
-             simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
++        simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
-         // simd_fabs
-         // simd_saturating_add
++            simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
 +                if lane_layout.ty.is_floating_point() {
 +                    fx.bcx.ins().fmul(a, b)
 +                } else {
 +                    fx.bcx.ins().imul(a, b)
 +                }
 +            });
 +        };
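 +
 +        // Both reductions above fold an explicit accumulator through the lanes,
 +        // e.g. (assuming `acc` is the initial value, as `Some(acc)` suggests):
 +        //
 +        //     let acc = 0i32;
 +        //     let v = [1, 2, 3, 4];
 +        //     let sum = v.iter().fold(acc, |a, &b| a + b);
 +        //     assert_eq!(sum, 10);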
 +
 +        simd_reduce_all, (c v) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
 +            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
 +        };
 +
 +        simd_reduce_any, (c v) {
 +            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
 +            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
 +        };
 +
-         // simd_select
-         // simd_rem
-         // simd_neg
-         // simd_trunc
-         // simd_floor
++        simd_reduce_and, (c v) {
++            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().band(a, b));
++        };
++
++        simd_reduce_or, (c v) {
++            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bor(a, b));
++        };
++
++        simd_reduce_xor, (c v) {
++            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++            simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bxor(a, b));
++        };
++
++        simd_reduce_min, (c v) {
++            // FIXME support floats
++            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++            simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
++                let lt = fx.bcx.ins().icmp(if layout.ty.is_signed() {
++                    IntCC::SignedLessThan
++                } else {
++                    IntCC::UnsignedLessThan
++                }, a, b);
++                fx.bcx.ins().select(lt, a, b)
++            });
++        };
++
++        simd_reduce_max, (c v) {
++            // FIXME support floats
++            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++            simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
++                let gt = fx.bcx.ins().icmp(if layout.ty.is_signed() {
++                    IntCC::SignedGreaterThan
++                } else {
++                    IntCC::UnsignedGreaterThan
++                }, a, b);
++                fx.bcx.ins().select(gt, a, b)
++            });
++        };
++
++        simd_select, (c m, c a, c b) {
++            validate_simd_type!(fx, intrinsic, span, m.layout().ty);
++            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++            assert_eq!(a.layout(), b.layout());
++
++            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
++            let lane_layout = fx.layout_of(lane_ty);
++
++            for lane in 0..lane_count {
++                let m_lane = m.value_lane(fx, lane).load_scalar(fx);
++                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
++                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
++
++                let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
++                let res_lane = CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
++
++                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
++            }
++        };
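++
++        // A sketch of the mask semantics implemented above: a lane is taken from
++        // `a` where the mask lane is non-zero, otherwise from `b`.
++        //
++        //     let m = [-1i32, 0, -1, 0];
++        //     let a = [1, 2, 3, 4];
++        //     let b = [5, 6, 7, 8];
++        //     let out: Vec<i32> = (0..4)
++        //         .map(|i| if m[i] != 0 { a[i] } else { b[i] })
++        //         .collect();
++        //     assert_eq!(out, [1, 6, 3, 8]);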
++
++        // simd_saturating_*
 +        // simd_bitmask
++        // simd_scatter
++        // simd_gather
 +    }
 +}
index e32dae49131ab0487d7e0a704425f54ffc7cf6b0,0000000000000000000000000000000000000000..4ef53663ca0d95219549c88c440ecf9a0b50522a
mode 100644,000000..100644
--- /dev/null
@@@ -1,304 -1,0 +1,312 @@@
-         link_binary::<crate::archive::ArArchiveBuilder<'_>>(
-             sess,
-             &codegen_results,
-             outputs,
-         )
 +#![feature(rustc_private, decl_macro, never_type, hash_drain_filter, vec_into_raw_parts, once_cell)]
 +#![warn(rust_2018_idioms)]
 +#![warn(unused_lifetimes)]
 +#![warn(unreachable_pub)]
 +
 +extern crate snap;
 +#[macro_use]
 +extern crate rustc_middle;
 +extern crate rustc_ast;
 +extern crate rustc_codegen_ssa;
 +extern crate rustc_data_structures;
 +extern crate rustc_errors;
 +extern crate rustc_fs_util;
 +extern crate rustc_hir;
 +extern crate rustc_incremental;
 +extern crate rustc_index;
 +extern crate rustc_interface;
 +extern crate rustc_metadata;
 +extern crate rustc_mir;
 +extern crate rustc_session;
 +extern crate rustc_span;
 +extern crate rustc_target;
 +
 +// This prevents duplicating functions and statics that are already part of the host rustc process.
 +#[allow(unused_extern_crates)]
 +extern crate rustc_driver;
 +
 +use std::any::Any;
 +
 +use rustc_codegen_ssa::traits::CodegenBackend;
 +use rustc_codegen_ssa::CodegenResults;
 +use rustc_errors::ErrorReported;
 +use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 +use rustc_middle::middle::cstore::EncodedMetadata;
 +use rustc_session::config::OutputFilenames;
 +use rustc_session::Session;
 +
 +use cranelift_codegen::isa::TargetIsa;
 +use cranelift_codegen::settings::{self, Configurable};
 +
 +pub use crate::config::*;
 +use crate::prelude::*;
 +
 +mod abi;
 +mod allocator;
 +mod analyze;
 +mod archive;
 +mod backend;
 +mod base;
 +mod cast;
 +mod codegen_i128;
 +mod common;
 +mod compiler_builtins;
 +mod config;
 +mod constant;
 +mod debuginfo;
 +mod discriminant;
 +mod driver;
 +mod inline_asm;
 +mod intrinsics;
 +mod linkage;
 +mod main_shim;
 +mod metadata;
 +mod num;
 +mod optimize;
 +mod pointer;
 +mod pretty_clif;
 +mod toolchain;
 +mod trap;
 +mod unsize;
 +mod value_and_place;
 +mod vtable;
 +
 +mod prelude {
 +    pub(crate) use std::convert::{TryFrom, TryInto};
 +
 +    pub(crate) use rustc_span::Span;
 +
 +    pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 +    pub(crate) use rustc_middle::bug;
 +    pub(crate) use rustc_middle::mir::{self, *};
 +    pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
 +    pub(crate) use rustc_middle::ty::{
 +        self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
 +        TypeFoldable, UintTy,
 +    };
 +    pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
 +
 +    pub(crate) use rustc_data_structures::fx::FxHashMap;
 +
 +    pub(crate) use rustc_index::vec::Idx;
 +
 +    pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
 +    pub(crate) use cranelift_codegen::ir::function::Function;
 +    pub(crate) use cranelift_codegen::ir::types;
 +    pub(crate) use cranelift_codegen::ir::{
 +        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
 +        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
 +    };
 +    pub(crate) use cranelift_codegen::isa::{self, CallConv};
 +    pub(crate) use cranelift_codegen::Context;
 +    pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
 +    pub(crate) use cranelift_module::{self, DataContext, FuncId, Linkage, Module};
 +
 +    pub(crate) use crate::abi::*;
 +    pub(crate) use crate::base::{codegen_operand, codegen_place};
 +    pub(crate) use crate::cast::*;
 +    pub(crate) use crate::common::*;
 +    pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
 +    pub(crate) use crate::pointer::Pointer;
 +    pub(crate) use crate::trap::*;
 +    pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
 +}
 +
 +struct PrintOnPanic<F: Fn() -> String>(F);
 +impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
 +    fn drop(&mut self) {
 +        if ::std::thread::panicking() {
 +            println!("{}", (self.0)());
 +        }
 +    }
 +}
 +
 +/// The codegen context holds any information shared between the codegen of individual functions
 +/// inside a single codegen unit, with the exception of the Cranelift [`Module`](cranelift_module::Module).
 +struct CodegenCx<'tcx> {
 +    tcx: TyCtxt<'tcx>,
 +    global_asm: String,
 +    cached_context: Context,
 +    debug_context: Option<DebugContext<'tcx>>,
 +    unwind_context: UnwindContext,
 +}
 +
 +impl<'tcx> CodegenCx<'tcx> {
 +    fn new(
 +        tcx: TyCtxt<'tcx>,
 +        backend_config: BackendConfig,
 +        isa: &dyn TargetIsa,
 +        debug_info: bool,
 +    ) -> Self {
 +        assert_eq!(pointer_ty(tcx), isa.pointer_type());
 +
 +        let unwind_context =
 +            UnwindContext::new(tcx, isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
 +        let debug_context = if debug_info { Some(DebugContext::new(tcx, isa)) } else { None };
 +        CodegenCx {
 +            tcx,
 +            global_asm: String::new(),
 +            cached_context: Context::new(),
 +            debug_context,
 +            unwind_context,
 +        }
 +    }
 +}
 +
 +pub struct CraneliftCodegenBackend {
 +    pub config: Option<BackendConfig>,
 +}
 +
 +impl CodegenBackend for CraneliftCodegenBackend {
 +    fn init(&self, sess: &Session) {
 +        use rustc_session::config::Lto;
 +        match sess.lto() {
 +            Lto::No | Lto::ThinLocal => {}
 +            Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
 +        }
 +    }
 +
 +    fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
 +        vec![]
 +    }
 +
 +    fn print_version(&self) {
 +        println!("Cranelift version: {}", cranelift_codegen::VERSION);
 +    }
 +
 +    fn codegen_crate(
 +        &self,
 +        tcx: TyCtxt<'_>,
 +        metadata: EncodedMetadata,
 +        need_metadata_module: bool,
 +    ) -> Box<dyn Any> {
 +        tcx.sess.abort_if_errors();
 +        let config = if let Some(config) = self.config.clone() {
 +            config
 +        } else {
++            if !tcx.sess.unstable_options() && !tcx.sess.opts.cg.llvm_args.is_empty() {
++                tcx.sess.fatal("`-Z unstable-options` must be passed to allow configuring cg_clif");
++            }
 +            BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
 +                .unwrap_or_else(|err| tcx.sess.fatal(&err))
 +        };
 +        match config.codegen_mode {
 +            CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
 +            CodegenMode::Jit | CodegenMode::JitLazy => {
 +                #[cfg(feature = "jit")]
 +                let _: ! = driver::jit::run_jit(tcx, config);
 +
 +                #[cfg(not(feature = "jit"))]
 +                tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
 +            }
 +        }
 +    }
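 +
 +    // A sketch of the invocation expected by the `-Zunstable-options` gate in
 +    // `codegen_crate` above (the `mode=jit` option name is assumed from
 +    // `BackendConfig::from_opts`, which reads its options out of `-Cllvm-args`):
 +    //
 +    //     rustc -Zunstable-options -Cllvm-args=mode=jit some_crate.rs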
 +
 +    fn join_codegen(
 +        &self,
 +        ongoing_codegen: Box<dyn Any>,
 +        _sess: &Session,
 +    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
 +        Ok(*ongoing_codegen
 +            .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
 +            .unwrap())
 +    }
 +
 +    fn link(
 +        &self,
 +        sess: &Session,
 +        codegen_results: CodegenResults,
 +        outputs: &OutputFilenames,
 +    ) -> Result<(), ErrorReported> {
 +        use rustc_codegen_ssa::back::link::link_binary;
 +
-     sess.target.llvm_target.parse().unwrap()
++        link_binary::<crate::archive::ArArchiveBuilder<'_>>(sess, &codegen_results, outputs)
 +    }
 +}
 +
 +fn target_triple(sess: &Session) -> target_lexicon::Triple {
-                 cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
++    match sess.target.llvm_target.parse() {
++        Ok(triple) => triple,
++        Err(err) => sess.fatal(&format!("target not recognized: {}", err)),
++    }
 +}
 +
 +fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
 +    use target_lexicon::BinaryFormat;
 +
 +    let target_triple = crate::target_triple(sess);
 +
 +    let mut flags_builder = settings::builder();
 +    flags_builder.enable("is_pic").unwrap();
 +    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
 +    let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
 +    flags_builder.set("enable_verifier", enable_verifier).unwrap();
 +
 +    let tls_model = match target_triple.binary_format {
 +        BinaryFormat::Elf => "elf_gd",
 +        BinaryFormat::Macho => "macho",
 +        BinaryFormat::Coff => "coff",
 +        _ => "none",
 +    };
 +    flags_builder.set("tls_model", tls_model).unwrap();
 +
 +    flags_builder.set("enable_simd", "true").unwrap();
 +
 +    flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
 +
 +    flags_builder.set("regalloc", &backend_config.regalloc).unwrap();
 +
 +    use rustc_session::config::OptLevel;
 +    match sess.opts.optimize {
 +        OptLevel::No => {
 +            flags_builder.set("opt_level", "none").unwrap();
 +        }
 +        OptLevel::Less | OptLevel::Default => {}
 +        OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
 +            flags_builder.set("opt_level", "speed_and_size").unwrap();
 +        }
 +    }
 +
 +    let flags = settings::Flags::new(flags_builder);
 +
 +    let variant = cranelift_codegen::isa::BackendVariant::MachInst;
 +
 +    let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
 +        Some("native") => {
 +            let builder = cranelift_native::builder_with_options(variant, true).unwrap();
 +            builder
 +        }
 +        Some(value) => {
 +            let mut builder =
-                 sess.fatal("The specified target cpu isn't currently supported by Cranelift.");
++                cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant)
++                    .unwrap_or_else(|err| {
++                        sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
++                    });
 +            if let Err(_) = builder.enable(value) {
-                 cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant).unwrap();
++                sess.fatal("the specified target cpu isn't currently supported by Cranelift.");
 +            }
 +            builder
 +        }
 +        None => {
 +            let mut builder =
++                cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant)
++                    .unwrap_or_else(|err| {
++                        sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
++                    });
 +            if target_triple.architecture == target_lexicon::Architecture::X86_64 {
 +                // Don't use "haswell" as the default, as it implies `has_lzcnt`.
 +                // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
 +                builder.enable("nehalem").unwrap();
 +            }
 +            builder
 +        }
 +    };
 +
 +    isa_builder.finish(flags)
 +}
 +
 +/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
 +#[no_mangle]
 +pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
 +    Box::new(CraneliftCodegenBackend { config: None })
 +}
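 +
 +// A sketch of how a hot plugged backend like this is loaded; the flag is the
 +// generic rustc mechanism for out-of-tree codegen backends:
 +//
 +//     rustc -Zcodegen-backend=/path/to/librustc_codegen_cranelift.so main.rs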
index b6d378a5fe10ae59b53ca8c4656923902250fc05,0000000000000000000000000000000000000000..545d390e269957f1ef94c48b83d2c1e10b5f953f
mode 100644,000000..100644
--- /dev/null
@@@ -1,437 -1,0 +1,422 @@@
-                     let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
-                         && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
-                             || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
-                     {
-                         // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
-                         (
-                             fx.bcx.ins().sextend(types::I32, lhs),
-                             fx.bcx.ins().sextend(types::I32, rhs),
-                         )
-                     } else {
-                         (lhs, rhs)
-                     };
 +//! Various operations on integer and floating-point numbers
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
 +    use BinOp::*;
 +    use IntCC::*;
 +    Some(match bin_op {
 +        Eq => Equal,
 +        Lt => {
 +            if signed {
 +                SignedLessThan
 +            } else {
 +                UnsignedLessThan
 +            }
 +        }
 +        Le => {
 +            if signed {
 +                SignedLessThanOrEqual
 +            } else {
 +                UnsignedLessThanOrEqual
 +            }
 +        }
 +        Ne => NotEqual,
 +        Ge => {
 +            if signed {
 +                SignedGreaterThanOrEqual
 +            } else {
 +                UnsignedGreaterThanOrEqual
 +            }
 +        }
 +        Gt => {
 +            if signed {
 +                SignedGreaterThan
 +            } else {
 +                UnsignedGreaterThan
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +fn codegen_compare_bin_op<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    signed: bool,
 +    lhs: Value,
 +    rhs: Value,
 +) -> CValue<'tcx> {
 +    let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
 +    let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
 +    let val = fx.bcx.ins().bint(types::I8, val);
 +    CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
 +}
 +
 +pub(crate) fn codegen_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    match bin_op {
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            match in_lhs.layout().ty.kind() {
 +                ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
 +                    let signed = type_sign(in_lhs.layout().ty);
 +                    let lhs = in_lhs.load_scalar(fx);
 +                    let rhs = in_rhs.load_scalar(fx);
 +
-             let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
-             let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
-             let val = fx.bcx.ins().ishl(lhs, actual_shift);
 +                    return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
 +                }
 +                _ => {}
 +            }
 +        }
 +        _ => {}
 +    }
 +
 +    match in_lhs.layout().ty.kind() {
 +        ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
 +    }
 +}
 +
 +pub(crate) fn codegen_bool_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        // Compare binops are handled by `codegen_binop`.
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
 +}
 +
 +pub(crate) fn codegen_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let val = match bin_op {
 +        BinOp::Add => b.iadd(lhs, rhs),
 +        BinOp::Sub => b.isub(lhs, rhs),
 +        BinOp::Mul => b.imul(lhs, rhs),
 +        BinOp::Div => {
 +            if signed {
 +                b.sdiv(lhs, rhs)
 +            } else {
 +                b.udiv(lhs, rhs)
 +            }
 +        }
 +        BinOp::Rem => {
 +            if signed {
 +                b.srem(lhs, rhs)
 +            } else {
 +                b.urem(lhs, rhs)
 +            }
 +        }
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        BinOp::Shl => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            fx.bcx.ins().ishl(lhs, actual_shift)
 +        }
 +        BinOp::Shr => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            if signed {
 +                fx.bcx.ins().sshr(lhs, actual_shift)
 +            } else {
 +                fx.bcx.ins().ushr(lhs, actual_shift)
 +            }
 +        }
 +        // Compare binops are handled by `codegen_binop`.
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
 +    };
 +
 +    CValue::by_val(val, in_lhs.layout())
 +}
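 +
 +// A small sketch of the shift masking in `codegen_int_binop` above: the shift
 +// amount is reduced modulo the bit width of the left operand, matching e.g.
 +// `wrapping_shl` in Rust:
 +//
 +//     assert_eq!(1u8.wrapping_shl(10), 1u8 << (10 % 8)); // both are 4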
 +
 +pub(crate) fn codegen_checked_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "checked int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let (res, has_overflow) = match bin_op {
 +        BinOp::Add => {
 +            /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
 +            (val, c_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
 +            let val = fx.bcx.ins().iadd(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, slt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Sub => {
 +            /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
 +            (val, b_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
 +            let val = fx.bcx.ins().isub(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, sgt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Mul => {
 +            let ty = fx.bcx.func.dfg.value_type(lhs);
 +            match ty {
 +                types::I8 | types::I16 | types::I32 if !signed => {
 +                    let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::UnsignedGreaterThan,
 +                        val,
 +                        (1 << ty.bits()) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, has_overflow)
 +                }
 +                types::I8 | types::I16 | types::I32 if signed => {
 +                    let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_underflow =
 +                        fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::SignedGreaterThan,
 +                        val,
 +                        (1 << (ty.bits() - 1)) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, fx.bcx.ins().bor(has_underflow, has_overflow))
 +                }
 +                types::I64 => {
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = if !signed {
 +                        let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
 +                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
 +                    } else {
 +                        // Based on LLVM's instruction sequence for compiling
 +                        // a.checked_mul(b).is_some() to riscv64gc:
 +                        // mulh    a2, a0, a1
 +                        // mul     a0, a0, a1
 +                        // srai    a0, a0, 63
 +                        // xor     a0, a0, a2
 +                        // snez    a0, a0
 +                        let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
 +                        let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
 +                        let xor = fx.bcx.ins().bxor(val_hi, val_sign);
 +                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
 +                    };
 +                    (val, has_overflow)
 +                }
 +                types::I128 => {
 +                    unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
 +                }
 +                _ => unreachable!("invalid non-integer type {}", ty),
 +            }
 +        }
 +        BinOp::Shl => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-             let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
-             let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
++            let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
++            let val = fx.bcx.ins().ishl(lhs, masked_shift);
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        BinOp::Shr => {
 +            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
-                 fx.bcx.ins().ushr(lhs, actual_shift)
++            let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
 +            let val = if !signed {
-                 fx.bcx.ins().sshr(lhs, actual_shift)
++                fx.bcx.ins().ushr(lhs, masked_shift)
 +            } else {
++                fx.bcx.ins().sshr(lhs, masked_shift)
 +            };
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
 +
 +    let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
 +    CValue::by_val_pair(res, has_overflow, out_layout)
 +}
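 +
 +// A sketch of the wrapping-arithmetic overflow checks above, in plain Rust:
 +// unsigned addition overflowed iff the result is smaller than an operand, and
 +// unsigned subtraction borrowed iff the result is larger than the minuend.
 +//
 +//     let sum = 200u8.wrapping_add(100); // 44
 +//     assert!(sum < 200);                // overflow detected
 +//     let diff = 10u8.wrapping_sub(20);  // 246
 +//     assert!(diff > 10);                // borrow detected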
 +
 +pub(crate) fn codegen_float_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::Add => b.fadd(lhs, rhs),
 +        BinOp::Sub => b.fsub(lhs, rhs),
 +        BinOp::Mul => b.fmul(lhs, rhs),
 +        BinOp::Div => b.fdiv(lhs, rhs),
 +        BinOp::Rem => {
 +            let name = match in_lhs.layout().ty.kind() {
 +                ty::Float(FloatTy::F32) => "fmodf",
 +                ty::Float(FloatTy::F64) => "fmod",
 +                _ => bug!(),
 +            };
 +            return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
 +        }
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            let fltcc = match bin_op {
 +                BinOp::Eq => FloatCC::Equal,
 +                BinOp::Lt => FloatCC::LessThan,
 +                BinOp::Le => FloatCC::LessThanOrEqual,
 +                BinOp::Ne => FloatCC::NotEqual,
 +                BinOp::Ge => FloatCC::GreaterThanOrEqual,
 +                BinOp::Gt => FloatCC::GreaterThan,
 +                _ => unreachable!(),
 +            };
 +            let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
 +            let val = fx.bcx.ins().bint(types::I8, val);
 +            return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
 +        }
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, in_lhs.layout())
 +}
 +
 +pub(crate) fn codegen_ptr_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let is_thin_ptr = in_lhs
 +        .layout()
 +        .ty
 +        .builtin_deref(true)
 +        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
 +        .unwrap_or(true);
 +
 +    if is_thin_ptr {
 +        match bin_op {
 +            BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +                let lhs = in_lhs.load_scalar(fx);
 +                let rhs = in_rhs.load_scalar(fx);
 +
 +                codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
 +            }
 +            BinOp::Offset => {
 +                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
 +                let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
 +                let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +                let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
 +                let base_val = base.load_scalar(fx);
 +                let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +                CValue::by_val(res, base.layout())
 +            }
 +            _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +        }
 +    } else {
 +        let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
 +        let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
 +
 +        let res = match bin_op {
 +            BinOp::Eq => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
 +                fx.bcx.ins().band(ptr_eq, extra_eq)
 +            }
 +            BinOp::Ne => {
 +                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
 +                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
 +                fx.bcx.ins().bor(ptr_ne, extra_ne)
 +            }
 +            BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +
 +                let ptr_cmp =
 +                    fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
 +                let extra_cmp = fx.bcx.ins().icmp(
 +                    bin_op_to_intcc(bin_op, false).unwrap(),
 +                    lhs_extra,
 +                    rhs_extra,
 +                );
 +
 +                fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
 +            }
 +            _ => panic!("bin_op {:?} on ptr", bin_op),
 +        };
 +
 +        CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
 +    }
 +}
index b95e2d72877d992f35622873a25270f624d635e0,0000000000000000000000000000000000000000..d637b4d89293cea0a0a855ebb97378b111c3789b
mode 100644,000000..100644
--- /dev/null
@@@ -1,106 -1,0 +1,67 @@@
- use cranelift_codegen::ir::{
-     condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
- };
 +//! Peephole optimizations that can be performed while creating clif ir.
 +
- pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
-     if bcx.func.dfg.value_type(arg).is_bool() {
-         return arg;
-     }
-     (|| {
-         let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
-             arg_inst
-         } else {
-             return None;
-         };
-         match bcx.func.dfg[arg_inst] {
-             // This is the lowering of Rvalue::Not
-             InstructionData::Load { opcode: Opcode::Load, arg: ptr, flags, offset } => {
-                 // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
-                 // uextend.i32`. Just `uload8` is much faster.
-                 match bcx.func.dfg.ctrl_typevar(arg_inst) {
-                     types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
-                     types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
-                     _ => None,
-                 }
-             }
-             _ => None,
-         }
-     })()
-     .unwrap_or_else(|| {
-         match bcx.func.dfg.value_type(arg) {
-             types::I8 | types::I16 => {
-                 // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
-                 bcx.ins().uextend(types::I32, arg)
-             }
-             _ => arg,
-         }
-     })
- }
++use cranelift_codegen::ir::{condcodes::IntCC, InstructionData, Opcode, Value, ValueDef};
 +use cranelift_frontend::FunctionBuilder;
 +
 +/// If the given value was produced by a `bint` instruction, return its input, otherwise return the
 +/// given value.
 +pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
 +    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
 +        match bcx.func.dfg[arg_inst] {
 +            InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
 +            _ => arg,
 +        }
 +    } else {
 +        arg
 +    }
 +}
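 +
 +// For example (a sketch of clif ir, not real compiler output): given
 +//
 +//     v1 = icmp eq v0, v2  ; a boolean
 +//     v3 = bint.i8 v1      ; materialized as 0 or 1
 +//
 +// `maybe_unwrap_bint(bcx, v3)` returns `v1`, letting a branch consume the
 +// boolean directly instead of the materialized integer.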
 +
 +/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
 +/// otherwise return the given value and false.
 +pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
 +    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
 +        match bcx.func.dfg[arg_inst] {
 +            // This is the lowering of `Rvalue::Not`
 +            InstructionData::IntCompareImm {
 +                opcode: Opcode::IcmpImm,
 +                cond: IntCC::Equal,
 +                arg,
 +                imm,
 +            } if imm.bits() == 0 => (arg, true),
 +            _ => (arg, false),
 +        }
 +    } else {
 +        (arg, false)
 +    }
 +}
 +
 +/// Returns whether the branch is statically known to be taken, or `None` if that isn't statically known.
 +pub(crate) fn maybe_known_branch_taken(
 +    bcx: &FunctionBuilder<'_>,
 +    arg: Value,
 +    test_zero: bool,
 +) -> Option<bool> {
 +    let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
 +        arg_inst
 +    } else {
 +        return None;
 +    };
 +
 +    match bcx.func.dfg[arg_inst] {
 +        InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
 +            if test_zero {
 +                Some(!imm)
 +            } else {
 +                Some(imm)
 +            }
 +        }
 +        InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
 +            if test_zero {
 +                Some(imm.bits() == 0)
 +            } else {
 +                Some(imm.bits() != 0)
 +            }
 +        }
 +        _ => None,
 +    }
 +}
index 21d3e68dbc79257ac4f899715efe30ce481d2839,0000000000000000000000000000000000000000..fe8d20fa39fc2b82f826947b4f0a5f5a150a7873
mode 100644,000000..100644
--- /dev/null
@@@ -1,78 -1,0 +1,78 @@@
-                 params: vec![AbiParam::new(pointer_ty(fx.tcx))],
 +//! Helpers used to print a message and abort in case of certain panics and some detected UB.
 +
 +use crate::prelude::*;
 +
 +fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
 +    let puts = fx
 +        .module
 +        .declare_function(
 +            "puts",
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
++                params: vec![AbiParam::new(fx.pointer_type)],
 +                returns: vec![AbiParam::new(types::I32)],
 +            },
 +        )
 +        .unwrap();
 +    let puts = fx.module.declare_func_in_func(puts, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(puts, "puts");
 +    }
 +
 +    let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, fx.symbol_name, msg);
 +    let msg_ptr = fx.anonymous_str(&real_msg);
 +    fx.bcx.ins().call(puts, &[msg_ptr]);
 +}
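 +
 +// The signature declared above matches the C `int puts(const char *s)`; note
 +// that `real_msg` is NUL-terminated by hand, since `puts` expects a C string.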
 +
 +/// Trap code: user1
 +pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::User(1));
 +}
 +
 +/// Use this, for example, when a function call should never return. This will fill the current block,
 +/// so you can **not** add instructions to it afterwards.
 +///
 +/// Trap code: `UnreachableCodeReached`
 +pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
 +
 +/// Like `trap_unreachable` but returns a fake value of the specified type.
 +///
 +/// Trap code: `UnreachableCodeReached`
 +pub(crate) fn trap_unreachable_ret_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    dest_layout: TyAndLayout<'tcx>,
 +    msg: impl AsRef<str>,
 +) -> CValue<'tcx> {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
 +    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
 +}
 +
 +/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
 +/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
 +/// to it afterwards.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
 +}
 +
 +/// Like `trap_unimplemented` but returns a fake value of the specified type.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unimplemented_ret_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    dest_layout: TyAndLayout<'tcx>,
 +    msg: impl AsRef<str>,
 +) -> CValue<'tcx> {
 +    trap_unimplemented(fx, msg);
 +    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
 +}
index d594731b4dfce5870bc9358d0a5edf466f64fe60,0000000000000000000000000000000000000000..d9c4647cba3a98938f6e861f494e01b2ad605f6a
mode 100644,000000..100644
--- /dev/null
@@@ -1,244 -1,0 +1,242 @@@
-             assert!(!fx.layout_of(a).is_unsized());
 +//! Codegen of the [`PointerCast::Unsize`] operation.
 +//!
 +//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
 +
 +use crate::prelude::*;
 +
 +// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
 +
 +/// Retrieve the information we are losing (making dynamic) in an unsizing
 +/// adjustment.
 +///
 +/// The `old_info` argument is a bit funny. It is intended for use
 +/// in an upcast, where the new vtable for an object will be derived
 +/// from the old one.
 +pub(crate) fn unsized_info<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    source: Ty<'tcx>,
 +    target: Ty<'tcx>,
 +    old_info: Option<Value>,
 +) -> Value {
 +    let (source, target) =
 +        fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
 +    match (&source.kind(), &target.kind()) {
 +        (&ty::Array(_, len), &ty::Slice(_)) => fx
 +            .bcx
 +            .ins()
 +            .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
 +        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
 +            let old_info =
 +                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
 +            if data_a.principal_def_id() == data_b.principal_def_id() {
 +                return old_info;
 +            }
 +            // trait upcasting coercion
 +
 +            // If both of the two `principal`s were `None`, this function would have returned early above,
 +            // and if only one of them were `None`, typechecking would have rejected this case.
 +            let principal_a = data_a
 +                .principal()
 +                .expect("unsized_info: missing principal trait for trait upcasting coercion");
 +            let principal_b = data_b
 +                .principal()
 +                .expect("unsized_info: missing principal trait for trait upcasting coercion");
 +
 +            let vptr_entry_idx = fx.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((
 +                principal_a.with_self_ty(fx.tcx, source),
 +                principal_b.with_self_ty(fx.tcx, source),
 +            ));
 +
 +            if let Some(entry_idx) = vptr_entry_idx {
 +                let entry_idx = u32::try_from(entry_idx).unwrap();
 +                let entry_offset = entry_idx * fx.pointer_type.bytes();
 +                let vptr_ptr = Pointer::new(old_info).offset_i64(fx, entry_offset.into()).load(
 +                    fx,
 +                    fx.pointer_type,
 +                    crate::vtable::vtable_memflags(),
 +                );
 +                vptr_ptr
 +            } else {
 +                old_info
 +            }
 +        }
 +        (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
 +        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
 +    }
 +}
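The metadata this function computes is exactly what surface Rust stores in the second half of a fat pointer. A standalone illustration of the two common cases (plain Rust, independent of cg_clif):

```rust
trait Draw {
    fn draw(&self);
}
impl Draw for u8 {
    fn draw(&self) {}
}

fn main() {
    // Array -> slice: the metadata is the statically known length.
    let arr: &[u8; 4] = &[1, 2, 3, 4];
    let slice: &[u8] = arr; // unsized_info yields the constant `4` here
    assert_eq!(slice.len(), 4);

    // Sized -> trait object: the metadata is a pointer to the vtable.
    let byte: &u8 = &7;
    let obj: &dyn Draw = byte; // unsized_info yields the `u8 as Draw` vtable
    obj.draw();
}
```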
 +
 +/// Coerce `src` to `dst_ty`.
 +fn unsize_ptr<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    src: Value,
 +    src_layout: TyAndLayout<'tcx>,
 +    dst_layout: TyAndLayout<'tcx>,
 +    old_info: Option<Value>,
 +) -> (Value, Value) {
 +    match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
 +        (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
 +        | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
 +        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
-             assert!(!fx.layout_of(a).is_unsized());
 +            (src, unsized_info(fx, a, b, old_info))
 +        }
 +        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
 +            let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
 +            (src, unsized_info(fx, a, b, old_info))
 +        }
 +        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
 +            assert_eq!(def_a, def_b);
 +
 +            if src_layout == dst_layout {
 +                return (src, old_info.unwrap());
 +            }
 +
 +            let mut result = None;
 +            for i in 0..src_layout.fields.count() {
 +                let src_f = src_layout.field(fx, i);
 +                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
 +                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
 +                if src_f.is_zst() {
 +                    continue;
 +                }
 +                assert_eq!(src_layout.size, src_f.size);
 +
 +                let dst_f = dst_layout.field(fx, i);
 +                assert_ne!(src_f.ty, dst_f.ty);
 +                assert_eq!(result, None);
 +                result = Some(unsize_ptr(fx, src, src_f, dst_f, old_info));
 +            }
 +            result.unwrap()
 +        }
 +        _ => bug!("unsize_ptr: called on bad types"),
 +    }
 +}
 +
 +/// Coerce `src` to the type of `dst`, performing the required unsizing,
 +/// and store the result in `dst`.
 +pub(crate) fn coerce_unsized_into<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    src: CValue<'tcx>,
 +    dst: CPlace<'tcx>,
 +) {
 +    let src_ty = src.layout().ty;
 +    let dst_ty = dst.layout().ty;
 +    let mut coerce_ptr = || {
 +        let (base, info) =
 +            if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
 +                let (old_base, old_info) = src.load_scalar_pair(fx);
 +                unsize_ptr(fx, old_base, src.layout(), dst.layout(), Some(old_info))
 +            } else {
 +                let base = src.load_scalar(fx);
 +                unsize_ptr(fx, base, src.layout(), dst.layout(), None)
 +            };
 +        dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
 +    };
 +    match (&src_ty.kind(), &dst_ty.kind()) {
 +        (&ty::Ref(..), &ty::Ref(..))
 +        | (&ty::Ref(..), &ty::RawPtr(..))
 +        | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
 +        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
 +            assert_eq!(def_a, def_b);
 +
 +            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
 +                let src_f = src.value_field(fx, mir::Field::new(i));
 +                let dst_f = dst.place_field(fx, mir::Field::new(i));
 +
 +                if dst_f.layout().is_zst() {
 +                    continue;
 +                }
 +
 +                if src_f.layout().ty == dst_f.layout().ty {
 +                    dst_f.write_cvalue(fx, src_f);
 +                } else {
 +                    coerce_unsized_into(fx, src_f, dst_f);
 +                }
 +            }
 +        }
 +        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
 +    }
 +}
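For ADTs, the loop above copies sized fields verbatim and recurses into the single field whose type actually changes. A standalone sketch of the surface-level coercion that exercises this path:

```rust
// Struct unsizing: only the last field may be unsized, so every other
// field is copied as-is and the recursion handles `data`.
struct Named<T: ?Sized> {
    id: u32,
    data: T,
}

fn main() {
    let sized: Box<Named<[u8; 2]>> = Box::new(Named { id: 1, data: [10, 20] });
    let dynamic: Box<Named<[u8]>> = sized; // handled by coerce_unsized_into
    assert_eq!((dynamic.id, dynamic.data.len()), (1, 2));
}
```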
 +
 +// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
 +
 +pub(crate) fn size_and_align_of_dst<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    layout: TyAndLayout<'tcx>,
 +    info: Value,
 +) -> (Value, Value) {
 +    if !layout.is_unsized() {
 +        let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
 +        let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
 +        return (size, align);
 +    }
 +    match layout.ty.kind() {
 +        ty::Dynamic(..) => {
 +            // load size/align from vtable
 +            (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
 +        }
 +        ty::Slice(_) | ty::Str => {
 +            let unit = layout.field(fx, 0);
 +            // The `info` in this case is the length of the slice/str, so the size is
 +            // the length times the unit size.
 +            (
 +                fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
 +                fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
 +            )
 +        }
 +        _ => {
 +            // First get the size of all statically known fields.
 +            // Don't use size_of because it also rounds up to alignment, which we
 +            // want to avoid, as the unsized field's alignment could be smaller.
 +            assert!(!layout.ty.is_simd());
 +
 +            let i = layout.fields.count() - 1;
 +            let sized_size = layout.fields.offset(i).bytes();
 +            let sized_align = layout.align.abi.bytes();
 +            let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
 +
 +            // Recurse to get the size of the dynamically sized field (must be
 +            // the last field).
 +            let field_layout = layout.field(fx, i);
 +            let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
 +
 +            // FIXME (#26403, #27023): We should be adding padding
 +            // to `sized_size` (to accommodate the `unsized_align`
 +            // required of the unsized field that follows) before
 +            // summing it with `sized_size`. (Note that since #26403
 +            // is unfixed, we do not yet add the necessary padding
 +            // here. But this is where the add would go.)
 +
 +            // Return the sum of sizes and max of aligns.
 +            let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
 +
 +            // Packed types ignore the alignment of their fields.
 +            if let ty::Adt(def, _) = layout.ty.kind() {
 +                if def.repr.packed() {
 +                    unsized_align = sized_align;
 +                }
 +            }
 +
 +            // Choose max of two known alignments (combined value must
 +            // be aligned according to more restrictive of the two).
 +            let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
 +            let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
 +
 +            // Issue #27023: must add any necessary padding to `size`
 +            // (to make it a multiple of `align`) before returning it.
 +            //
 +            // Namely, the returned size should be, in C notation:
 +            //
 +            //   `size + ((size & (align-1)) ? align : 0)`
 +            //
 +            // emulated via the semi-standard fast bit trick:
 +            //
 +            //   `(size + (align-1)) & -align`
 +            let addend = fx.bcx.ins().iadd_imm(align, -1);
 +            let add = fx.bcx.ins().iadd(size, addend);
 +            let neg = fx.bcx.ins().ineg(align);
 +            let size = fx.bcx.ins().band(add, neg);
 +
 +            (size, align)
 +        }
 +    }
 +}
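The closing round-up is the standard power-of-two alignment trick, checked here on concrete numbers (it assumes `align` is a power of two, which layout alignments always are):

```rust
// `(size + (align - 1)) & -align`, as emitted above via iadd_imm/iadd/ineg/band.
fn round_up_to_align(size: i64, align: i64) -> i64 {
    (size + (align - 1)) & -align
}

fn main() {
    assert_eq!(round_up_to_align(13, 8), 16); // padded up to the next multiple
    assert_eq!(round_up_to_align(16, 8), 16); // already aligned: unchanged
    assert_eq!(round_up_to_align(0, 8), 0);
}
```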
index ae8ccc626b47089e09a8f75672c9893659ef21fa,0000000000000000000000000000000000000000..364b3da92b88822fe614904ce75ed44582d2a288
mode 100644,000000..100644
--- /dev/null
@@@ -1,734 -1,0 +1,797 @@@
-                 let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
 +//! Definition of [`CValue`] and [`CPlace`]
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::ir::immediates::Offset32;
 +
 +fn codegen_field<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    base: Pointer,
 +    extra: Option<Value>,
 +    layout: TyAndLayout<'tcx>,
 +    field: mir::Field,
 +) -> (Pointer, TyAndLayout<'tcx>) {
 +    let field_offset = layout.fields.offset(field.index());
 +    let field_layout = layout.field(&*fx, field.index());
 +
 +    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
 +        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
 +    };
 +
 +    if let Some(extra) = extra {
 +        if !field_layout.is_unsized() {
 +            return simple(fx);
 +        }
 +        match field_layout.ty.kind() {
 +            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
 +            ty::Adt(def, _) if def.repr.packed() => {
 +                assert_eq!(layout.align.abi.bytes(), 1);
 +                simple(fx)
 +            }
 +            _ => {
 +                // We have to align the offset for DSTs.
 +                let unaligned_offset = field_offset.bytes();
 +                let (_, unsized_align) =
 +                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 +
-                 let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
++                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
 +                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
 +                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
-     pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
-         CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
-     }
++                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
 +                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
 +
 +                (base.offset_value(fx, offset), field_layout)
 +            }
 +        }
 +    } else {
 +        simple(fx)
 +    }
 +}
 +
 +fn scalar_pair_calculate_b_offset(
 +    tcx: TyCtxt<'_>,
 +    a_scalar: &Scalar,
 +    b_scalar: &Scalar,
 +) -> Offset32 {
 +    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
 +    Offset32::new(b_offset.bytes().try_into().unwrap())
 +}
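In a `ScalarPair`, the second component lives at the first component's size rounded up to the second's alignment. A concrete check of that arithmetic (an illustrative helper, not the compiler's `align_to`):

```rust
// b_offset = align_to(a.size, b.align), e.g. for a (u8, u32)-shaped pair
// the second scalar starts at offset 4.
fn b_offset(a_size: u64, b_align: u64) -> u64 {
    (a_size + b_align - 1) / b_align * b_align
}

fn main() {
    assert_eq!(b_offset(1, 4), 4); // small first scalar, 4-byte-aligned second
    assert_eq!(b_offset(8, 8), 8); // e.g. the (ptr, len) pair of a slice
}
```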
 +
 +/// A read-only value
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
 +
 +#[derive(Debug, Copy, Clone)]
 +enum CValueInner {
 +    ByRef(Pointer, Option<Value>),
 +    ByVal(Value),
 +    ByValPair(Value, Value),
 +}
 +
 +impl<'tcx> CValue<'tcx> {
 +    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, None), layout)
 +    }
 +
 +    pub(crate) fn by_ref_unsized(
 +        ptr: Pointer,
 +        meta: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
 +    }
 +
 +    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByVal(value), layout)
 +    }
 +
 +    pub(crate) fn by_val_pair(
 +        value: Value,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByValPair(value, extra), layout)
 +    }
 +
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.1
 +    }
 +
 +    // FIXME remove
 +    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => (ptr, meta),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
 +                let cplace = CPlace::new_stack_slot(fx, layout);
 +                cplace.write_cvalue(fx, self);
 +                (cplace.to_ptr(), None)
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
 +        }
 +    }
 +
 +    /// Load a value with a `layout.abi` of `Abi::Scalar`.
 +    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let clif_ty = match layout.abi {
 +                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
 +                    Abi::Vector { ref element, count } => {
 +                        scalar_to_clif_type(fx.tcx, element.clone())
 +                            .by(u16::try_from(count).unwrap())
 +                            .unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", layout.ty),
 +                };
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                ptr.load(fx, clif_ty, flags)
 +            }
 +            CValueInner::ByVal(value) => value,
 +            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
 +            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
 +        }
 +    }
 +
 +    /// Load a value pair with a `layout.abi` of `Abi::ScalarPair`.
 +    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let (a_scalar, b_scalar) = match &layout.abi {
 +                    Abi::ScalarPair(a, b) => (a, b),
 +                    _ => unreachable!("load_scalar_pair({:?})", self),
 +                };
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
 +                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let val1 = ptr.load(fx, clif_ty1, flags);
 +                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
 +                (val1, val2)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => {
 +                bug!("load_scalar_pair for unsized value not allowed")
 +            }
 +            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
 +            CValueInner::ByValPair(val1, val2) => (val1, val2),
 +        }
 +    }
 +
 +    pub(crate) fn value_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CValue<'tcx> {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByVal(val) => match layout.abi {
 +                Abi::Vector { element: _, count } => {
 +                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
 +                    let field = u8::try_from(field.index()).unwrap();
 +                    assert!(field < count);
 +                    let lane = fx.bcx.ins().extractlane(val, field);
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(lane, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByValPair(val1, val2) => match layout.abi {
 +                Abi::ScalarPair(_, _) => {
 +                    let val = match field.as_u32() {
 +                        0 => val1,
 +                        1 => val2,
 +                        _ => bug!("field should be 0 or 1"),
 +                    };
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(val, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByRef(ptr, None) => {
 +                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
 +                CValue::by_ref(field_ptr, field_layout)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
++    /// Like [`CValue::value_field`], except it handles ADTs containing a single array field in a
++    /// way that allows accessing individual lanes.
++    pub(crate) fn value_lane(
++        self,
++        fx: &mut FunctionCx<'_, '_, 'tcx>,
++        lane_idx: u64,
++    ) -> CValue<'tcx> {
++        let layout = self.1;
++        assert!(layout.ty.is_simd());
++        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++        let lane_layout = fx.layout_of(lane_ty);
++        assert!(lane_idx < lane_count);
++        match self.0 {
++            CValueInner::ByVal(val) => match layout.abi {
++                Abi::Vector { element: _, count: _ } => {
++                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
++                    let lane_idx = u8::try_from(lane_idx).unwrap();
++                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
++                    CValue::by_val(lane, lane_layout)
++                }
++                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
++            },
++            CValueInner::ByValPair(_, _) => unreachable!(),
++            CValueInner::ByRef(ptr, None) => {
++                let field_offset = lane_layout.size * lane_idx;
++                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
++                CValue::by_ref(field_ptr, lane_layout)
++            }
++            CValueInner::ByRef(_, Some(_)) => unreachable!(),
++        }
++    }
++
 +    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
 +        crate::unsize::coerce_unsized_into(fx, self, dest);
 +    }
 +
 +    /// If `ty` is signed, `const_val` must already be sign extended.
 +    pub(crate) fn const_val(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +        const_val: ty::ScalarInt,
 +    ) -> CValue<'tcx> {
 +        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
 +        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
 +
 +        let clif_ty = fx.clif_type(layout.ty).unwrap();
 +
 +        if let ty::Bool = layout.ty.kind() {
 +            assert!(
 +                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
 +                "Invalid bool 0x{:032X}",
 +                const_val
 +            );
 +        }
 +
 +        let val = match layout.ty.kind() {
 +            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                let const_val = const_val.to_bits(layout.size).unwrap();
 +                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
 +                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
 +                fx.bcx.ins().iconcat(lsb, msb)
 +            }
 +            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
 +                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
 +            }
 +            ty::Float(FloatTy::F32) => {
 +                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
 +            }
 +            ty::Float(FloatTy::F64) => {
 +                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
 +            }
 +            _ => panic!(
 +                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
 +                layout.ty
 +            ),
 +        };
 +
 +        CValue::by_val(val, layout)
 +    }
 +
 +    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
 +        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert_eq!(self.layout().abi, layout.abi);
 +        CValue(self.0, layout)
 +    }
 +}
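How the three `CValueInner` representations line up with surface Rust, as a rough sketch (the actual choice depends on the computed `Abi`):

```rust
fn main() {
    let x: u64 = 5; // Abi::Scalar -> typically CValueInner::ByVal
    let s: &[u8] = &[1, 2, 3]; // Abi::ScalarPair -> ByValPair(ptr, len)
    let big = [0u8; 64]; // aggregate Abi -> ByRef(pointer to stack memory)
    assert_eq!((x, s.len(), big.len()), (5, 3, 64));
}
```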
 +
 +/// A place where a value can be written to or read from.
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CPlace<'tcx> {
 +    inner: CPlaceInner,
 +    layout: TyAndLayout<'tcx>,
 +}
 +
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) enum CPlaceInner {
 +    Var(Local, Variable),
 +    VarPair(Local, Variable, Variable),
 +    VarLane(Local, Variable, u8),
 +    Addr(Pointer, Option<Value>),
 +}
 +
 +impl<'tcx> CPlace<'tcx> {
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.layout
 +    }
 +
 +    pub(crate) fn inner(&self) -> &CPlaceInner {
 +        &self.inner
 +    }
 +
-             return CPlace::no_place(layout);
 +    pub(crate) fn new_stack_slot(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        assert!(!layout.is_unsized());
 +        if layout.size.bytes() == 0 {
++            return CPlace {
++                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
++                layout,
++            };
 +        }
 +
 +        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +            kind: StackSlotKind::ExplicitSlot,
 +            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +            // specify stack slot alignment.
 +            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
 +            offset: None,
 +        });
 +        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
 +    }
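The slot size computation rounds up to a multiple of 16 because, as the FIXME notes, Cranelift offers no per-slot alignment yet; the arithmetic itself is plain integer round-up:

```rust
// `(bytes + 15) / 16 * 16` rounds up to the next multiple of 16.
fn slot_size(bytes: u32) -> u32 {
    (bytes + 15) / 16 * 16
}

fn main() {
    assert_eq!(slot_size(1), 16);
    assert_eq!(slot_size(16), 16);
    assert_eq!(slot_size(17), 32);
}
```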
 +
 +    pub(crate) fn new_var(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
 +        CPlace { inner: CPlaceInner::Var(local, var), layout }
 +    }
 +
 +    pub(crate) fn new_var_pair(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var1 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        let var2 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +
 +        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
 +        fx.bcx.declare_var(var1, ty1);
 +        fx.bcx.declare_var(var2, ty2);
 +        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
 +    }
 +
 +    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
 +    }
 +
 +    pub(crate) fn for_ptr_with_extra(
 +        ptr: Pointer,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
 +    }
 +
 +    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
 +        let layout = self.layout();
 +        match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let val1 = fx.bcx.use_var(var1);
 +                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
 +                let val2 = fx.bcx.use_var(var2);
 +                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
 +                CValue::by_val_pair(val1, val2, layout)
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                let val = fx.bcx.ins().extractlane(val, lane);
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::Addr(ptr, extra) => {
 +                if let Some(extra) = extra {
 +                    CValue::by_ref_unsized(ptr, extra, layout)
 +                } else {
 +                    CValue::by_ref(ptr, layout)
 +                }
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr(self) -> Pointer {
 +        match self.to_ptr_maybe_unsized() {
 +            (ptr, None) => ptr,
 +            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
 +        match self.inner {
 +            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
 +            CPlaceInner::Var(_, _)
 +            | CPlaceInner::VarPair(_, _, _)
 +            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
 +        assert_assignable(fx, from.layout().ty, self.layout().ty);
 +
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
 +    }
 +
 +    pub(crate) fn write_cvalue_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +    ) {
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
 +    }
 +
 +    fn write_cvalue_maybe_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +        method: &'static str,
 +    ) {
 +        fn transmute_value<'tcx>(
 +            fx: &mut FunctionCx<'_, '_, 'tcx>,
 +            var: Variable,
 +            data: Value,
 +            dst_ty: Type,
 +        ) {
 +            let src_ty = fx.bcx.func.dfg.value_type(data);
 +            assert_eq!(
 +                src_ty.bytes(),
 +                dst_ty.bytes(),
 +                "write_cvalue_transmute: {:?} -> {:?}",
 +                src_ty,
 +                dst_ty,
 +            );
 +            let data = match (src_ty, dst_ty) {
 +                (_, _) if src_ty == dst_ty => data,
 +
 +                // This is a `write_cvalue_transmute`.
 +                (types::I32, types::F32)
 +                | (types::F32, types::I32)
 +                | (types::I64, types::F64)
 +                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
 +                _ if src_ty.is_vector() && dst_ty.is_vector() => {
 +                    fx.bcx.ins().raw_bitcast(dst_ty, data)
 +                }
 +                _ if src_ty.is_vector() || dst_ty.is_vector() => {
 +                    // FIXME do something more efficient for transmutes between vectors and integers.
 +                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +                        kind: StackSlotKind::ExplicitSlot,
 +                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +                        // specify stack slot alignment.
 +                        size: (src_ty.bytes() + 15) / 16 * 16,
 +                        offset: None,
 +                    });
 +                    let ptr = Pointer::stack_slot(stack_slot);
 +                    ptr.store(fx, data, MemFlags::trusted());
 +                    ptr.load(fx, dst_ty, MemFlags::trusted())
 +                }
 +
 +                // `CValue`s should never contain SSA-only types, so if you ended
 +                // up here having seen an error like `B1 -> I8`, then before
 +                // calling `write_cvalue` you need to add a `bint` instruction.
 +                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
 +            };
 +            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +            fx.bcx.def_var(var, data);
 +        }
 +
 +        assert_eq!(self.layout().size, from.layout().size);
 +
 +        if fx.clif_comments.enabled() {
 +            use cranelift_codegen::cursor::{Cursor, CursorPosition};
 +            let cur_block = match fx.bcx.cursor().position() {
 +                CursorPosition::After(block) => block,
 +                _ => unreachable!(),
 +            };
 +            fx.add_comment(
 +                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
 +                format!(
 +                    "{}: {:?}: {:?} <- {:?}: {:?}",
 +                    method,
 +                    self.inner(),
 +                    self.layout().ty,
 +                    from.0,
 +                    from.layout().ty
 +                ),
 +            );
 +        }
 +
 +        let dst_layout = self.layout();
 +        let to_ptr = match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let data = CValue(from.0, dst_layout).load_scalar(fx);
 +                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var, data, dst_ty);
 +                return;
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
 +                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var1, data1, dst_ty1);
 +                transmute_value(fx, var2, data2, dst_ty2);
 +                return;
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let data = from.load_scalar(fx);
 +
 +                // First get the old vector
 +                let vector = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +
 +                // Next insert the written lane into the vector
 +                let vector = fx.bcx.ins().insertlane(vector, data, lane);
 +
 +                // Finally write the new vector
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                fx.bcx.def_var(var, vector);
 +
 +                return;
 +            }
 +            CPlaceInner::Addr(ptr, None) => {
 +                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
 +                    return;
 +                }
 +                ptr
 +            }
 +            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
 +        };
 +
 +        let mut flags = MemFlags::new();
 +        flags.set_notrap();
 +        match from.layout().abi {
 +            // FIXME make Abi::Vector work too
 +            Abi::Scalar(_) => {
 +                let val = from.load_scalar(fx);
 +                to_ptr.store(fx, val, flags);
 +                return;
 +            }
 +            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
 +                let (value, extra) = from.load_scalar_pair(fx);
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                to_ptr.store(fx, value, flags);
 +                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
 +                return;
 +            }
 +            _ => {}
 +        }
 +
 +        match from.0 {
 +            CValueInner::ByVal(val) => {
 +                to_ptr.store(fx, val, flags);
 +            }
 +            CValueInner::ByValPair(_, _) => {
 +                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
 +            }
 +            CValueInner::ByRef(from_ptr, None) => {
 +                let from_addr = from_ptr.get_addr(fx);
 +                let to_addr = to_ptr.get_addr(fx);
 +                let src_layout = from.1;
 +                let size = dst_layout.size.bytes();
 +                let src_align = src_layout.align.abi.bytes() as u8;
 +                let dst_align = dst_layout.align.abi.bytes() as u8;
 +                fx.bcx.emit_small_memory_copy(
 +                    fx.module.target_config(),
 +                    to_addr,
 +                    from_addr,
 +                    size,
 +                    dst_align,
 +                    src_align,
 +                    true,
 +                    MemFlags::trusted(),
 +                );
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
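The scalar transmute special cases above (`I32 <-> F32`, `I64 <-> F64`) are plain bit reinterpretations; the surface-level equivalent:

```rust
fn main() {
    // A bitcast between same-sized int and float preserves the raw bits,
    // which is all the scalar-variable transmute path needs.
    let bits: u32 = 1.0f32.to_bits();
    assert_eq!(bits, 0x3f80_0000); // IEEE 754 encoding of 1.0
    assert_eq!(f32::from_bits(bits), 1.0);
}
```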
 +
 +    pub(crate) fn place_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CPlace<'tcx> {
 +        let layout = self.layout();
 +
 +        match self.inner {
 +            CPlaceInner::Var(local, var) => {
 +                if let Abi::Vector { .. } = layout.abi {
 +                    return CPlace {
 +                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
 +                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                    };
 +                }
 +            }
 +            CPlaceInner::VarPair(local, var1, var2) => {
 +                let layout = layout.field(&*fx, field.index());
 +
 +                match field.as_u32() {
 +                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
 +                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
 +                    _ => unreachable!("field should be 0 or 1"),
 +                }
 +            }
 +            _ => {}
 +        }
 +
 +        let (base, extra) = self.to_ptr_maybe_unsized();
 +
 +        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
 +        if field_layout.is_unsized() {
 +            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
 +        } else {
 +            CPlace::for_ptr(field_ptr, field_layout)
 +        }
 +    }
 +
++    /// Like [`CPlace::place_field`], except it handles ADTs containing a single array field in a
++    /// way that allows accessing individual lanes.
++    pub(crate) fn place_lane(
++        self,
++        fx: &mut FunctionCx<'_, '_, 'tcx>,
++        lane_idx: u64,
++    ) -> CPlace<'tcx> {
++        let layout = self.layout();
++        assert!(layout.ty.is_simd());
++        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++        let lane_layout = fx.layout_of(lane_ty);
++        assert!(lane_idx < lane_count);
++
++        match self.inner {
++            CPlaceInner::Var(local, var) => {
++                assert!(matches!(layout.abi, Abi::Vector { .. }));
++                CPlace {
++                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
++                    layout: lane_layout,
++                }
++            }
++            CPlaceInner::VarPair(_, _, _) => unreachable!(),
++            CPlaceInner::VarLane(_, _, _) => unreachable!(),
++            CPlaceInner::Addr(ptr, None) => {
++                let field_offset = lane_layout.size * lane_idx;
++                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
++                CPlace::for_ptr(field_ptr, lane_layout)
++            }
++            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
++        }
++    }
++
 +    pub(crate) fn place_index(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        index: Value,
 +    ) -> CPlace<'tcx> {
 +        let (elem_layout, ptr) = match self.layout().ty.kind() {
 +            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
 +            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
 +            _ => bug!("place_index({:?})", self.layout().ty),
 +        };
 +
 +        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
 +
 +        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
 +    }
 +
 +    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
 +        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
 +        if has_ptr_meta(fx.tcx, inner_layout.ty) {
 +            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
 +            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
 +        } else {
 +            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
 +        }
 +    }
 +
 +    pub(crate) fn place_ref(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        if has_ptr_meta(fx.tcx, self.layout().ty) {
 +            let (ptr, extra) = self.to_ptr_maybe_unsized();
 +            CValue::by_val_pair(
 +                ptr.get_addr(fx),
 +                extra.expect("unsized type without metadata"),
 +                layout,
 +            )
 +        } else {
 +            CValue::by_val(self.to_ptr().get_addr(fx), layout)
 +        }
 +    }
 +
 +    pub(crate) fn downcast_variant(
 +        self,
 +        fx: &FunctionCx<'_, '_, 'tcx>,
 +        variant: VariantIdx,
 +    ) -> Self {
 +        assert!(!self.layout().is_unsized());
 +        let layout = self.layout().for_variant(fx, variant);
 +        CPlace { inner: self.inner, layout }
 +    }
 +}
 +
 +#[track_caller]
 +pub(crate) fn assert_assignable<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    from_ty: Ty<'tcx>,
 +    to_ty: Ty<'tcx>,
 +) {
 +    match (from_ty.kind(), to_ty.kind()) {
 +        (ty::Ref(_, a, _), ty::Ref(_, b, _))
 +        | (
 +            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
 +            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
 +        ) => {
 +            assert_assignable(fx, a, b);
 +        }
 +        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
 +        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
 +            assert_assignable(fx, a, b);
 +        }
 +        (ty::FnPtr(_), ty::FnPtr(_)) => {
 +            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +                ParamEnv::reveal_all(),
 +                from_ty.fn_sig(fx.tcx),
 +            );
 +            let to_sig = fx
 +                .tcx
 +                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
 +            assert_eq!(
 +                from_sig, to_sig,
 +                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
 +                from_sig, to_sig, fx,
 +            );
 +            // fn(&T) -> for<'l> fn(&'l T) is allowed
 +        }
 +        (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
 +            for (from, to) in from_traits.iter().zip(to_traits) {
 +                let from =
 +                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
 +                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
 +                assert_eq!(
 +                    from, to,
 +                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
 +                    from_traits, to_traits, fx,
 +                );
 +            }
 +            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
 +        }
 +        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
 +            if adt_def_a.did == adt_def_b.did =>
 +        {
 +            let mut types_a = substs_a.types();
 +            let mut types_b = substs_b.types();
 +            loop {
 +                match (types_a.next(), types_b.next()) {
 +                    (Some(a), Some(b)) => assert_assignable(fx, a, b),
 +                    (None, None) => return,
 +                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
 +                }
 +            }
 +        }
 +        _ => {
 +            assert_eq!(
 +                from_ty, to_ty,
 +                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
 +                from_ty, to_ty, fx,
 +            );
 +        }
 +    }
 +}
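The deliberate holes in `assert_assignable` (signatures compared only after erasing late-bound regions) match coercions that are legal in surface Rust, for example:

```rust
fn takes(_: &u8) {}

fn main() {
    // `fn(&T) -> for<'l> fn(&'l T) is allowed`: after erasing late-bound
    // regions, the two signatures below are identical.
    let higher_ranked: for<'l> fn(&'l u8) = takes;
    let concrete: fn(&'static u8) = higher_ranked; // fine via subtyping
    concrete(&1);
}
```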
index 1b31587430887e3931829a4c81f3c86e109e1b75,0000000000000000000000000000000000000000..f97d416b66f9ff8cabf19755b97f221dd256e9ea
mode 100644,000000..100644
--- /dev/null
@@@ -1,79 -1,0 +1,79 @@@
-         pointer_ty(fx.tcx),
 +//! Codegen vtables and vtable accesses.
 +//!
 +//! See `rustc_codegen_ssa/src/meth.rs` for reference.
 +
 +use crate::constant::data_id_for_alloc_id;
 +use crate::prelude::*;
 +
 +pub(crate) fn vtable_memflags() -> MemFlags {
 +    let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
 +    flags.set_readonly(); // A vtable is always read-only.
 +    flags
 +}
 +
 +pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
-         pointer_ty(fx.tcx),
++        fx.pointer_type,
 +        vtable_memflags(),
 +        vtable,
 +        (ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
-         pointer_ty(fx.tcx),
++        fx.pointer_type,
 +        vtable_memflags(),
 +        vtable,
 +        (ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
 +    fx.bcx.ins().load(
-         pointer_ty(fx.tcx),
++        fx.pointer_type,
 +        vtable_memflags(),
 +        vtable,
 +        (ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
 +    )
 +}
 +
 +pub(crate) fn get_ptr_and_method_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    arg: CValue<'tcx>,
 +    idx: usize,
 +) -> (Value, Value) {
 +    let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
 +        arg.load_scalar_pair(fx)
 +    } else {
 +        let (ptr, vtable) = arg.try_to_ptr().unwrap();
 +        (ptr.get_addr(fx), vtable.unwrap())
 +    };
 +
 +    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
 +    let func_ref = fx.bcx.ins().load(
++        fx.pointer_type,
 +        vtable_memflags(),
 +        vtable,
 +        (idx * usize_size as usize) as i32,
 +    );
 +    (ptr, func_ref)
 +}
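The constant-offset loads above rely on the fixed layout of rustc vtables, where each slot is one `usize` wide; a sketch of that layout and the offset math (slot order per `ty::COMMON_VTABLE_ENTRIES`):

```rust
// slot 0:   drop_in_place     (COMMON_VTABLE_ENTRIES_DROPINPLACE)
// slot 1:   size of the type  (COMMON_VTABLE_ENTRIES_SIZE)
// slot 2:   align of the type (COMMON_VTABLE_ENTRIES_ALIGN)
// slot 3..: trait methods, which `get_ptr_and_method_ref` indexes by `idx`
fn vtable_byte_offset(idx: usize) -> usize {
    idx * std::mem::size_of::<usize>() // mirrors `(idx * usize_size) as i32`
}

fn main() {
    // The first method slot sits three usize slots in (24 bytes on 64-bit).
    assert_eq!(vtable_byte_offset(3), 3 * std::mem::size_of::<usize>());
}
```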
 +
 +pub(crate) fn get_vtable<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    ty: Ty<'tcx>,
 +    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
 +) -> Value {
 +    let alloc_id = fx.tcx.vtable_allocation(ty, trait_ref);
 +    let data_id =
 +        data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("vtable: {:?}", alloc_id));
 +    }
 +    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +}
index 43937588b481da1005f68642316a3b2e3fa62c4c,0000000000000000000000000000000000000000..26605003c42008cd8c3baf101e622e5ea4497013
mode 100755,000000..100755
--- /dev/null
@@@ -1,153 -1,0 +1,153 @@@
- //! $ rustc y.rs -o build/y.bin
- //! $ build/y.bin
 +#!/usr/bin/env bash
 +#![allow()] /*This line is ignored by bash
 +# This block is ignored by rustc
 +set -e
 +echo "[BUILD] y.rs" 1>&2
 +rustc $0 -o ${0/.rs/.bin} -g
 +exec ${0/.rs/.bin} $@
 +*/
 +
 +//! The build system for cg_clif
 +//!
 +//! # Manual compilation
 +//!
 +//! If your system doesn't support shell scripts, you can manually compile and run this file, for
 +//! example:
 +//!
 +//! ```shell
++//! $ rustc y.rs -o y.bin
++//! $ ./y.bin
 +//! ```
 +//!
 +//! # Naming
 +//!
 +//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
 +
 +use std::env;
 +use std::path::PathBuf;
 +use std::process;
 +
 +#[path = "build_system/build_backend.rs"]
 +mod build_backend;
 +#[path = "build_system/build_sysroot.rs"]
 +mod build_sysroot;
 +#[path = "build_system/config.rs"]
 +mod config;
 +#[path = "build_system/prepare.rs"]
 +mod prepare;
 +#[path = "build_system/rustc_info.rs"]
 +mod rustc_info;
 +#[path = "build_system/utils.rs"]
 +mod utils;
 +
 +fn usage() {
 +    eprintln!("Usage:");
 +    eprintln!("  ./y.rs prepare");
 +    eprintln!("  ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]");
 +}
 +
 +macro_rules! arg_error {
 +    ($($err:tt)*) => {{
 +        eprintln!($($err)*);
 +        usage();
 +        std::process::exit(1);
 +    }};
 +}
 +
 +enum Command {
 +    Build,
 +}
 +
 +#[derive(Copy, Clone)]
 +enum SysrootKind {
 +    None,
 +    Clif,
 +    Llvm,
 +}
 +
 +fn main() {
 +    env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
 +    env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
 +    // The target dir is expected in the default location. Guard against the user changing it.
 +    env::set_var("CARGO_TARGET_DIR", "target");
 +
 +    let mut args = env::args().skip(1);
 +    let command = match args.next().as_deref() {
 +        Some("prepare") => {
 +            if args.next().is_some() {
 +                arg_error!("./x.rs prepare doesn't expect arguments");
 +            }
 +            prepare::prepare();
 +            process::exit(0);
 +        }
 +        Some("build") => Command::Build,
 +        Some(flag) if flag.starts_with('-') => arg_error!("Expected command, found flag {}", flag),
 +        Some(command) => arg_error!("Unknown command {}", command),
 +        None => {
 +            usage();
 +            process::exit(0);
 +        }
 +    };
 +
 +    let mut target_dir = PathBuf::from("build");
 +    let mut channel = "release";
 +    let mut sysroot_kind = SysrootKind::Clif;
 +    while let Some(arg) = args.next().as_deref() {
 +        match arg {
 +            "--target-dir" => {
 +                target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
 +                    arg_error!("--target-dir requires argument");
 +                }))
 +            }
 +            "--debug" => channel = "debug",
 +            "--sysroot" => {
 +                sysroot_kind = match args.next().as_deref() {
 +                    Some("none") => SysrootKind::None,
 +                    Some("clif") => SysrootKind::Clif,
 +                    Some("llvm") => SysrootKind::Llvm,
 +                    Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
 +                    None => arg_error!("--sysroot requires argument"),
 +                }
 +            }
 +            flag if flag.starts_with('-') => arg_error!("Unknown flag {}", flag),
 +            arg => arg_error!("Unexpected argument {}", arg),
 +        }
 +    }
 +
 +    let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
 +        host_triple
 +    } else if let Some(host_triple) = crate::config::get_value("host") {
 +        host_triple
 +    } else {
 +        rustc_info::get_host_triple()
 +    };
 +    let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
 +        if !target_triple.is_empty() {
 +            target_triple
 +        } else {
 +            host_triple.clone() // Empty target triple can happen on GHA
 +        }
 +    } else if let Some(target_triple) = crate::config::get_value("target") {
 +        target_triple
 +    } else {
 +        host_triple.clone()
 +    };
 +
 +    if target_triple.ends_with("-msvc") {
 +        eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
 +        eprintln!("Switch to the MinGW toolchain for Windows support.");
 +        eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
 +        eprintln!("set the global default target to MinGW");
 +        process::exit(1);
 +    }
 +
 +    let cg_clif_build_dir = build_backend::build_backend(channel, &host_triple);
 +    build_sysroot::build_sysroot(
 +        channel,
 +        sysroot_kind,
 +        &target_dir,
 +        cg_clif_build_dir,
 +        &host_triple,
 +        &target_triple,
 +    );
 +}