git.lizzy.rs Git - rust.git/commitdiff
Merge commit '2bb3996244cf1b89878da9e39841e9f6bf061602' into sync_cg_clif-2022-12-14
authorbjorn3 <17426603+bjorn3@users.noreply.github.com>
Wed, 14 Dec 2022 18:30:46 +0000 (19:30 +0100)
committerbjorn3 <17426603+bjorn3@users.noreply.github.com>
Wed, 14 Dec 2022 18:30:46 +0000 (19:30 +0100)
53 files changed:
1  2 
compiler/rustc_codegen_cranelift/.cirrus.yml
compiler/rustc_codegen_cranelift/.github/workflows/main.yml
compiler/rustc_codegen_cranelift/.github/workflows/nightly-cranelift.yml
compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml
compiler/rustc_codegen_cranelift/.gitignore
compiler/rustc_codegen_cranelift/.vscode/settings.json
compiler/rustc_codegen_cranelift/Cargo.lock
compiler/rustc_codegen_cranelift/Cargo.toml
compiler/rustc_codegen_cranelift/Readme.md
compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
compiler/rustc_codegen_cranelift/build_system/abi_cafe.rs
compiler/rustc_codegen_cranelift/build_system/build_backend.rs
compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
compiler/rustc_codegen_cranelift/build_system/mod.rs
compiler/rustc_codegen_cranelift/build_system/path.rs
compiler/rustc_codegen_cranelift/build_system/prepare.rs
compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
compiler/rustc_codegen_cranelift/build_system/tests.rs
compiler/rustc_codegen_cranelift/build_system/utils.rs
compiler/rustc_codegen_cranelift/clean_all.sh
compiler/rustc_codegen_cranelift/config.txt
compiler/rustc_codegen_cranelift/docs/usage.md
compiler/rustc_codegen_cranelift/example/issue-72793.rs
compiler/rustc_codegen_cranelift/example/mini_core.rs
compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
compiler/rustc_codegen_cranelift/example/std_example.rs
compiler/rustc_codegen_cranelift/rust-toolchain
compiler/rustc_codegen_cranelift/rustfmt.toml
compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
compiler/rustc_codegen_cranelift/scripts/rustdoc-clif.rs
compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
compiler/rustc_codegen_cranelift/src/abi/mod.rs
compiler/rustc_codegen_cranelift/src/allocator.rs
compiler/rustc_codegen_cranelift/src/base.rs
compiler/rustc_codegen_cranelift/src/cast.rs
compiler/rustc_codegen_cranelift/src/common.rs
compiler/rustc_codegen_cranelift/src/constant.rs
compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
compiler/rustc_codegen_cranelift/src/discriminant.rs
compiler/rustc_codegen_cranelift/src/driver/jit.rs
compiler/rustc_codegen_cranelift/src/driver/mod.rs
compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
compiler/rustc_codegen_cranelift/src/main_shim.rs
compiler/rustc_codegen_cranelift/src/num.rs
compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
compiler/rustc_codegen_cranelift/src/value_and_place.rs
compiler/rustc_codegen_cranelift/test.sh
compiler/rustc_codegen_cranelift/y.rs

index 732edd66196d7de4c3d8d18e0c944e05d279906c,0000000000000000000000000000000000000000..d627c2ee09c4ef7555f820cc2a239b73c3133269
mode 100644,000000..100644
--- /dev/null
@@@ -1,25 -1,0 +1,23 @@@
-     - git config --global user.email "user@example.com"
-     - git config --global user.name "User"
 +task:
 +  name: freebsd
 +  freebsd_instance:
 +    image: freebsd-12-1-release-amd64
 +  setup_rust_script:
 +    - pkg install -y curl git bash
 +    - curl https://sh.rustup.rs -sSf --output rustup.sh
 +    - sh rustup.sh --default-toolchain none -y --profile=minimal
 +  cargo_bin_cache:
 +    folder: ~/.cargo/bin
 +  target_cache:
 +    folder: target
 +  prepare_script:
 +    - . $HOME/.cargo/env
 +    - ./y.rs prepare
 +  test_script:
 +    - . $HOME/.cargo/env
 +    - # Enable backtraces for easier debugging
 +    - export RUST_BACKTRACE=1
 +    - # Reduce amount of benchmark runs as they are slow
 +    - export COMPILE_RUNS=2
 +    - export RUN_RUNS=2
 +    - ./y.rs test
index 5061010c86cd3451d4b8e7858b05cecf7efc8631,0000000000000000000000000000000000000000..a6bb12a66a247d66441c390105655e9ff0910978
mode 100644,000000..100644
--- /dev/null
@@@ -1,221 -1,0 +1,227 @@@
-           - os: ubuntu-latest
 +name: CI
 +
 +on:
 +  - push
 +  - pull_request
 +
 +jobs:
 +  rustfmt:
 +    runs-on: ubuntu-latest
 +    timeout-minutes: 10
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Install rustfmt
 +      run: |
 +        rustup component add rustfmt
 +
 +    - name: Rustfmt
 +      run: |
 +        cargo fmt --check
++        rustfmt --check build_system/mod.rs
 +
 +  build:
 +    runs-on: ${{ matrix.os }}
 +    timeout-minutes: 60
 +
 +    strategy:
 +      fail-fast: false
 +      matrix:
 +        include:
-       uses: actions/cache@v2
++          - os: ubuntu-20.04 # FIXME switch to ubuntu-22.04 once #1303 is fixed
 +            env:
 +              TARGET_TRIPLE: x86_64-unknown-linux-gnu
 +          - os: macos-latest
 +            env:
 +              TARGET_TRIPLE: x86_64-apple-darwin
 +          # cross-compile from Linux to Windows using mingw
 +          - os: ubuntu-latest
 +            env:
 +              TARGET_TRIPLE: x86_64-pc-windows-gnu
 +          - os: ubuntu-latest
 +            env:
 +              TARGET_TRIPLE: aarch64-unknown-linux-gnu
++          # s390x requires QEMU 6.1 or greater, we could build it from source, but ubuntu 22.04 comes with 6.2 by default
++          - os: ubuntu-latest
++            env:
++              TARGET_TRIPLE: s390x-unknown-linux-gnu
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Cache cargo installed crates
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
-         path: target
++      uses: actions/cache@v3
 +      with:
-     - name: Prepare dependencies
++        path: build/cg_clif
 +        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Install MinGW toolchain and wine
 +      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
 +      run: |
 +        sudo apt-get update
 +        sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
 +        rustup target add x86_64-pc-windows-gnu
 +
 +    - name: Install AArch64 toolchain and qemu
 +      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
 +      run: |
 +        sudo apt-get update
 +        sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
 +
-         git config --global user.email "user@example.com"
-         git config --global user.name "User"
-         ./y.rs prepare
++    - name: Install s390x toolchain and qemu
++      if: matrix.env.TARGET_TRIPLE == 's390x-unknown-linux-gnu'
 +      run: |
-       run: tar cvfJ cg_clif.tar.xz build
++        sudo apt-get update
++        sudo apt-get install -y gcc-s390x-linux-gnu qemu-user
++
++    - name: Prepare dependencies
++      run: ./y.rs prepare
 +
 +    - name: Build without unstable features
 +      env:
 +        TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
 +      # This is the config rust-lang/rust uses for builds
 +      run: ./y.rs build --no-unstable-features
 +
 +    - name: Build
 +      run: ./y.rs build --sysroot none
 +
 +    - name: Test
 +      env:
 +        TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        # Reduce amount of benchmark runs as they are slow
 +        export COMPILE_RUNS=2
 +        export RUN_RUNS=2
 +
 +        # Enable extra checks
 +        export CG_CLIF_ENABLE_VERIFIER=1
 +
 +        ./y.rs test
 +
 +    - name: Package prebuilt cg_clif
-       uses: actions/upload-artifact@v2
++      run: tar cvfJ cg_clif.tar.xz dist
 +
 +    - name: Upload prebuilt cg_clif
 +      if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
 +      uses: actions/upload-artifact@v2
 +      with:
 +        name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
 +        path: cg_clif.tar.xz
 +
 +    - name: Upload prebuilt cg_clif (cross compile)
 +      if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
-       uses: actions/cache@v2
++      uses: actions/upload-artifact@v3
 +      with:
 +        name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
 +        path: cg_clif.tar.xz
 +
 +  windows:
 +    runs-on: ${{ matrix.os }}
 +    timeout-minutes: 60
 +
 +    strategy:
 +      fail-fast: false
 +      matrix:
 +        include:
 +          # Native Windows build with MSVC
 +          - os: windows-latest
 +            env:
 +              TARGET_TRIPLE: x86_64-pc-windows-msvc
 +          # cross-compile from Windows to Windows MinGW
 +          - os: windows-latest
 +            env:
 +              TARGET_TRIPLE: x86_64-pc-windows-gnu
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Cache cargo installed crates
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
-         path: target
++      uses: actions/cache@v3
 +      with:
-         git config --global user.email "user@example.com"
-         git config --global user.name "User"
++        path: build/cg_clif
 +        key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Set MinGW as the default toolchain
 +      if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
 +      run: rustup set default-host x86_64-pc-windows-gnu
 +
 +    - name: Prepare dependencies
 +      run: |
-         
 +        git config --global core.autocrlf false
 +        rustc y.rs -o y.exe -g
 +        ./y.exe prepare
 +
 +    - name: Build without unstable features
 +      env:
 +        TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
 +      # This is the config rust-lang/rust uses for builds
 +      run: ./y.rs build --no-unstable-features
 +
 +    - name: Build
 +      run: ./y.rs build --sysroot none
 +
 +    - name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        $Env:RUST_BACKTRACE=1
 +
 +        # Reduce amount of benchmark runs as they are slow
 +        $Env:COMPILE_RUNS=2
 +        $Env:RUN_RUNS=2
 +
 +        # Enable extra checks
 +        $Env:CG_CLIF_ENABLE_VERIFIER=1
-         
++
 +        # WIP Disable some tests
-         
-         # This fails with a different output than expected 
++
 +        # This fails due to some weird argument handling by hyperfine, not an actual regression
 +        # more of a build system issue
 +        (Get-Content config.txt) -replace '(bench.simple-raytracer)', '# $1' |  Out-File config.txt
-       run: tar cvf cg_clif.tar build
++
++        # This fails with a different output than expected
 +        (Get-Content config.txt) -replace '(test.regex-shootout-regex-dna)', '# $1' |  Out-File config.txt
 +
 +        ./y.exe test
 +
 +    - name: Package prebuilt cg_clif
 +      # don't use compression as xzip isn't supported by tar on windows and bzip2 hangs
-       uses: actions/upload-artifact@v2
++      run: tar cvf cg_clif.tar dist
 +
 +    - name: Upload prebuilt cg_clif
++      uses: actions/upload-artifact@v3
 +      with:
 +        name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
 +        path: cg_clif.tar
index 0a3e7ca073b45debb68785811cf0f727a7e0bde7,0000000000000000000000000000000000000000..d0d58d2a7eacbd19a07442af167bdb0e2926274a
mode 100644,000000..100644
--- /dev/null
@@@ -1,59 -1,0 +1,59 @@@
-       uses: actions/cache@v2
 +name: Test nightly Cranelift
 +
 +on:
 +  push:
 +  schedule:
 +    - cron: '17 1 * * *' # At 01:17 UTC every day.
 +
 +jobs:
 +  build:
 +    runs-on: ubuntu-latest
 +    timeout-minutes: 60
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Cache cargo installed crates
++      uses: actions/cache@v3
 +      with:
 +        path: ~/.cargo/bin
 +        key: ubuntu-latest-cargo-installed-crates
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        ./y.rs prepare
 +
 +    - name: Patch Cranelift
 +      run: |
 +        sed -i 's/cranelift-codegen = { version = "\w*.\w*.\w*", features = \["unwind", "all-arch"\] }/cranelift-codegen = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git", features = ["unwind", "all-arch"] }/' Cargo.toml
 +        sed -i 's/cranelift-frontend = "\w*.\w*.\w*"/cranelift-frontend = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
 +        sed -i 's/cranelift-module = "\w*.\w*.\w*"/cranelift-module = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
 +        sed -i 's/cranelift-native = "\w*.\w*.\w*"/cranelift-native = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
 +        sed -i 's/cranelift-jit = { version = "\w*.\w*.\w*", optional = true }/cranelift-jit = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git", optional = true }/' Cargo.toml
 +        sed -i 's/cranelift-object = "\w*.\w*.\w*"/cranelift-object = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
 +
 +        sed -i 's/object = { version = "0.27.0"/object = { version = "0.28.0"/' Cargo.toml
 +
 +        cat Cargo.toml
 +
 +    - name: Build without unstable features
 +      # This is the config rust-lang/rust uses for builds
 +      run: ./y.rs build --no-unstable-features
 +
 +    - name: Build
 +      run: ./y.rs build --sysroot none
 +    - name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        # Reduce amount of benchmark runs as they are slow
 +        export COMPILE_RUNS=2
 +        export RUN_RUNS=2
 +
 +        # Enable extra checks
 +        export CG_CLIF_ENABLE_VERIFIER=1
 +
 +        ./test.sh
index b8a98b83ebe5eb1a5d292f5fa6b2b7df1168a5a9,0000000000000000000000000000000000000000..bef806318efa836aeeb8d6df06d880b77be677fb
mode 100644,000000..100644
--- /dev/null
@@@ -1,82 -1,0 +1,82 @@@
-       uses: actions/cache@v2
 +name: Various rustc tests
 +
 +on:
 +  - push
 +
 +jobs:
 +  bootstrap_rustc:
 +    runs-on: ubuntu-latest
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Cache cargo installed crates
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
-         path: target
++      uses: actions/cache@v3
 +      with:
-       uses: actions/cache@v2
++        path: build/cg_clif
 +        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        ./y.rs prepare
 +
 +    - name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        ./scripts/test_bootstrap.sh
 +  rustc_test_suite:
 +    runs-on: ubuntu-latest
 +
 +    steps:
 +    - uses: actions/checkout@v3
 +
 +    - name: Cache cargo installed crates
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: ~/.cargo/bin
 +        key: ${{ runner.os }}-cargo-installed-crates
 +
 +    - name: Cache cargo registry and index
-       uses: actions/cache@v2
++      uses: actions/cache@v3
 +      with:
 +        path: |
 +            ~/.cargo/registry
 +            ~/.cargo/git
 +        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
 +
 +    - name: Cache cargo target dir
-         path: target
++      uses: actions/cache@v3
 +      with:
++        path: build/cg_clif
 +        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
 +
 +    - name: Prepare dependencies
 +      run: |
 +        git config --global user.email "user@example.com"
 +        git config --global user.name "User"
 +        ./y.rs prepare
 +
 +    - name: Test
 +      run: |
 +        # Enable backtraces for easier debugging
 +        export RUST_BACKTRACE=1
 +
 +        ./scripts/test_rustc_tests.sh
index fae09592c6ac0dc91be2641093896c0adc9559d8,0000000000000000000000000000000000000000..b443fd58a1b98344bec2720e26139b6467907d89
mode 100644,000000..100644
--- /dev/null
@@@ -1,18 -1,0 +1,19 @@@
 +target
 +**/*.rs.bk
 +*.rlib
 +*.o
 +perf.data
 +perf.data.old
 +*.events
 +*.string*
 +/y.bin
 +/y.bin.dSYM
 +/y.exe
 +/y.pdb
 +/build
 +/build_sysroot/sysroot_src
 +/build_sysroot/compiler-builtins
 +/build_sysroot/rustc_version
++/dist
 +/rust
 +/download
index 13301bf20a5ed7d57051c9b45fa4165a98e8b7b5,0000000000000000000000000000000000000000..bc914e37d2b51dda8d3a0e4ef090a4cc1399fc90
mode 100644,000000..100644
--- /dev/null
@@@ -1,73 -1,0 +1,44 @@@
-     "rust-analyzer.cargo.features": ["unstable-features"],
 +{
 +    // source for rustc_* is not included in the rust-src component; disable the errors about this
 +    "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "unresolved-macro-call"],
 +    "rust-analyzer.imports.granularity.enforce": true,
 +    "rust-analyzer.imports.granularity.group": "module",
 +    "rust-analyzer.imports.prefix": "crate",
-         //"./build_sysroot/sysroot_src/library/std/Cargo.toml",
++    "rust-analyzer.cargo.features": ["unstable-features", "__check_build_system_using_ra"],
 +    "rust-analyzer.linkedProjects": [
 +        "./Cargo.toml",
-             "roots": [
-                 "./example/mini_core.rs",
-                 "./example/mini_core_hello_world.rs",
-                 "./example/mod_bench.rs"
-             ],
 +        {
-             "roots": ["./example/std_example.rs"],
 +            "crates": [
 +                {
 +                    "root_module": "./example/mini_core.rs",
 +                    "edition": "2018",
 +                    "deps": [],
 +                    "cfg": [],
 +                },
 +                {
 +                    "root_module": "./example/mini_core_hello_world.rs",
 +                    "edition": "2018",
 +                    "deps": [{ "crate": 0, "name": "mini_core" }],
 +                    "cfg": [],
 +                },
 +                {
 +                    "root_module": "./example/mod_bench.rs",
 +                    "edition": "2018",
 +                    "deps": [],
 +                    "cfg": [],
 +                },
 +            ]
 +        },
 +        {
-                     "edition": "2018",
-                     "deps": [{ "crate": 1, "name": "std" }],
-                     "cfg": [],
-                 },
-                 {
-                     "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
-                     "edition": "2018",
-                     "deps": [],
-                     "cfg": [],
-                 },
-             ]
-         },
-         {
-             "roots": ["./y.rs"],
-             "crates": [
-                 {
-                     "root_module": "./y.rs",
-                     "edition": "2018",
-                     "deps": [{ "crate": 1, "name": "std" }],
-                     "cfg": [],
-                 },
-                 {
-                     "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
-                     "edition": "2018",
++            "sysroot_src": "./build_sysroot/sysroot_src/library",
 +            "crates": [
 +                {
 +                    "root_module": "./example/std_example.rs",
++                    "edition": "2015",
 +                    "deps": [],
 +                    "cfg": [],
 +                },
 +            ]
 +        }
 +    ]
 +}
index 3b406036c356e9e3023964f049184a2d1f0121f5,0000000000000000000000000000000000000000..e4d3e9ca5ae0a8676863063216c420232248a554
mode 100644,000000..100644
--- /dev/null
@@@ -1,430 -1,0 +1,471 @@@
- version = "1.0.60"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "ahash"
 +version = "0.7.6"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
 +dependencies = [
 + "getrandom",
 + "once_cell",
 + "version_check",
 +]
 +
 +[[package]]
 +name = "anyhow"
- checksum = "c794e162a5eff65c72ef524dfe393eb923c354e350bb78b9c7383df13f3bc142"
++version = "1.0.66"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "3.11.0"
++checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
 +
 +[[package]]
 +name = "arrayvec"
 +version = "0.7.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.1.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 +
 +[[package]]
 +name = "bitflags"
 +version = "1.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 +
 +[[package]]
 +name = "bumpalo"
- checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d"
++version = "3.11.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
 +
 +[[package]]
 +name = "byteorder"
 +version = "1.4.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "1.0.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 +
 +[[package]]
 +name = "cranelift-bforest"
- checksum = "44409ccf2d0f663920cab563d2b79fcd6b2e9a2bcc6e929fef76c8f82ad6c17a"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "b62c772976416112fa4484cbd688cb6fb35fd430005c1c586224fc014018abad"
 +dependencies = [
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen"
- checksum = "98de2018ad96eb97f621f7d6b900a0cc661aec8d02ea4a50e56ecb48e5a2fcaf"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "9b40ed2dd13c2ac7e24f88a3090c68ad3414eb1d066a95f8f1f7b3b819cb4e46"
 +dependencies = [
 + "arrayvec",
 + "bumpalo",
 + "cranelift-bforest",
 + "cranelift-codegen-meta",
 + "cranelift-codegen-shared",
++ "cranelift-egraph",
 + "cranelift-entity",
 + "cranelift-isle",
 + "gimli",
 + "log",
 + "regalloc2",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-meta"
- checksum = "5287ce36e6c4758fbaf298bd1a8697ad97a4f2375a3d1b61142ea538db4877e5"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "bb927a8f1c27c34ee3759b6b0ffa528d2330405d5cc4511f0cab33fe2279f4b5"
 +dependencies = [
 + "cranelift-codegen-shared",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-shared"
- checksum = "2855c24219e2f08827f3f4ffb2da92e134ae8d8ecc185b11ec8f9878cf5f588e"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "43dfa417b884a9ab488d95fd6b93b25e959321fe7bfd7a0a960ba5d7fb7ab927"
++
++[[package]]
++name = "cranelift-egraph"
++version = "0.90.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "e0a66b39785efd8513d2cca967ede56d6cc57c8d7986a595c7c47d0c78de8dce"
++dependencies = [
++ "cranelift-entity",
++ "fxhash",
++ "hashbrown",
++ "indexmap",
++ "log",
++ "smallvec",
++]
 +
 +[[package]]
 +name = "cranelift-entity"
- checksum = "0b65673279d75d34bf11af9660ae2dbd1c22e6d28f163f5c72f4e1dc56d56103"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "0637ffde963cb5d759bc4d454cfa364b6509e6c74cdaa21298add0ed9276f346"
 +
 +[[package]]
 +name = "cranelift-frontend"
- checksum = "3ed2b3d7a4751163f6c4a349205ab1b7d9c00eecf19dcea48592ef1f7688eefc"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "fb72b8342685e850cb037350418f62cc4fc55d6c2eb9c7ca01b82f9f1a6f3d56"
 +dependencies = [
 + "cranelift-codegen",
 + "log",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-isle"
- checksum = "3be64cecea9d90105fc6a2ba2d003e98c867c1d6c4c86cc878f97ad9fb916293"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "850579cb9e4b448f7c301f1e6e6cbad99abe3f1f1d878a4994cb66e33c6db8cd"
 +
 +[[package]]
 +name = "cranelift-jit"
- checksum = "f98ed42a70a0c9c388e34ec9477f57fc7300f541b1e5136a0e2ea02b1fac6015"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "9add822ad66dcbe152b5ab57de10240a2df4505099f2f6c27159acb711890bd4"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "cranelift-module",
 + "cranelift-native",
 + "libc",
 + "log",
 + "region",
 + "target-lexicon",
++ "wasmtime-jit-icache-coherence",
 + "windows-sys",
 +]
 +
 +[[package]]
 +name = "cranelift-module"
- checksum = "d658ac7f156708bfccb647216cc8b9387469f50d352ba4ad80150541e4ae2d49"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "406b772626fc2664864cf947f3895a23b619895c7fff635f3622e2d857f4492f"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 +]
 +
 +[[package]]
 +name = "cranelift-native"
- checksum = "c4a03a6ac1b063e416ca4b93f6247978c991475e8271465340caa6f92f3c16a4"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.88.1"
++checksum = "2d0a279e5bcba3e0466c734d8d8eb6bfc1ad29e95c37f3e4955b492b5616335e"
 +dependencies = [
 + "cranelift-codegen",
 + "libc",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-object"
- checksum = "eef0b4119b645b870a43a036d76c0ada3a076b1f82e8b8487659304c8b09049b"
++version = "0.90.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.7"
++checksum = "39793c550f0c1d7db96c2fc1324583670c8143befe6edbfbaf1c68aba53be983"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-module",
 + "log",
 + "object",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "crc32fast"
 +version = "1.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
 +dependencies = [
 + "cfg-if",
 +]
 +
++[[package]]
++name = "fallible-iterator"
++version = "0.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
++
 +[[package]]
 +name = "fxhash"
 +version = "0.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
 +dependencies = [
 + "byteorder",
 +]
 +
 +[[package]]
 +name = "getrandom"
- checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
++version = "0.2.8"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.9.1"
++checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
 +dependencies = [
 + "cfg-if",
 + "libc",
 + "wasi",
 +]
 +
 +[[package]]
 +name = "gimli"
 +version = "0.26.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
 +dependencies = [
++ "fallible-iterator",
 + "indexmap",
++ "stable_deref_trait",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.12.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 +dependencies = [
 + "ahash",
 +]
 +
 +[[package]]
 +name = "indexmap"
- checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
++version = "1.9.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.127"
++checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
 +dependencies = [
 + "autocfg",
 + "hashbrown",
 +]
 +
 +[[package]]
 +name = "libc"
- checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b"
++version = "0.2.138"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.7.3"
++checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
 +
 +[[package]]
 +name = "libloading"
- checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd"
++version = "0.7.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.13.0"
++checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
 +dependencies = [
 + "cfg-if",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "log"
 +version = "0.4.17"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "mach"
 +version = "0.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
 +dependencies = [
 + "libc",
 +]
 +
 +[[package]]
 +name = "memchr"
 +version = "2.5.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
 +
 +[[package]]
 +name = "object"
 +version = "0.29.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
 +dependencies = [
 + "crc32fast",
 + "hashbrown",
 + "indexmap",
 + "memchr",
 +]
 +
 +[[package]]
 +name = "once_cell"
- checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
++version = "1.16.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.3.2"
++checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
 +
 +[[package]]
 +name = "regalloc2"
- checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779"
++version = "0.4.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.9.0"
++checksum = "91b2eab54204ea0117fe9a060537e0b07a4e72f7c7d182361ecc346cab2240e5"
 +dependencies = [
 + "fxhash",
 + "log",
 + "slice-group-by",
 + "smallvec",
 +]
 +
 +[[package]]
 +name = "region"
 +version = "2.2.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
 +dependencies = [
 + "bitflags",
 + "libc",
 + "mach",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +dependencies = [
 + "cranelift-codegen",
 + "cranelift-frontend",
 + "cranelift-jit",
 + "cranelift-module",
 + "cranelift-native",
 + "cranelift-object",
 + "gimli",
 + "indexmap",
 + "libloading",
 + "object",
 + "once_cell",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "slice-group-by"
 +version = "0.3.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
 +
 +[[package]]
 +name = "smallvec"
- checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
++version = "1.10.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.12.4"
++checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
++
++[[package]]
++name = "stable_deref_trait"
++version = "1.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 +
 +[[package]]
 +name = "target-lexicon"
- checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
++version = "0.12.5"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d"
 +
 +[[package]]
 +name = "version_check"
 +version = "0.9.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 +
 +[[package]]
 +name = "wasi"
 +version = "0.11.0+wasi-snapshot-preview1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 +
++[[package]]
++name = "wasmtime-jit-icache-coherence"
++version = "2.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "e6bbabb309c06cc238ee91b1455b748c45f0bdcab0dda2c2db85b0a1e69fcb66"
++dependencies = [
++ "cfg-if",
++ "libc",
++ "windows-sys",
++]
++
 +[[package]]
 +name = "winapi"
 +version = "0.3.9"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 +dependencies = [
 + "winapi-i686-pc-windows-gnu",
 + "winapi-x86_64-pc-windows-gnu",
 +]
 +
 +[[package]]
 +name = "winapi-i686-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 +
 +[[package]]
 +name = "winapi-x86_64-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 +
 +[[package]]
 +name = "windows-sys"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
 +dependencies = [
 + "windows_aarch64_msvc",
 + "windows_i686_gnu",
 + "windows_i686_msvc",
 + "windows_x86_64_gnu",
 + "windows_x86_64_msvc",
 +]
 +
 +[[package]]
 +name = "windows_aarch64_msvc"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
 +
 +[[package]]
 +name = "windows_i686_gnu"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
 +
 +[[package]]
 +name = "windows_i686_msvc"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
 +
 +[[package]]
 +name = "windows_x86_64_gnu"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
 +
 +[[package]]
 +name = "windows_x86_64_msvc"
 +version = "0.36.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
index 0fdd5de118ccb10cb4db0776c5ca9c69b19cc07f,0000000000000000000000000000000000000000..2b216ca072f0096832412085a0eae3fda4dc79ff
mode 100644,000000..100644
--- /dev/null
@@@ -1,44 -1,0 +1,52 @@@
- cranelift-codegen = { version = "0.88.1", features = ["unwind", "all-arch"] }
- cranelift-frontend = "0.88.1"
- cranelift-module = "0.88.1"
- cranelift-native = "0.88.1"
- cranelift-jit = { version = "0.88.1", optional = true }
- cranelift-object = "0.88.1"
 +[package]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +edition = "2021"
 +
++[[bin]]
++# This is used just to teach rust-analyzer how to check the build system. required-features is used
++# to disable it for regular builds.
++name = "y"
++path = "./y.rs"
++required-features = ["__check_build_system_using_ra"]
++
 +[lib]
 +crate-type = ["dylib"]
 +
 +[dependencies]
 +# These have to be in sync with each other
++cranelift-codegen = { version = "0.90.1", features = ["unwind", "all-arch"] }
++cranelift-frontend = "0.90.1"
++cranelift-module = "0.90.1"
++cranelift-native = "0.90.1"
++cranelift-jit = { version = "0.90.1", optional = true }
++cranelift-object = "0.90.1"
 +target-lexicon = "0.12.0"
 +gimli = { version = "0.26.0", default-features = false, features = ["write"]}
 +object = { version = "0.29.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
 +
 +indexmap = "1.9.1"
 +libloading = { version = "0.7.3", optional = true }
 +once_cell = "1.10.0"
 +smallvec = "1.8.1"
 +
 +[patch.crates-io]
 +# Uncomment to use local checkout of cranelift
 +#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
 +#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
 +#cranelift-module = { path = "../wasmtime/cranelift/module" }
 +#cranelift-native = { path = "../wasmtime/cranelift/native" }
 +#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
 +#cranelift-object = { path = "../wasmtime/cranelift/object" }
 +
 +#gimli = { path = "../" }
 +
 +[features]
 +# Enable features not ready to be enabled when compiling as part of rustc
 +unstable-features = ["jit", "inline_asm"]
 +jit = ["cranelift-jit", "libloading"]
 +inline_asm = []
++__check_build_system_using_ra = []
 +
 +[package.metadata.rust-analyzer]
 +rustc_private = true
index 1e84c7fa3657b37afeb26614a49896fab8846b40,0000000000000000000000000000000000000000..0e9c77244d4cc85d765371e97370db5c0adebf11
mode 100644,000000..100644
--- /dev/null
@@@ -1,73 -1,0 +1,73 @@@
- $ $cg_clif_dir/build/cargo-clif build
 +# Cranelift codegen backend for rust
 +
 +The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/main/cranelift).
 +This has the potential to improve compilation times in debug mode.
 +If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
 +If not please open an issue.
 +
 +## Building and testing
 +
 +```bash
 +$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
 +$ cd rustc_codegen_cranelift
 +$ ./y.rs prepare # download and patch sysroot src and install hyperfine for benchmarking
 +$ ./y.rs build
 +```
 +
 +To run the test suite replace the last command with:
 +
 +```bash
 +$ ./test.sh
 +```
 +
 +This will implicitly build cg_clif too. Both `y.rs build` and `test.sh` accept a `--debug` argument to
 +build in debug mode.
 +
 +Alternatively you can download a pre built version from [GHA]. It is listed in the artifacts section
 +of workflow runs. Unfortunately due to GHA restrictions you need to be logged in to access it.
 +
 +[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
 +
 +## Usage
 +
 +rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
 +
 +Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`y.rs prepare` and `y.rs build` or `test.sh`).
 +
 +In the directory with your project (where you can do the usual `cargo build`), run:
 +
 +```bash
++$ $cg_clif_dir/dist/cargo-clif build
 +```
 +
 +This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
 +
 +For additional ways to use rustc_codegen_cranelift like the JIT mode see [usage.md](docs/usage.md).
 +
 +## Configuration
 +
 +See the documentation on the `BackendConfig` struct in [config.rs](src/config.rs) for all
 +configuration options.
 +
 +## Not yet supported
 +
 +* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
 +    * On UNIX there is support for invoking an external assembler for `global_asm!` and `asm!`.
 +* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
 +
 +## License
 +
 +Licensed under either of
 +
 +  * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
 +    http://www.apache.org/licenses/LICENSE-2.0)
 +  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
 +    http://opensource.org/licenses/MIT)
 +
 +at your option.
 +
 +### Contribution
 +
 +Unless you explicitly state otherwise, any contribution intentionally submitted
 +for inclusion in the work by you shall be dual licensed as above, without any
 +additional terms or conditions.
index f6a9cb67290c7d88d591a72b2abfafaa2df67c28,0000000000000000000000000000000000000000..bba3210536ef7832e44ba9dbb5e46c3ee036baf6
mode 100644,000000..100644
--- /dev/null
@@@ -1,343 -1,0 +1,326 @@@
- version = "0.16.0"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "addr2line"
- checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
++version = "0.17.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- [[package]]
- name = "autocfg"
- version = "1.1.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
++checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
 +dependencies = [
 + "compiler_builtins",
 + "gimli",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "adler"
 +version = "1.0.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "alloc"
 +version = "0.0.0"
 +dependencies = [
 + "compiler_builtins",
 + "core",
 +]
 +
- version = "1.0.73"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
- [[package]]
- name = "cfg-if"
- version = "0.1.10"
 +[[package]]
 +name = "cc"
- checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
- dependencies = [
-  "compiler_builtins",
-  "rustc-std-workspace-core",
- ]
++version = "1.0.77"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.1.82"
++checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "1.0.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "compiler_builtins"
- checksum = "18cd7635fea7bb481ea543b392789844c1ad581299da70184c7175ce3af76603"
++version = "0.1.85"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.25.0"
++checksum = "13e81c6cd7ab79f51a0c927d22858d61ad12bd0b3865f0b13ece02a4486aeabb"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "core"
 +version = "0.0.0"
 +
 +[[package]]
 +name = "dlmalloc"
 +version = "0.2.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "203540e710bfadb90e5e29930baf5d10270cec1f43ab34f46f78b147b2de715a"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "fortanix-sgx-abi"
 +version = "0.5.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "getopts"
 +version = "0.2.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
 +dependencies = [
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 + "unicode-width",
 +]
 +
 +[[package]]
 +name = "gimli"
- checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
++version = "0.26.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.135"
++checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.12.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hermit-abi"
 +version = "0.2.6"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "libc"
- checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
++version = "0.2.138"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.4.4"
++checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "memchr"
 +version = "2.5.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "miniz_oxide"
- checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
++version = "0.5.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
-  "autocfg",
++checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
 +dependencies = [
 + "adler",
- version = "0.26.2"
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "object"
- checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2"
++version = "0.29.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
-  "cfg-if 0.1.10",
++checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
 +dependencies = [
 + "compiler_builtins",
 + "memchr",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "panic_abort"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
-  "cfg-if 0.1.10",
++ "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "panic_unwind"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
-  "cfg-if 1.0.0",
++ "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 + "unwind",
 +]
 +
 +[[package]]
 +name = "proc_macro"
 +version = "0.0.0"
 +dependencies = [
 + "core",
 + "std",
 +]
 +
 +[[package]]
 +name = "rustc-demangle"
 +version = "0.1.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-alloc"
 +version = "1.99.0"
 +dependencies = [
 + "alloc",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-core"
 +version = "1.99.0"
 +dependencies = [
 + "core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-std"
 +version = "1.99.0"
 +dependencies = [
 + "std",
 +]
 +
 +[[package]]
 +name = "std"
 +version = "0.0.0"
 +dependencies = [
 + "addr2line",
 + "alloc",
-  "cfg-if 1.0.0",
++ "cfg-if",
 + "compiler_builtins",
 + "core",
 + "dlmalloc",
 + "fortanix-sgx-abi",
 + "hashbrown",
 + "hermit-abi",
 + "libc",
 + "miniz_oxide",
 + "object",
 + "panic_abort",
 + "panic_unwind",
 + "rustc-demangle",
 + "std_detect",
 + "unwind",
 + "wasi",
 +]
 +
 +[[package]]
 +name = "std_detect"
 +version = "0.1.5"
 +dependencies = [
-  "cfg-if 0.1.10",
++ "cfg-if",
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "sysroot"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "compiler_builtins",
 + "core",
 + "std",
 + "test",
 +]
 +
 +[[package]]
 +name = "test"
 +version = "0.0.0"
 +dependencies = [
-  "cfg-if 0.1.10",
++ "cfg-if",
 + "core",
 + "getopts",
 + "libc",
 + "panic_abort",
 + "panic_unwind",
 + "proc_macro",
 + "std",
 +]
 +
 +[[package]]
 +name = "unicode-width"
 +version = "0.1.10"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 +]
 +
 +[[package]]
 +name = "unwind"
 +version = "0.0.0"
 +dependencies = [
 + "cc",
++ "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "wasi"
 +version = "0.11.0+wasi-snapshot-preview1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
index fae5b27163680badc511023892f92fe462ddfbaf,0000000000000000000000000000000000000000..a081fdaa1c7e6475a727f764c033f07090b2b87e
mode 100644,000000..100644
--- /dev/null
@@@ -1,52 -1,0 +1,56 @@@
- use std::env;
 +use std::path::Path;
 +
 +use super::build_sysroot;
 +use super::config;
- use super::prepare;
- use super::utils::{cargo_command, spawn_and_wait};
++use super::path::Dirs;
++use super::prepare::GitRepo;
++use super::utils::{spawn_and_wait, CargoProject, Compiler};
 +use super::SysrootKind;
 +
++pub(crate) static ABI_CAFE_REPO: GitRepo =
++    GitRepo::github("Gankra", "abi-cafe", "4c6dc8c9c687e2b3a760ff2176ce236872b37212", "abi-cafe");
++
++static ABI_CAFE: CargoProject = CargoProject::new(&ABI_CAFE_REPO.source_dir(), "abi_cafe");
++
 +pub(crate) fn run(
 +    channel: &str,
 +    sysroot_kind: SysrootKind,
-     target_dir: &Path,
++    dirs: &Dirs,
 +    cg_clif_dylib: &Path,
 +    host_triple: &str,
 +    target_triple: &str,
 +) {
 +    if !config::get_bool("testsuite.abi-cafe") {
 +        eprintln!("[SKIP] abi-cafe");
 +        return;
 +    }
 +
 +    if host_triple != target_triple {
 +        eprintln!("[SKIP] abi-cafe (cross-compilation not supported)");
 +        return;
 +    }
 +
 +    eprintln!("Building sysroot for abi-cafe");
 +    build_sysroot::build_sysroot(
++        dirs,
 +        channel,
 +        sysroot_kind,
-         target_dir,
 +        cg_clif_dylib,
 +        host_triple,
 +        target_triple,
 +    );
 +
 +    eprintln!("Running abi-cafe");
-     let abi_cafe_path = prepare::ABI_CAFE.source_dir();
-     env::set_current_dir(abi_cafe_path.clone()).unwrap();
 +
 +    let pairs = ["rustc_calls_cgclif", "cgclif_calls_rustc", "cgclif_calls_cc", "cc_calls_cgclif"];
 +
-     let mut cmd = cargo_command("cargo", "run", Some(target_triple), &abi_cafe_path);
++    let mut cmd = ABI_CAFE.run(&Compiler::host(), dirs);
 +    cmd.arg("--");
 +    cmd.arg("--pairs");
 +    cmd.args(pairs);
 +    cmd.arg("--add-rustc-codegen-backend");
 +    cmd.arg(format!("cgclif:{}", cg_clif_dylib.display()));
++    cmd.current_dir(ABI_CAFE.source_dir(dirs));
 +
 +    spawn_and_wait(cmd);
 +}
index cda468bcfa2dfc2e0bfe494bd5a1a9414a97eed6,0000000000000000000000000000000000000000..fde8ef424ccc5441e0198ef7b33b23c39dbe37bc
mode 100644,000000..100644
--- /dev/null
@@@ -1,49 -1,0 +1,52 @@@
- use super::utils::{cargo_command, is_ci};
 +use std::env;
 +use std::path::PathBuf;
 +
++use super::path::{Dirs, RelPath};
 +use super::rustc_info::get_file_name;
-     let source_dir = std::env::current_dir().unwrap();
-     let mut cmd = cargo_command("cargo", "build", Some(host_triple), &source_dir);
++use super::utils::{is_ci, CargoProject, Compiler};
++
++static CG_CLIF: CargoProject = CargoProject::new(&RelPath::SOURCE, "cg_clif");
 +
 +pub(crate) fn build_backend(
++    dirs: &Dirs,
 +    channel: &str,
 +    host_triple: &str,
 +    use_unstable_features: bool,
 +) -> PathBuf {
-     source_dir
-         .join("target")
++    let mut cmd = CG_CLIF.build(&Compiler::host(), dirs);
 +
 +    cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
 +
 +    let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
 +
 +    if is_ci() {
 +        // Deny warnings on CI
 +        rustflags += " -Dwarnings";
 +
 +        // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
 +        cmd.env("CARGO_BUILD_INCREMENTAL", "false");
 +    }
 +
 +    if use_unstable_features {
 +        cmd.arg("--features").arg("unstable-features");
 +    }
 +
 +    match channel {
 +        "debug" => {}
 +        "release" => {
 +            cmd.arg("--release");
 +        }
 +        _ => unreachable!(),
 +    }
 +
 +    cmd.env("RUSTFLAGS", rustflags);
 +
 +    eprintln!("[BUILD] rustc_codegen_cranelift");
 +    super::utils::spawn_and_wait(cmd);
 +
++    CG_CLIF
++        .target_dir(dirs)
 +        .join(host_triple)
 +        .join(channel)
 +        .join(get_file_name("rustc_codegen_cranelift", "dylib"))
 +}
index 856aecc49fd1c05f476c91232c9bb0b429c4c907,0000000000000000000000000000000000000000..cbbf09b9b97b8422ab06caff3b59b34c6352dfd4
mode 100644,000000..100644
--- /dev/null
@@@ -1,222 -1,0 +1,224 @@@
- use std::path::{Path, PathBuf};
 +use std::fs;
- use super::utils::{cargo_command, spawn_and_wait, try_hard_link};
++use std::path::Path;
 +use std::process::{self, Command};
 +
++use super::path::{Dirs, RelPath};
 +use super::rustc_info::{get_file_name, get_rustc_version, get_wrapper_file_name};
-     target_dir: &Path,
++use super::utils::{spawn_and_wait, try_hard_link, CargoProject, Compiler};
 +use super::SysrootKind;
 +
++static DIST_DIR: RelPath = RelPath::DIST;
++static BIN_DIR: RelPath = RelPath::DIST.join("bin");
++static LIB_DIR: RelPath = RelPath::DIST.join("lib");
++static RUSTLIB_DIR: RelPath = LIB_DIR.join("rustlib");
++
 +pub(crate) fn build_sysroot(
++    dirs: &Dirs,
 +    channel: &str,
 +    sysroot_kind: SysrootKind,
-     if target_dir.exists() {
-         fs::remove_dir_all(target_dir).unwrap();
-     }
-     fs::create_dir_all(target_dir.join("bin")).unwrap();
-     fs::create_dir_all(target_dir.join("lib")).unwrap();
 +    cg_clif_dylib_src: &Path,
 +    host_triple: &str,
 +    target_triple: &str,
 +) {
 +    eprintln!("[BUILD] sysroot {:?}", sysroot_kind);
 +
-     let cg_clif_dylib_path = target_dir
-         .join(if cfg!(windows) {
-             // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
-             // binaries.
-             "bin"
-         } else {
-             "lib"
-         })
-         .join(get_file_name("rustc_codegen_cranelift", "dylib"));
++    DIST_DIR.ensure_fresh(dirs);
++    BIN_DIR.ensure_exists(dirs);
++    LIB_DIR.ensure_exists(dirs);
 +
 +    // Copy the backend
-     for wrapper in ["rustc-clif", "cargo-clif"] {
++    let cg_clif_dylib_path = if cfg!(windows) {
++        // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
++        // binaries.
++        BIN_DIR
++    } else {
++        LIB_DIR
++    }
++    .to_path(dirs)
++    .join(get_file_name("rustc_codegen_cranelift", "dylib"));
 +    try_hard_link(cg_clif_dylib_src, &cg_clif_dylib_path);
 +
 +    // Build and copy rustc and cargo wrappers
-             .arg(PathBuf::from("scripts").join(format!("{wrapper}.rs")))
++    for wrapper in ["rustc-clif", "rustdoc-clif", "cargo-clif"] {
 +        let wrapper_name = get_wrapper_file_name(wrapper, "bin");
 +
 +        let mut build_cargo_wrapper_cmd = Command::new("rustc");
 +        build_cargo_wrapper_cmd
-             .arg(target_dir.join(wrapper_name))
++            .arg(RelPath::SCRIPTS.to_path(dirs).join(&format!("{wrapper}.rs")))
 +            .arg("-o")
-     let rustlib = target_dir.join("lib").join("rustlib");
-     let host_rustlib_lib = rustlib.join(host_triple).join("lib");
-     let target_rustlib_lib = rustlib.join(target_triple).join("lib");
++            .arg(DIST_DIR.to_path(dirs).join(wrapper_name))
 +            .arg("-g");
 +        spawn_and_wait(build_cargo_wrapper_cmd);
 +    }
 +
 +    let default_sysroot = super::rustc_info::get_default_sysroot();
 +
-             build_clif_sysroot_for_triple(
-                 channel,
-                 target_dir,
-                 host_triple,
-                 &cg_clif_dylib_path,
-                 None,
-             );
++    let host_rustlib_lib = RUSTLIB_DIR.to_path(dirs).join(host_triple).join("lib");
++    let target_rustlib_lib = RUSTLIB_DIR.to_path(dirs).join(target_triple).join("lib");
 +    fs::create_dir_all(&host_rustlib_lib).unwrap();
 +    fs::create_dir_all(&target_rustlib_lib).unwrap();
 +
 +    if target_triple == "x86_64-pc-windows-gnu" {
 +        if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
 +            eprintln!(
 +                "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
 +                to compile a sysroot for it.",
 +            );
 +            process::exit(1);
 +        }
 +        for file in fs::read_dir(
 +            default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
 +        )
 +        .unwrap()
 +        {
 +            let file = file.unwrap().path();
 +            if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
 +                continue; // only copy object files
 +            }
 +            try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
 +        }
 +    }
 +
 +    match sysroot_kind {
 +        SysrootKind::None => {} // Nothing to do
 +        SysrootKind::Llvm => {
 +            for file in fs::read_dir(
 +                default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
 +            )
 +            .unwrap()
 +            {
 +                let file = file.unwrap().path();
 +                let file_name_str = file.file_name().unwrap().to_str().unwrap();
 +                if (file_name_str.contains("rustc_")
 +                    && !file_name_str.contains("rustc_std_workspace_")
 +                    && !file_name_str.contains("rustc_demangle"))
 +                    || file_name_str.contains("chalk")
 +                    || file_name_str.contains("tracing")
 +                    || file_name_str.contains("regex")
 +                {
 +                    // These are large crates that are part of the rustc-dev component and are not
 +                    // necessary to run regular programs.
 +                    continue;
 +                }
 +                try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
 +            }
 +
 +            if target_triple != host_triple {
 +                for file in fs::read_dir(
 +                    default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
 +                )
 +                .unwrap()
 +                {
 +                    let file = file.unwrap().path();
 +                    try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
 +                }
 +            }
 +        }
 +        SysrootKind::Clif => {
-                 let linker = if target_triple == "aarch64-unknown-linux-gnu" {
-                     Some("aarch64-linux-gnu-gcc")
-                 } else {
-                     None
++            build_clif_sysroot_for_triple(dirs, channel, host_triple, &cg_clif_dylib_path, None);
 +
 +            if host_triple != target_triple {
 +                // When cross-compiling it is often necessary to manually pick the right linker
-                     target_dir,
++                let linker = match target_triple {
++                    "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu-gcc"),
++                    "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu-gcc"),
++                    _ => None,
 +                };
 +                build_clif_sysroot_for_triple(
++                    dirs,
 +                    channel,
-                     try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
 +                    target_triple,
 +                    &cg_clif_dylib_path,
 +                    linker,
 +                );
 +            }
 +
 +            // Copy std for the host to the lib dir. This is necessary for the jit mode to find
 +            // libstd.
 +            for file in fs::read_dir(host_rustlib_lib).unwrap() {
 +                let file = file.unwrap().path();
 +                let filename = file.file_name().unwrap().to_str().unwrap();
 +                if filename.contains("std-") && !filename.contains(".rlib") {
-     target_dir: &Path,
++                    try_hard_link(&file, LIB_DIR.to_path(dirs).join(file.file_name().unwrap()));
 +                }
 +            }
 +        }
 +    }
 +}
 +
++// FIXME move to download/ or dist/
++pub(crate) static SYSROOT_RUSTC_VERSION: RelPath = RelPath::BUILD_SYSROOT.join("rustc_version");
++pub(crate) static SYSROOT_SRC: RelPath = RelPath::BUILD_SYSROOT.join("sysroot_src");
++static STANDARD_LIBRARY: CargoProject = CargoProject::new(&RelPath::BUILD_SYSROOT, "build_sysroot");
++
 +fn build_clif_sysroot_for_triple(
++    dirs: &Dirs,
 +    channel: &str,
-     match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
 +    triple: &str,
 +    cg_clif_dylib_path: &Path,
 +    linker: Option<&str>,
 +) {
-     let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
++    match fs::read_to_string(SYSROOT_RUSTC_VERSION.to_path(dirs)) {
 +        Err(e) => {
 +            eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
 +            eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
 +            process::exit(1);
 +        }
 +        Ok(source_version) => {
 +            let rustc_version = get_rustc_version();
 +            if source_version != rustc_version {
 +                eprintln!("The patched sysroot source is outdated");
 +                eprintln!("Source version: {}", source_version.trim());
 +                eprintln!("Rustc version:  {}", rustc_version.trim());
 +                eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
 +                process::exit(1);
 +            }
 +        }
 +    }
 +
-     let mut build_cmd = cargo_command("cargo", "build", Some(triple), Path::new("build_sysroot"));
++    let build_dir = STANDARD_LIBRARY.target_dir(dirs).join(triple).join(channel);
 +
 +    if !super::config::get_bool("keep_sysroot") {
 +        // Cleanup the deps dir, but keep build scripts and the incremental cache for faster
 +        // recompilation as they are not affected by changes in cg_clif.
 +        if build_dir.join("deps").exists() {
 +            fs::remove_dir_all(build_dir.join("deps")).unwrap();
 +        }
 +    }
 +
 +    // Build sysroot
-     rustflags.push_str(&format!(" --sysroot={}", target_dir.to_str().unwrap()));
 +    let mut rustflags = "-Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
 +    rustflags.push_str(&format!(" -Zcodegen-backend={}", cg_clif_dylib_path.to_str().unwrap()));
-         build_cmd.arg("--release");
++    rustflags.push_str(&format!(" --sysroot={}", DIST_DIR.to_path(dirs).to_str().unwrap()));
 +    if channel == "release" {
-     build_cmd.env("RUSTFLAGS", rustflags);
 +        rustflags.push_str(" -Zmir-opt-level=3");
 +    }
 +    if let Some(linker) = linker {
 +        use std::fmt::Write;
 +        write!(rustflags, " -Clinker={}", linker).unwrap();
 +    }
-     for entry in
-         fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
-             .unwrap()
-     {
++    let mut compiler = Compiler::with_triple(triple.to_owned());
++    compiler.rustflags = rustflags;
++    let mut build_cmd = STANDARD_LIBRARY.build(&compiler, dirs);
++    if channel == "release" {
++        build_cmd.arg("--release");
++    }
 +    build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
 +    spawn_and_wait(build_cmd);
 +
 +    // Copy all relevant files to the sysroot
-             target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
++    for entry in fs::read_dir(build_dir.join("deps")).unwrap() {
 +        let entry = entry.unwrap();
 +        if let Some(ext) = entry.path().extension() {
 +            if ext == "rmeta" || ext == "d" || ext == "dSYM" || ext == "clif" {
 +                continue;
 +            }
 +        } else {
 +            continue;
 +        };
 +        try_hard_link(
 +            entry.path(),
++            RUSTLIB_DIR.to_path(dirs).join(triple).join("lib").join(entry.file_name()),
 +        );
 +    }
 +}
index b25270d832ceb4fd483cfdbad2b6b712a4ee1a50,0000000000000000000000000000000000000000..1afc9a55c73b5318bbc9ed006899c4f5ab3647c3
mode 100644,000000..100644
--- /dev/null
@@@ -1,157 -1,0 +1,196 @@@
-     eprintln!("Usage:");
-     eprintln!("  ./y.rs prepare");
-     eprintln!(
-         "  ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
-     );
-     eprintln!(
-         "  ./y.rs test [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
-     );
 +use std::env;
 +use std::path::PathBuf;
 +use std::process;
 +
 +use self::utils::is_ci;
 +
 +mod abi_cafe;
 +mod build_backend;
 +mod build_sysroot;
 +mod config;
++mod path;
 +mod prepare;
 +mod rustc_info;
 +mod tests;
 +mod utils;
 +
++const USAGE: &str = r#"The build system of cg_clif.
++
++USAGE:
++    ./y.rs prepare [--out-dir DIR]
++    ./y.rs build [--debug] [--sysroot none|clif|llvm] [--out-dir DIR] [--no-unstable-features]
++    ./y.rs test [--debug] [--sysroot none|clif|llvm] [--out-dir DIR] [--no-unstable-features]
++
++OPTIONS:
++    --sysroot none|clif|llvm
++            Which sysroot libraries to use:
++            `none` will not include any standard library in the sysroot.
++            `clif` will build the standard library using Cranelift.
++            `llvm` will use the pre-compiled standard library of rustc which is compiled with LLVM.
++
++    --out-dir DIR
++            Specify the directory in which the download, build and dist directories are stored.
++            By default this is the working directory.
++
++    --no-unstable-features
++            fSome features are not yet ready for production usage. This option will disable these
++            features. This includes the JIT mode and inline assembly support.
++"#;
++
 +fn usage() {
-     // The target dir is expected in the default location. Guard against the user changing it.
-     env::set_var("CARGO_TARGET_DIR", "target");
++    eprintln!("{USAGE}");
 +}
 +
 +macro_rules! arg_error {
 +    ($($err:tt)*) => {{
 +        eprintln!($($err)*);
 +        usage();
 +        std::process::exit(1);
 +    }};
 +}
 +
 +#[derive(PartialEq, Debug)]
 +enum Command {
++    Prepare,
 +    Build,
 +    Test,
 +}
 +
 +#[derive(Copy, Clone, Debug)]
 +pub(crate) enum SysrootKind {
 +    None,
 +    Clif,
 +    Llvm,
 +}
 +
 +pub fn main() {
 +    env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
 +    env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
-         Some("prepare") => {
-             if args.next().is_some() {
-                 arg_error!("./y.rs prepare doesn't expect arguments");
-             }
-             prepare::prepare();
-             process::exit(0);
-         }
 +
 +    if is_ci() {
 +        // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
 +        env::set_var("CARGO_BUILD_INCREMENTAL", "false");
 +    }
 +
 +    let mut args = env::args().skip(1);
 +    let command = match args.next().as_deref() {
-     let mut target_dir = PathBuf::from("build");
++        Some("prepare") => Command::Prepare,
 +        Some("build") => Command::Build,
 +        Some("test") => Command::Test,
 +        Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
 +        Some(command) => arg_error!("Unknown command {}", command),
 +        None => {
 +            usage();
 +            process::exit(0);
 +        }
 +    };
 +
-             "--target-dir" => {
-                 target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
-                     arg_error!("--target-dir requires argument");
++    let mut out_dir = PathBuf::from(".");
 +    let mut channel = "release";
 +    let mut sysroot_kind = SysrootKind::Clif;
 +    let mut use_unstable_features = true;
 +    while let Some(arg) = args.next().as_deref() {
 +        match arg {
-     target_dir = std::env::current_dir().unwrap().join(target_dir);
++            "--out-dir" => {
++                out_dir = PathBuf::from(args.next().unwrap_or_else(|| {
++                    arg_error!("--out-dir requires argument");
 +                }))
 +            }
 +            "--debug" => channel = "debug",
 +            "--sysroot" => {
 +                sysroot_kind = match args.next().as_deref() {
 +                    Some("none") => SysrootKind::None,
 +                    Some("clif") => SysrootKind::Clif,
 +                    Some("llvm") => SysrootKind::Llvm,
 +                    Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
 +                    None => arg_error!("--sysroot requires argument"),
 +                }
 +            }
 +            "--no-unstable-features" => use_unstable_features = false,
 +            flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
 +            arg => arg_error!("Unexpected argument {}", arg),
 +        }
 +    }
-     let cg_clif_dylib = build_backend::build_backend(channel, &host_triple, use_unstable_features);
 +
 +    let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
 +        host_triple
 +    } else if let Some(host_triple) = config::get_value("host") {
 +        host_triple
 +    } else {
 +        rustc_info::get_host_triple()
 +    };
 +    let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
 +        if target_triple != "" {
 +            target_triple
 +        } else {
 +            host_triple.clone() // Empty target triple can happen on GHA
 +        }
 +    } else if let Some(target_triple) = config::get_value("target") {
 +        target_triple
 +    } else {
 +        host_triple.clone()
 +    };
 +
-                 &target_dir,
++    // FIXME allow changing the location of these dirs using cli arguments
++    let current_dir = std::env::current_dir().unwrap();
++    out_dir = current_dir.join(out_dir);
++    let dirs = path::Dirs {
++        source_dir: current_dir.clone(),
++        download_dir: out_dir.join("download"),
++        build_dir: out_dir.join("build"),
++        dist_dir: out_dir.join("dist"),
++    };
++
++    path::RelPath::BUILD.ensure_exists(&dirs);
++
++    {
++        // Make sure we always explicitly specify the target dir
++        let target =
++            path::RelPath::BUILD.join("target_dir_should_be_set_explicitly").to_path(&dirs);
++        env::set_var("CARGO_TARGET_DIR", &target);
++        let _ = std::fs::remove_file(&target);
++        std::fs::File::create(target).unwrap();
++    }
++
++    if command == Command::Prepare {
++        prepare::prepare(&dirs);
++        process::exit(0);
++    }
++
++    let cg_clif_dylib =
++        build_backend::build_backend(&dirs, channel, &host_triple, use_unstable_features);
 +    match command {
++        Command::Prepare => {
++            // Handled above
++        }
 +        Command::Test => {
 +            tests::run_tests(
++                &dirs,
 +                channel,
 +                sysroot_kind,
-                 &target_dir,
 +                &cg_clif_dylib,
 +                &host_triple,
 +                &target_triple,
 +            );
 +
 +            abi_cafe::run(
 +                channel,
 +                sysroot_kind,
-                 &target_dir,
++                &dirs,
 +                &cg_clif_dylib,
 +                &host_triple,
 +                &target_triple,
 +            );
 +        }
 +        Command::Build => {
 +            build_sysroot::build_sysroot(
++                &dirs,
 +                channel,
 +                sysroot_kind,
 +                &cg_clif_dylib,
 +                &host_triple,
 +                &target_triple,
 +            );
 +        }
 +    }
 +}
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..e93981f1d64d369b28672c0e61ff5a533c271a23
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,70 @@@
++use std::fs;
++use std::path::PathBuf;
++
++#[derive(Debug, Clone)]
++pub(crate) struct Dirs {
++    pub(crate) source_dir: PathBuf,
++    pub(crate) download_dir: PathBuf,
++    pub(crate) build_dir: PathBuf,
++    pub(crate) dist_dir: PathBuf,
++}
++
++#[doc(hidden)]
++#[derive(Debug, Copy, Clone)]
++pub(crate) enum PathBase {
++    Source,
++    Download,
++    Build,
++    Dist,
++}
++
++impl PathBase {
++    fn to_path(self, dirs: &Dirs) -> PathBuf {
++        match self {
++            PathBase::Source => dirs.source_dir.clone(),
++            PathBase::Download => dirs.download_dir.clone(),
++            PathBase::Build => dirs.build_dir.clone(),
++            PathBase::Dist => dirs.dist_dir.clone(),
++        }
++    }
++}
++
++#[derive(Debug, Copy, Clone)]
++pub(crate) enum RelPath {
++    Base(PathBase),
++    Join(&'static RelPath, &'static str),
++}
++
++impl RelPath {
++    pub(crate) const SOURCE: RelPath = RelPath::Base(PathBase::Source);
++    pub(crate) const DOWNLOAD: RelPath = RelPath::Base(PathBase::Download);
++    pub(crate) const BUILD: RelPath = RelPath::Base(PathBase::Build);
++    pub(crate) const DIST: RelPath = RelPath::Base(PathBase::Dist);
++
++    pub(crate) const SCRIPTS: RelPath = RelPath::SOURCE.join("scripts");
++    pub(crate) const BUILD_SYSROOT: RelPath = RelPath::SOURCE.join("build_sysroot");
++    pub(crate) const PATCHES: RelPath = RelPath::SOURCE.join("patches");
++
++    pub(crate) const fn join(&'static self, suffix: &'static str) -> RelPath {
++        RelPath::Join(self, suffix)
++    }
++
++    pub(crate) fn to_path(&self, dirs: &Dirs) -> PathBuf {
++        match self {
++            RelPath::Base(base) => base.to_path(dirs),
++            RelPath::Join(base, suffix) => base.to_path(dirs).join(suffix),
++        }
++    }
++
++    pub(crate) fn ensure_exists(&self, dirs: &Dirs) {
++        fs::create_dir_all(self.to_path(dirs)).unwrap();
++    }
++
++    pub(crate) fn ensure_fresh(&self, dirs: &Dirs) {
++        let path = self.to_path(dirs);
++        if path.exists() {
++            fs::remove_dir_all(&path).unwrap();
++        }
++        fs::create_dir_all(path).unwrap();
++    }
++}
index 3111f62f6c2156eaec0ef00a9a7213acf8950f88,0000000000000000000000000000000000000000..8ac67e8f9422823090a97ffab654c0da5a848e12
mode 100644,000000..100644
--- /dev/null
@@@ -1,233 -1,0 +1,235 @@@
- use std::env;
 +use std::ffi::OsStr;
 +use std::fs;
 +use std::path::{Path, PathBuf};
 +use std::process::Command;
 +
++use super::build_sysroot::{SYSROOT_RUSTC_VERSION, SYSROOT_SRC};
++use super::path::{Dirs, RelPath};
 +use super::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
- use super::utils::{cargo_command, copy_dir_recursively, spawn_and_wait};
- pub(crate) const ABI_CAFE: GitRepo =
-     GitRepo::github("Gankra", "abi-cafe", "4c6dc8c9c687e2b3a760ff2176ce236872b37212", "abi-cafe");
- pub(crate) const RAND: GitRepo =
-     GitRepo::github("rust-random", "rand", "0f933f9c7176e53b2a3c7952ded484e1783f0bf1", "rand");
- pub(crate) const REGEX: GitRepo =
-     GitRepo::github("rust-lang", "regex", "341f207c1071f7290e3f228c710817c280c8dca1", "regex");
- pub(crate) const PORTABLE_SIMD: GitRepo = GitRepo::github(
-     "rust-lang",
-     "portable-simd",
-     "d5cd4a8112d958bd3a252327e0d069a6363249bd",
-     "portable-simd",
- );
- pub(crate) const SIMPLE_RAYTRACER: GitRepo = GitRepo::github(
-     "ebobby",
-     "simple-raytracer",
-     "804a7a21b9e673a482797aa289a18ed480e4d813",
-     "<none>",
- );
- pub(crate) fn prepare() {
-     if Path::new("download").exists() {
-         std::fs::remove_dir_all(Path::new("download")).unwrap();
++use super::utils::{copy_dir_recursively, spawn_and_wait, Compiler};
++
++pub(crate) fn prepare(dirs: &Dirs) {
++    if RelPath::DOWNLOAD.to_path(dirs).exists() {
++        std::fs::remove_dir_all(RelPath::DOWNLOAD.to_path(dirs)).unwrap();
 +    }
-     std::fs::create_dir_all(Path::new("download")).unwrap();
++    std::fs::create_dir_all(RelPath::DOWNLOAD.to_path(dirs)).unwrap();
 +
-     prepare_sysroot();
++    prepare_sysroot(dirs);
 +
 +    // FIXME maybe install this only locally?
 +    eprintln!("[INSTALL] hyperfine");
-     Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
++    Command::new("cargo")
++        .arg("install")
++        .arg("hyperfine")
++        .env_remove("CARGO_TARGET_DIR")
++        .spawn()
++        .unwrap()
++        .wait()
++        .unwrap();
 +
-     ABI_CAFE.fetch();
-     RAND.fetch();
-     REGEX.fetch();
-     PORTABLE_SIMD.fetch();
-     SIMPLE_RAYTRACER.fetch();
++    super::abi_cafe::ABI_CAFE_REPO.fetch(dirs);
++    super::tests::RAND_REPO.fetch(dirs);
++    super::tests::REGEX_REPO.fetch(dirs);
++    super::tests::PORTABLE_SIMD_REPO.fetch(dirs);
++    super::tests::SIMPLE_RAYTRACER_REPO.fetch(dirs);
 +
 +    eprintln!("[LLVM BUILD] simple-raytracer");
-     let build_cmd = cargo_command("cargo", "build", None, &SIMPLE_RAYTRACER.source_dir());
++    let host_compiler = Compiler::host();
++    let build_cmd = super::tests::SIMPLE_RAYTRACER.build(&host_compiler, dirs);
 +    spawn_and_wait(build_cmd);
 +    fs::copy(
-         SIMPLE_RAYTRACER
-             .source_dir()
-             .join("target")
++        super::tests::SIMPLE_RAYTRACER
++            .target_dir(dirs)
++            .join(&host_compiler.triple)
 +            .join("debug")
 +            .join(get_file_name("main", "bin")),
-         SIMPLE_RAYTRACER.source_dir().join(get_file_name("raytracer_cg_llvm", "bin")),
++        RelPath::BUILD.to_path(dirs).join(get_file_name("raytracer_cg_llvm", "bin")),
 +    )
 +    .unwrap();
 +}
 +
- fn prepare_sysroot() {
++fn prepare_sysroot(dirs: &Dirs) {
 +    let rustc_path = get_rustc_path();
 +    let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
-     let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
++    let sysroot_src = SYSROOT_SRC;
 +
 +    assert!(sysroot_src_orig.exists());
 +
-     if sysroot_src.exists() {
-         fs::remove_dir_all(&sysroot_src).unwrap();
-     }
-     fs::create_dir_all(sysroot_src.join("library")).unwrap();
++    sysroot_src.ensure_fresh(dirs);
++    fs::create_dir_all(sysroot_src.to_path(dirs).join("library")).unwrap();
 +    eprintln!("[COPY] sysroot src");
-     copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
++    copy_dir_recursively(
++        &sysroot_src_orig.join("library"),
++        &sysroot_src.to_path(dirs).join("library"),
++    );
 +
 +    let rustc_version = get_rustc_version();
-     fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
++    fs::write(SYSROOT_RUSTC_VERSION.to_path(dirs), &rustc_version).unwrap();
 +
 +    eprintln!("[GIT] init");
-     let mut git_init_cmd = Command::new("git");
-     git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
-     spawn_and_wait(git_init_cmd);
-     init_git_repo(&sysroot_src);
++    init_git_repo(&sysroot_src.to_path(dirs));
 +
-     apply_patches("sysroot", &sysroot_src);
++    apply_patches(dirs, "sysroot", &sysroot_src.to_path(dirs));
 +}
 +
 +pub(crate) struct GitRepo {
 +    url: GitRepoUrl,
 +    rev: &'static str,
 +    patch_name: &'static str,
 +}
 +
 +enum GitRepoUrl {
 +    Github { user: &'static str, repo: &'static str },
 +}
 +
 +impl GitRepo {
-     const fn github(
++    pub(crate) const fn github(
 +        user: &'static str,
 +        repo: &'static str,
 +        rev: &'static str,
 +        patch_name: &'static str,
 +    ) -> GitRepo {
 +        GitRepo { url: GitRepoUrl::Github { user, repo }, rev, patch_name }
 +    }
 +
-     pub(crate) fn source_dir(&self) -> PathBuf {
++    pub(crate) const fn source_dir(&self) -> RelPath {
 +        match self.url {
-             GitRepoUrl::Github { user: _, repo } => {
-                 std::env::current_dir().unwrap().join("download").join(repo)
-             }
++            GitRepoUrl::Github { user: _, repo } => RelPath::DOWNLOAD.join(repo),
 +        }
 +    }
 +
-     fn fetch(&self) {
++    fn fetch(&self, dirs: &Dirs) {
 +        match self.url {
 +            GitRepoUrl::Github { user, repo } => {
-                 clone_repo_shallow_github(&self.source_dir(), user, repo, self.rev);
++                clone_repo_shallow_github(
++                    dirs,
++                    &self.source_dir().to_path(dirs),
++                    user,
++                    repo,
++                    self.rev,
++                );
 +            }
 +        }
-         apply_patches(self.patch_name, &self.source_dir());
++        apply_patches(dirs, self.patch_name, &self.source_dir().to_path(dirs));
 +    }
 +}
 +
 +#[allow(dead_code)]
 +fn clone_repo(download_dir: &Path, repo: &str, rev: &str) {
 +    eprintln!("[CLONE] {}", repo);
 +    // Ignore exit code as the repo may already have been checked out
 +    Command::new("git").arg("clone").arg(repo).arg(&download_dir).spawn().unwrap().wait().unwrap();
 +
 +    let mut clean_cmd = Command::new("git");
 +    clean_cmd.arg("checkout").arg("--").arg(".").current_dir(&download_dir);
 +    spawn_and_wait(clean_cmd);
 +
 +    let mut checkout_cmd = Command::new("git");
 +    checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(download_dir);
 +    spawn_and_wait(checkout_cmd);
 +}
 +
- fn clone_repo_shallow_github(download_dir: &Path, user: &str, repo: &str, rev: &str) {
++fn clone_repo_shallow_github(dirs: &Dirs, download_dir: &Path, user: &str, repo: &str, rev: &str) {
 +    if cfg!(windows) {
 +        // Older windows doesn't have tar or curl by default. Fall back to using git.
 +        clone_repo(download_dir, &format!("https://github.com/{}/{}.git", user, repo), rev);
 +        return;
 +    }
 +
-     let downloads_dir = std::env::current_dir().unwrap().join("download");
 +    let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", user, repo, rev);
-     let archive_file = downloads_dir.join(format!("{}.tar.gz", rev));
-     let archive_dir = downloads_dir.join(format!("{}-{}", repo, rev));
++    let archive_file = RelPath::DOWNLOAD.to_path(dirs).join(format!("{}.tar.gz", rev));
++    let archive_dir = RelPath::DOWNLOAD.to_path(dirs).join(format!("{}-{}", repo, rev));
 +
 +    eprintln!("[DOWNLOAD] {}/{} from {}", user, repo, archive_url);
 +
 +    // Remove previous results if they exists
 +    let _ = std::fs::remove_file(&archive_file);
 +    let _ = std::fs::remove_dir_all(&archive_dir);
 +    let _ = std::fs::remove_dir_all(&download_dir);
 +
 +    // Download zip archive
 +    let mut download_cmd = Command::new("curl");
 +    download_cmd.arg("--location").arg("--output").arg(&archive_file).arg(archive_url);
 +    spawn_and_wait(download_cmd);
 +
 +    // Unpack tar archive
 +    let mut unpack_cmd = Command::new("tar");
-     unpack_cmd.arg("xf").arg(&archive_file).current_dir(downloads_dir);
++    unpack_cmd.arg("xf").arg(&archive_file).current_dir(RelPath::DOWNLOAD.to_path(dirs));
 +    spawn_and_wait(unpack_cmd);
 +
 +    // Rename unpacked dir to the expected name
 +    std::fs::rename(archive_dir, &download_dir).unwrap();
 +
 +    init_git_repo(&download_dir);
 +
 +    // Cleanup
 +    std::fs::remove_file(archive_file).unwrap();
 +}
 +
 +fn init_git_repo(repo_dir: &Path) {
 +    let mut git_init_cmd = Command::new("git");
 +    git_init_cmd.arg("init").arg("-q").current_dir(repo_dir);
 +    spawn_and_wait(git_init_cmd);
 +
 +    let mut git_add_cmd = Command::new("git");
 +    git_add_cmd.arg("add").arg(".").current_dir(repo_dir);
 +    spawn_and_wait(git_add_cmd);
 +
 +    let mut git_commit_cmd = Command::new("git");
-     git_commit_cmd.arg("commit").arg("-m").arg("Initial commit").arg("-q").current_dir(repo_dir);
++    git_commit_cmd
++        .arg("-c")
++        .arg("user.name=Dummy")
++        .arg("-c")
++        .arg("user.email=dummy@example.com")
++        .arg("commit")
++        .arg("-m")
++        .arg("Initial commit")
++        .arg("-q")
++        .current_dir(repo_dir);
 +    spawn_and_wait(git_commit_cmd);
 +}
 +
- fn get_patches(source_dir: &Path, crate_name: &str) -> Vec<PathBuf> {
-     let mut patches: Vec<_> = fs::read_dir(source_dir.join("patches"))
++fn get_patches(dirs: &Dirs, crate_name: &str) -> Vec<PathBuf> {
++    let mut patches: Vec<_> = fs::read_dir(RelPath::PATCHES.to_path(dirs))
 +        .unwrap()
 +        .map(|entry| entry.unwrap().path())
 +        .filter(|path| path.extension() == Some(OsStr::new("patch")))
 +        .filter(|path| {
 +            path.file_name()
 +                .unwrap()
 +                .to_str()
 +                .unwrap()
 +                .split_once("-")
 +                .unwrap()
 +                .1
 +                .starts_with(crate_name)
 +        })
 +        .collect();
 +    patches.sort();
 +    patches
 +}
 +
- fn apply_patches(crate_name: &str, target_dir: &Path) {
++fn apply_patches(dirs: &Dirs, crate_name: &str, target_dir: &Path) {
 +    if crate_name == "<none>" {
 +        return;
 +    }
 +
-     for patch in get_patches(&std::env::current_dir().unwrap(), crate_name) {
++    for patch in get_patches(dirs, crate_name) {
 +        eprintln!(
 +            "[PATCH] {:?} <- {:?}",
 +            target_dir.file_name().unwrap(),
 +            patch.file_name().unwrap()
 +        );
 +        let mut apply_patch_cmd = Command::new("git");
-         apply_patch_cmd.arg("am").arg(patch).arg("-q").current_dir(target_dir);
++        apply_patch_cmd
++            .arg("-c")
++            .arg("user.name=Dummy")
++            .arg("-c")
++            .arg("user.email=dummy@example.com")
++            .arg("am")
++            .arg(patch)
++            .arg("-q")
++            .current_dir(target_dir);
 +        spawn_and_wait(apply_patch_cmd);
 +    }
 +}
index 3c08b6fa3894d61277a0c0b6499e23ddcdc544a9,0000000000000000000000000000000000000000..8e5ab688e131b35325af4fb83a3387b8c6228449
mode 100644,000000..100644
--- /dev/null
@@@ -1,74 -1,0 +1,94 @@@
 +use std::path::{Path, PathBuf};
 +use std::process::{Command, Stdio};
 +
 +pub(crate) fn get_rustc_version() -> String {
 +    let version_info =
 +        Command::new("rustc").stderr(Stdio::inherit()).args(&["-V"]).output().unwrap().stdout;
 +    String::from_utf8(version_info).unwrap()
 +}
 +
 +pub(crate) fn get_host_triple() -> String {
 +    let version_info =
 +        Command::new("rustc").stderr(Stdio::inherit()).args(&["-vV"]).output().unwrap().stdout;
 +    String::from_utf8(version_info)
 +        .unwrap()
 +        .lines()
 +        .to_owned()
 +        .find(|line| line.starts_with("host"))
 +        .unwrap()
 +        .split(":")
 +        .nth(1)
 +        .unwrap()
 +        .trim()
 +        .to_owned()
 +}
 +
++pub(crate) fn get_cargo_path() -> PathBuf {
++    let cargo_path = Command::new("rustup")
++        .stderr(Stdio::inherit())
++        .args(&["which", "cargo"])
++        .output()
++        .unwrap()
++        .stdout;
++    Path::new(String::from_utf8(cargo_path).unwrap().trim()).to_owned()
++}
++
 +pub(crate) fn get_rustc_path() -> PathBuf {
 +    let rustc_path = Command::new("rustup")
 +        .stderr(Stdio::inherit())
 +        .args(&["which", "rustc"])
 +        .output()
 +        .unwrap()
 +        .stdout;
 +    Path::new(String::from_utf8(rustc_path).unwrap().trim()).to_owned()
 +}
 +
++pub(crate) fn get_rustdoc_path() -> PathBuf {
++    let rustc_path = Command::new("rustup")
++        .stderr(Stdio::inherit())
++        .args(&["which", "rustdoc"])
++        .output()
++        .unwrap()
++        .stdout;
++    Path::new(String::from_utf8(rustc_path).unwrap().trim()).to_owned()
++}
++
 +pub(crate) fn get_default_sysroot() -> PathBuf {
 +    let default_sysroot = Command::new("rustc")
 +        .stderr(Stdio::inherit())
 +        .args(&["--print", "sysroot"])
 +        .output()
 +        .unwrap()
 +        .stdout;
 +    Path::new(String::from_utf8(default_sysroot).unwrap().trim()).to_owned()
 +}
 +
 +pub(crate) fn get_file_name(crate_name: &str, crate_type: &str) -> String {
 +    let file_name = Command::new("rustc")
 +        .stderr(Stdio::inherit())
 +        .args(&[
 +            "--crate-name",
 +            crate_name,
 +            "--crate-type",
 +            crate_type,
 +            "--print",
 +            "file-names",
 +            "-",
 +        ])
 +        .output()
 +        .unwrap()
 +        .stdout;
 +    let file_name = String::from_utf8(file_name).unwrap().trim().to_owned();
 +    assert!(!file_name.contains('\n'));
 +    assert!(file_name.contains(crate_name));
 +    file_name
 +}
 +
 +/// Similar to `get_file_name`, but converts any dashes (`-`) in the `crate_name` to
 +/// underscores (`_`). This is specially made for the rustc and cargo wrappers
 +/// which have a dash in the name, and that is not allowed in a crate name.
 +pub(crate) fn get_wrapper_file_name(crate_name: &str, crate_type: &str) -> String {
 +    let crate_name = crate_name.replace('-', "_");
 +    let wrapper_name = get_file_name(&crate_name, crate_type);
 +    wrapper_name.replace('_', "-")
 +}
index a414b60f4e06b2b79dccc1ee64bc233f32ae6ba1,0000000000000000000000000000000000000000..1c372736ed65d9121dccf04ae5bcf504d45127bc
mode 100644,000000..100644
--- /dev/null
@@@ -1,610 -1,0 +1,645 @@@
- use super::prepare;
- use super::rustc_info::get_wrapper_file_name;
- use super::utils::{cargo_command, hyperfine_command, spawn_and_wait, spawn_and_wait_with_input};
- use build_system::SysrootKind;
 +use super::build_sysroot;
 +use super::config;
- use std::path::{Path, PathBuf};
++use super::path::{Dirs, RelPath};
++use super::prepare::GitRepo;
++use super::rustc_info::{get_cargo_path, get_wrapper_file_name};
++use super::utils::{
++    hyperfine_command, spawn_and_wait, spawn_and_wait_with_input, CargoProject, Compiler,
++};
++use super::SysrootKind;
 +use std::env;
 +use std::ffi::OsStr;
 +use std::fs;
-             &runner.target_triple,
++use std::path::Path;
 +use std::process::Command;
 +
++static BUILD_EXAMPLE_OUT_DIR: RelPath = RelPath::BUILD.join("example");
++
 +struct TestCase {
 +    config: &'static str,
 +    func: &'static dyn Fn(&TestRunner),
 +}
 +
 +impl TestCase {
 +    const fn new(config: &'static str, func: &'static dyn Fn(&TestRunner)) -> Self {
 +        Self { config, func }
 +    }
 +}
 +
 +const NO_SYSROOT_SUITE: &[TestCase] = &[
 +    TestCase::new("build.mini_core", &|runner| {
 +        runner.run_rustc([
 +            "example/mini_core.rs",
 +            "--crate-name",
 +            "mini_core",
 +            "--crate-type",
 +            "lib,dylib",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +    }),
 +    TestCase::new("build.example", &|runner| {
 +        runner.run_rustc([
 +            "example/example.rs",
 +            "--crate-type",
 +            "lib",
 +            "--target",
-             &runner.host_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +    }),
 +    TestCase::new("jit.mini_core_hello_world", &|runner| {
 +        let mut jit_cmd = runner.rustc_command([
 +            "-Zunstable-options",
 +            "-Cllvm-args=mode=jit",
 +            "-Cprefer-dynamic",
 +            "example/mini_core_hello_world.rs",
 +            "--cfg",
 +            "jit",
 +            "--target",
-             &runner.host_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
 +        spawn_and_wait(jit_cmd);
 +
 +        eprintln!("[JIT-lazy] mini_core_hello_world");
 +        let mut jit_cmd = runner.rustc_command([
 +            "-Zunstable-options",
 +            "-Cllvm-args=mode=jit-lazy",
 +            "-Cprefer-dynamic",
 +            "example/mini_core_hello_world.rs",
 +            "--cfg",
 +            "jit",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
 +        spawn_and_wait(jit_cmd);
 +    }),
 +    TestCase::new("aot.mini_core_hello_world", &|runner| {
 +        runner.run_rustc([
 +            "example/mini_core_hello_world.rs",
 +            "--crate-name",
 +            "mini_core_hello_world",
 +            "--crate-type",
 +            "bin",
 +            "-g",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("mini_core_hello_world", ["abc", "bcd"]);
 +    }),
 +];
 +
 +const BASE_SYSROOT_SUITE: &[TestCase] = &[
 +    TestCase::new("aot.arbitrary_self_types_pointers_and_wrappers", &|runner| {
 +        runner.run_rustc([
 +            "example/arbitrary_self_types_pointers_and_wrappers.rs",
 +            "--crate-name",
 +            "arbitrary_self_types_pointers_and_wrappers",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("arbitrary_self_types_pointers_and_wrappers", []);
 +    }),
 +    TestCase::new("aot.issue_91827_extern_types", &|runner| {
 +        runner.run_rustc([
 +            "example/issue-91827-extern-types.rs",
 +            "--crate-name",
 +            "issue_91827_extern_types",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("issue_91827_extern_types", []);
 +    }),
 +    TestCase::new("build.alloc_system", &|runner| {
 +        runner.run_rustc([
 +            "example/alloc_system.rs",
 +            "--crate-type",
 +            "lib",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +    }),
 +    TestCase::new("aot.alloc_example", &|runner| {
 +        runner.run_rustc([
 +            "example/alloc_example.rs",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-             &runner.host_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("alloc_example", []);
 +    }),
 +    TestCase::new("jit.std_example", &|runner| {
 +        runner.run_rustc([
 +            "-Zunstable-options",
 +            "-Cllvm-args=mode=jit",
 +            "-Cprefer-dynamic",
 +            "example/std_example.rs",
 +            "--target",
-             &runner.host_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +
 +        eprintln!("[JIT-lazy] std_example");
 +        runner.run_rustc([
 +            "-Zunstable-options",
 +            "-Cllvm-args=mode=jit-lazy",
 +            "-Cprefer-dynamic",
 +            "example/std_example.rs",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +    }),
 +    TestCase::new("aot.std_example", &|runner| {
 +        runner.run_rustc([
 +            "example/std_example.rs",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("std_example", ["arg"]);
 +    }),
 +    TestCase::new("aot.dst_field_align", &|runner| {
 +        runner.run_rustc([
 +            "example/dst-field-align.rs",
 +            "--crate-name",
 +            "dst_field_align",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("dst_field_align", []);
 +    }),
 +    TestCase::new("aot.subslice-patterns-const-eval", &|runner| {
 +        runner.run_rustc([
 +            "example/subslice-patterns-const-eval.rs",
 +            "--crate-type",
 +            "bin",
 +            "-Cpanic=abort",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("subslice-patterns-const-eval", []);
 +    }),
 +    TestCase::new("aot.track-caller-attribute", &|runner| {
 +        runner.run_rustc([
 +            "example/track-caller-attribute.rs",
 +            "--crate-type",
 +            "bin",
 +            "-Cpanic=abort",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("track-caller-attribute", []);
 +    }),
 +    TestCase::new("aot.float-minmax-pass", &|runner| {
 +        runner.run_rustc([
 +            "example/float-minmax-pass.rs",
 +            "--crate-type",
 +            "bin",
 +            "-Cpanic=abort",
 +            "--target",
-             &runner.target_triple,
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("float-minmax-pass", []);
 +    }),
 +    TestCase::new("aot.mod_bench", &|runner| {
 +        runner.run_rustc([
 +            "example/mod_bench.rs",
 +            "--crate-type",
 +            "bin",
 +            "--target",
-         runner.in_dir(prepare::RAND.source_dir(), |runner| {
-             runner.run_cargo("clean", []);
-             if runner.host_triple == runner.target_triple {
-                 eprintln!("[TEST] rust-random/rand");
-                 runner.run_cargo("test", ["--workspace"]);
-             } else {
-                 eprintln!("[AOT] rust-random/rand");
-                 runner.run_cargo("build", ["--workspace", "--tests"]);
-             }
-         });
++            &runner.target_compiler.triple,
 +        ]);
 +        runner.run_out_command("mod_bench", []);
 +    }),
++    TestCase::new("aot.issue-72793", &|runner| {
++        runner.run_rustc([
++            "example/issue-72793.rs",
++            "--crate-type",
++            "bin",
++            "--target",
++            &runner.target_compiler.triple,
++        ]);
++        runner.run_out_command("issue-72793", []);
++    }),
 +];
 +
++pub(crate) static RAND_REPO: GitRepo =
++    GitRepo::github("rust-random", "rand", "0f933f9c7176e53b2a3c7952ded484e1783f0bf1", "rand");
++
++static RAND: CargoProject = CargoProject::new(&RAND_REPO.source_dir(), "rand");
++
++pub(crate) static REGEX_REPO: GitRepo =
++    GitRepo::github("rust-lang", "regex", "341f207c1071f7290e3f228c710817c280c8dca1", "regex");
++
++static REGEX: CargoProject = CargoProject::new(&REGEX_REPO.source_dir(), "regex");
++
++pub(crate) static PORTABLE_SIMD_REPO: GitRepo = GitRepo::github(
++    "rust-lang",
++    "portable-simd",
++    "d5cd4a8112d958bd3a252327e0d069a6363249bd",
++    "portable-simd",
++);
++
++static PORTABLE_SIMD: CargoProject =
++    CargoProject::new(&PORTABLE_SIMD_REPO.source_dir(), "portable_simd");
++
++pub(crate) static SIMPLE_RAYTRACER_REPO: GitRepo = GitRepo::github(
++    "ebobby",
++    "simple-raytracer",
++    "804a7a21b9e673a482797aa289a18ed480e4d813",
++    "<none>",
++);
++
++pub(crate) static SIMPLE_RAYTRACER: CargoProject =
++    CargoProject::new(&SIMPLE_RAYTRACER_REPO.source_dir(), "simple_raytracer");
++
++static LIBCORE_TESTS: CargoProject =
++    CargoProject::new(&RelPath::BUILD_SYSROOT.join("sysroot_src/library/core/tests"), "core_tests");
++
 +const EXTENDED_SYSROOT_SUITE: &[TestCase] = &[
 +    TestCase::new("test.rust-random/rand", &|runner| {
-         runner.in_dir(prepare::SIMPLE_RAYTRACER.source_dir(), |runner| {
-             let run_runs = env::var("RUN_RUNS").unwrap_or("10".to_string()).parse().unwrap();
-             if runner.host_triple == runner.target_triple {
-                 eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
-                 let prepare = runner.cargo_command("clean", []);
-                 let llvm_build_cmd = cargo_command("cargo", "build", None, Path::new("."));
-                 let cargo_clif = runner
-                     .root_dir
-                     .clone()
-                     .join("build")
-                     .join(get_wrapper_file_name("cargo-clif", "bin"));
-                 let clif_build_cmd = cargo_command(cargo_clif, "build", None, Path::new("."));
-                 let bench_compile =
-                     hyperfine_command(1, run_runs, Some(prepare), llvm_build_cmd, clif_build_cmd);
-                 spawn_and_wait(bench_compile);
-                 eprintln!("[BENCH RUN] ebobby/simple-raytracer");
-                 fs::copy(PathBuf::from("./target/debug/main"), PathBuf::from("raytracer_cg_clif"))
-                     .unwrap();
-                 let bench_run = hyperfine_command(
-                     0,
-                     run_runs,
-                     None,
-                     Command::new("./raytracer_cg_llvm"),
-                     Command::new("./raytracer_cg_clif"),
-                 );
-                 spawn_and_wait(bench_run);
-             } else {
-                 runner.run_cargo("clean", []);
-                 eprintln!("[BENCH COMPILE] ebobby/simple-raytracer (skipped)");
-                 eprintln!("[COMPILE] ebobby/simple-raytracer");
-                 runner.run_cargo("build", []);
-                 eprintln!("[BENCH RUN] ebobby/simple-raytracer (skipped)");
-             }
-         });
++        spawn_and_wait(RAND.clean(&runner.target_compiler.cargo, &runner.dirs));
++
++        if runner.is_native {
++            eprintln!("[TEST] rust-random/rand");
++            let mut test_cmd = RAND.test(&runner.target_compiler, &runner.dirs);
++            test_cmd.arg("--workspace");
++            spawn_and_wait(test_cmd);
++        } else {
++            eprintln!("[AOT] rust-random/rand");
++            let mut build_cmd = RAND.build(&runner.target_compiler, &runner.dirs);
++            build_cmd.arg("--workspace").arg("--tests");
++            spawn_and_wait(build_cmd);
++        }
 +    }),
 +    TestCase::new("bench.simple-raytracer", &|runner| {
-         runner.in_dir(
-             std::env::current_dir()
-                 .unwrap()
-                 .join("build_sysroot")
-                 .join("sysroot_src")
-                 .join("library")
-                 .join("core")
-                 .join("tests"),
-             |runner| {
-                 runner.run_cargo("clean", []);
-                 if runner.host_triple == runner.target_triple {
-                     runner.run_cargo("test", []);
-                 } else {
-                     eprintln!("Cross-Compiling: Not running tests");
-                     runner.run_cargo("build", ["--tests"]);
-                 }
-             },
-         );
++        let run_runs = env::var("RUN_RUNS").unwrap_or("10".to_string()).parse().unwrap();
++
++        if runner.is_native {
++            eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
++            let cargo_clif = RelPath::DIST
++                .to_path(&runner.dirs)
++                .join(get_wrapper_file_name("cargo-clif", "bin"));
++            let manifest_path = SIMPLE_RAYTRACER.manifest_path(&runner.dirs);
++            let target_dir = SIMPLE_RAYTRACER.target_dir(&runner.dirs);
++
++            let clean_cmd = format!(
++                "cargo clean --manifest-path {manifest_path} --target-dir {target_dir}",
++                manifest_path = manifest_path.display(),
++                target_dir = target_dir.display(),
++            );
++            let llvm_build_cmd = format!(
++                "cargo build --manifest-path {manifest_path} --target-dir {target_dir}",
++                manifest_path = manifest_path.display(),
++                target_dir = target_dir.display(),
++            );
++            let clif_build_cmd = format!(
++                "{cargo_clif} build --manifest-path {manifest_path} --target-dir {target_dir}",
++                cargo_clif = cargo_clif.display(),
++                manifest_path = manifest_path.display(),
++                target_dir = target_dir.display(),
++            );
++
++            let bench_compile =
++                hyperfine_command(1, run_runs, Some(&clean_cmd), &llvm_build_cmd, &clif_build_cmd);
++
++            spawn_and_wait(bench_compile);
++
++            eprintln!("[BENCH RUN] ebobby/simple-raytracer");
++            fs::copy(
++                target_dir.join("debug").join("main"),
++                RelPath::BUILD.to_path(&runner.dirs).join("raytracer_cg_clif"),
++            )
++            .unwrap();
++
++            let mut bench_run =
++                hyperfine_command(0, run_runs, None, "./raytracer_cg_llvm", "./raytracer_cg_clif");
++            bench_run.current_dir(RelPath::BUILD.to_path(&runner.dirs));
++            spawn_and_wait(bench_run);
++        } else {
++            spawn_and_wait(SIMPLE_RAYTRACER.clean(&runner.target_compiler.cargo, &runner.dirs));
++            eprintln!("[BENCH COMPILE] ebobby/simple-raytracer (skipped)");
++            eprintln!("[COMPILE] ebobby/simple-raytracer");
++            spawn_and_wait(SIMPLE_RAYTRACER.build(&runner.target_compiler, &runner.dirs));
++            eprintln!("[BENCH RUN] ebobby/simple-raytracer (skipped)");
++        }
 +    }),
 +    TestCase::new("test.libcore", &|runner| {
-         runner.in_dir(prepare::REGEX.source_dir(), |runner| {
-             runner.run_cargo("clean", []);
-             // newer aho_corasick versions throw a deprecation warning
-             let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
-             let mut build_cmd = runner.cargo_command("build", ["--example", "shootout-regex-dna"]);
-             build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
-             spawn_and_wait(build_cmd);
-             if runner.host_triple == runner.target_triple {
-                 let mut run_cmd = runner.cargo_command("run", ["--example", "shootout-regex-dna"]);
-                 run_cmd.env("RUSTFLAGS", lint_rust_flags);
-                 let input =
-                     fs::read_to_string(PathBuf::from("examples/regexdna-input.txt")).unwrap();
-                 let expected_path = PathBuf::from("examples/regexdna-output.txt");
-                 let expected = fs::read_to_string(&expected_path).unwrap();
-                 let output = spawn_and_wait_with_input(run_cmd, input);
-                 // Make sure `[codegen mono items] start` doesn't poison the diff
-                 let output = output
-                     .lines()
-                     .filter(|line| !line.contains("codegen mono items"))
-                     .chain(Some("")) // This just adds the trailing newline
-                     .collect::<Vec<&str>>()
-                     .join("\r\n");
-                 let output_matches = expected.lines().eq(output.lines());
-                 if !output_matches {
-                     let res_path = PathBuf::from("res.txt");
-                     fs::write(&res_path, &output).unwrap();
-                     if cfg!(windows) {
-                         println!("Output files don't match!");
-                         println!("Expected Output:\n{}", expected);
-                         println!("Actual Output:\n{}", output);
-                     } else {
-                         let mut diff = Command::new("diff");
-                         diff.arg("-u");
-                         diff.arg(res_path);
-                         diff.arg(expected_path);
-                         spawn_and_wait(diff);
-                     }
-                     std::process::exit(1);
++        spawn_and_wait(LIBCORE_TESTS.clean(&runner.host_compiler.cargo, &runner.dirs));
++
++        if runner.is_native {
++            spawn_and_wait(LIBCORE_TESTS.test(&runner.target_compiler, &runner.dirs));
++        } else {
++            eprintln!("Cross-Compiling: Not running tests");
++            let mut build_cmd = LIBCORE_TESTS.build(&runner.target_compiler, &runner.dirs);
++            build_cmd.arg("--tests");
++            spawn_and_wait(build_cmd);
++        }
 +    }),
 +    TestCase::new("test.regex-shootout-regex-dna", &|runner| {
-         });
++        spawn_and_wait(REGEX.clean(&runner.target_compiler.cargo, &runner.dirs));
++
++        // newer aho_corasick versions throw a deprecation warning
++        let lint_rust_flags = format!("{} --cap-lints warn", runner.target_compiler.rustflags);
++
++        let mut build_cmd = REGEX.build(&runner.target_compiler, &runner.dirs);
++        build_cmd.arg("--example").arg("shootout-regex-dna");
++        build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
++        spawn_and_wait(build_cmd);
++
++        if runner.is_native {
++            let mut run_cmd = REGEX.run(&runner.target_compiler, &runner.dirs);
++            run_cmd.arg("--example").arg("shootout-regex-dna");
++            run_cmd.env("RUSTFLAGS", lint_rust_flags);
++
++            let input = fs::read_to_string(
++                REGEX.source_dir(&runner.dirs).join("examples").join("regexdna-input.txt"),
++            )
++            .unwrap();
++            let expected_path =
++                REGEX.source_dir(&runner.dirs).join("examples").join("regexdna-output.txt");
++            let expected = fs::read_to_string(&expected_path).unwrap();
++
++            let output = spawn_and_wait_with_input(run_cmd, input);
++            // Make sure `[codegen mono items] start` doesn't poison the diff
++            let output = output
++                .lines()
++                .filter(|line| !line.contains("codegen mono items"))
++                .chain(Some("")) // This just adds the trailing newline
++                .collect::<Vec<&str>>()
++                .join("\r\n");
++
++            let output_matches = expected.lines().eq(output.lines());
++            if !output_matches {
++                let res_path = REGEX.source_dir(&runner.dirs).join("res.txt");
++                fs::write(&res_path, &output).unwrap();
++
++                if cfg!(windows) {
++                    println!("Output files don't match!");
++                    println!("Expected Output:\n{}", expected);
++                    println!("Actual Output:\n{}", output);
++                } else {
++                    let mut diff = Command::new("diff");
++                    diff.arg("-u");
++                    diff.arg(res_path);
++                    diff.arg(expected_path);
++                    spawn_and_wait(diff);
 +                }
++
++                std::process::exit(1);
 +            }
-         runner.in_dir(prepare::REGEX.source_dir(), |runner| {
-             runner.run_cargo("clean", []);
-             // newer aho_corasick versions throw a deprecation warning
-             let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
-             if runner.host_triple == runner.target_triple {
-                 let mut run_cmd = runner.cargo_command(
-                     "test",
-                     [
-                         "--tests",
-                         "--",
-                         "--exclude-should-panic",
-                         "--test-threads",
-                         "1",
-                         "-Zunstable-options",
-                         "-q",
-                     ],
-                 );
-                 run_cmd.env("RUSTFLAGS", lint_rust_flags);
-                 spawn_and_wait(run_cmd);
-             } else {
-                 eprintln!("Cross-Compiling: Not running tests");
-                 let mut build_cmd =
-                     runner.cargo_command("build", ["--tests", "--target", &runner.target_triple]);
-                 build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
-                 spawn_and_wait(build_cmd);
-             }
-         });
++        }
 +    }),
 +    TestCase::new("test.regex", &|runner| {
-         runner.in_dir(prepare::PORTABLE_SIMD.source_dir(), |runner| {
-             runner.run_cargo("clean", []);
-             runner.run_cargo("build", ["--all-targets", "--target", &runner.target_triple]);
++        spawn_and_wait(REGEX.clean(&runner.host_compiler.cargo, &runner.dirs));
++
++        // newer aho_corasick versions throw a deprecation warning
++        let lint_rust_flags = format!("{} --cap-lints warn", runner.target_compiler.rustflags);
++
++        if runner.is_native {
++            let mut run_cmd = REGEX.test(&runner.target_compiler, &runner.dirs);
++            run_cmd.args([
++                "--tests",
++                "--",
++                "--exclude-should-panic",
++                "--test-threads",
++                "1",
++                "-Zunstable-options",
++                "-q",
++            ]);
++            run_cmd.env("RUSTFLAGS", lint_rust_flags);
++            spawn_and_wait(run_cmd);
++        } else {
++            eprintln!("Cross-Compiling: Not running tests");
++            let mut build_cmd = REGEX.build(&runner.target_compiler, &runner.dirs);
++            build_cmd.arg("--tests");
++            build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
++            spawn_and_wait(build_cmd);
++        }
 +    }),
 +    TestCase::new("test.portable-simd", &|runner| {
-             if runner.host_triple == runner.target_triple {
-                 runner.run_cargo("test", ["-q"]);
-             }
-         });
++        spawn_and_wait(PORTABLE_SIMD.clean(&runner.host_compiler.cargo, &runner.dirs));
 +
-     target_dir: &Path,
++        let mut build_cmd = PORTABLE_SIMD.build(&runner.target_compiler, &runner.dirs);
++        build_cmd.arg("--all-targets");
++        spawn_and_wait(build_cmd);
++
++        if runner.is_native {
++            let mut test_cmd = PORTABLE_SIMD.test(&runner.target_compiler, &runner.dirs);
++            test_cmd.arg("-q");
++            spawn_and_wait(test_cmd);
++        }
 +    }),
 +];
 +
 +pub(crate) fn run_tests(
++    dirs: &Dirs,
 +    channel: &str,
 +    sysroot_kind: SysrootKind,
-     let runner = TestRunner::new(host_triple.to_string(), target_triple.to_string());
 +    cg_clif_dylib: &Path,
 +    host_triple: &str,
 +    target_triple: &str,
 +) {
-             &target_dir,
++    let runner = TestRunner::new(dirs.clone(), host_triple.to_string(), target_triple.to_string());
 +
 +    if config::get_bool("testsuite.no_sysroot") {
 +        build_sysroot::build_sysroot(
++            dirs,
 +            channel,
 +            SysrootKind::None,
-         let _ = fs::remove_dir_all(Path::new("target").join("out"));
 +            cg_clif_dylib,
 +            &host_triple,
 +            &target_triple,
 +        );
 +
-             &target_dir,
++        BUILD_EXAMPLE_OUT_DIR.ensure_fresh(dirs);
 +        runner.run_testsuite(NO_SYSROOT_SUITE);
 +    } else {
 +        eprintln!("[SKIP] no_sysroot tests");
 +    }
 +
 +    let run_base_sysroot = config::get_bool("testsuite.base_sysroot");
 +    let run_extended_sysroot = config::get_bool("testsuite.extended_sysroot");
 +
 +    if run_base_sysroot || run_extended_sysroot {
 +        build_sysroot::build_sysroot(
++            dirs,
 +            channel,
 +            sysroot_kind,
-     root_dir: PathBuf,
-     out_dir: PathBuf,
 +            cg_clif_dylib,
 +            &host_triple,
 +            &target_triple,
 +        );
 +    }
 +
 +    if run_base_sysroot {
 +        runner.run_testsuite(BASE_SYSROOT_SUITE);
 +    } else {
 +        eprintln!("[SKIP] base_sysroot tests");
 +    }
 +
 +    if run_extended_sysroot {
 +        runner.run_testsuite(EXTENDED_SYSROOT_SUITE);
 +    } else {
 +        eprintln!("[SKIP] extended_sysroot tests");
 +    }
 +}
 +
 +struct TestRunner {
-     rust_flags: String,
-     run_wrapper: Vec<String>,
-     host_triple: String,
-     target_triple: String,
++    is_native: bool,
 +    jit_supported: bool,
-     pub fn new(host_triple: String, target_triple: String) -> Self {
-         let root_dir = env::current_dir().unwrap();
-         let mut out_dir = root_dir.clone();
-         out_dir.push("target");
-         out_dir.push("out");
++    dirs: Dirs,
++    host_compiler: Compiler,
++    target_compiler: Compiler,
 +}
 +
 +impl TestRunner {
-         let mut rust_flags = env::var("RUSTFLAGS").ok().unwrap_or("".to_string());
-         let mut run_wrapper = Vec::new();
++    pub fn new(dirs: Dirs, host_triple: String, target_triple: String) -> Self {
 +        let is_native = host_triple == target_triple;
 +        let jit_supported =
 +            target_triple.contains("x86_64") && is_native && !host_triple.contains("windows");
 +
-                     rust_flags = format!("-Clinker=aarch64-linux-gnu-gcc{}", rust_flags);
-                     run_wrapper = vec!["qemu-aarch64", "-L", "/usr/aarch64-linux-gnu"];
++        let rustc_clif =
++            RelPath::DIST.to_path(&dirs).join(get_wrapper_file_name("rustc-clif", "bin"));
++        let rustdoc_clif =
++            RelPath::DIST.to_path(&dirs).join(get_wrapper_file_name("rustdoc-clif", "bin"));
++
++        let mut rustflags = env::var("RUSTFLAGS").ok().unwrap_or("".to_string());
++        let mut runner = vec![];
 +
 +        if !is_native {
 +            match target_triple.as_str() {
 +                "aarch64-unknown-linux-gnu" => {
 +                    // We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
-                     run_wrapper = vec!["wine"];
++                    rustflags = format!("-Clinker=aarch64-linux-gnu-gcc{}", rustflags);
++                    runner = vec![
++                        "qemu-aarch64".to_owned(),
++                        "-L".to_owned(),
++                        "/usr/aarch64-linux-gnu".to_owned(),
++                    ];
++                }
++                "s390x-unknown-linux-gnu" => {
++                    // We are cross-compiling for s390x. Use the correct linker and run tests in qemu.
++                    rustflags = format!("-Clinker=s390x-linux-gnu-gcc{}", rustflags);
++                    runner = vec![
++                        "qemu-s390x".to_owned(),
++                        "-L".to_owned(),
++                        "/usr/s390x-linux-gnu".to_owned(),
++                    ];
 +                }
 +                "x86_64-pc-windows-gnu" => {
 +                    // We are cross-compiling for Windows. Run tests in wine.
-         if host_triple.contains("darwin") {
-             rust_flags = format!("{} -Clink-arg=-undefined -Clink-arg=dynamic_lookup", rust_flags);
++                    runner = vec!["wine".to_owned()];
 +                }
 +                _ => {
 +                    println!("Unknown non-native platform");
 +                }
 +            }
 +        }
 +
 +        // FIXME fix `#[linkage = "extern_weak"]` without this
-         Self {
-             root_dir,
-             out_dir,
-             jit_supported,
-             rust_flags,
-             run_wrapper: run_wrapper.iter().map(|s| s.to_string()).collect(),
-             host_triple,
-             target_triple,
-         }
++        if target_triple.contains("darwin") {
++            rustflags = format!("{} -Clink-arg=-undefined -Clink-arg=dynamic_lookup", rustflags);
 +        }
 +
-     fn in_dir(&self, new: impl AsRef<Path>, callback: impl FnOnce(&TestRunner)) {
-         let current = env::current_dir().unwrap();
-         env::set_current_dir(new).unwrap();
-         callback(self);
-         env::set_current_dir(current).unwrap();
-     }
++        let host_compiler = Compiler {
++            cargo: get_cargo_path(),
++            rustc: rustc_clif.clone(),
++            rustdoc: rustdoc_clif.clone(),
++            rustflags: String::new(),
++            rustdocflags: String::new(),
++            triple: host_triple,
++            runner: vec![],
++        };
++
++        let target_compiler = Compiler {
++            cargo: get_cargo_path(),
++            rustc: rustc_clif,
++            rustdoc: rustdoc_clif,
++            rustflags: rustflags.clone(),
++            rustdocflags: rustflags,
++            triple: target_triple,
++            runner,
++        };
++
++        Self { is_native, jit_supported, dirs, host_compiler, target_compiler }
 +    }
 +
 +    pub fn run_testsuite(&self, tests: &[TestCase]) {
 +        for &TestCase { config, func } in tests {
 +            let (tag, testname) = config.split_once('.').unwrap();
 +            let tag = tag.to_uppercase();
 +            let is_jit_test = tag == "JIT";
 +
 +            if !config::get_bool(config) || (is_jit_test && !self.jit_supported) {
 +                eprintln!("[{tag}] {testname} (skipped)");
 +                continue;
 +            } else {
 +                eprintln!("[{tag}] {testname}");
 +            }
 +
 +            func(self);
 +        }
 +    }
 +
-         let mut rustc_clif = self.root_dir.clone();
-         rustc_clif.push("build");
-         rustc_clif.push(get_wrapper_file_name("rustc-clif", "bin"));
-         let mut cmd = Command::new(rustc_clif);
-         cmd.args(self.rust_flags.split_whitespace());
++    #[must_use]
 +    fn rustc_command<I, S>(&self, args: I) -> Command
 +    where
 +        I: IntoIterator<Item = S>,
 +        S: AsRef<OsStr>,
 +    {
-         cmd.arg(format!("crate={}", self.out_dir.display()));
++        let mut cmd = Command::new(&self.target_compiler.rustc);
++        cmd.args(self.target_compiler.rustflags.split_whitespace());
 +        cmd.arg("-L");
-         cmd.arg(format!("{}", self.out_dir.display()));
++        cmd.arg(format!("crate={}", BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).display()));
 +        cmd.arg("--out-dir");
-         if !self.run_wrapper.is_empty() {
-             full_cmd.extend(self.run_wrapper.iter().cloned());
++        cmd.arg(format!("{}", BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).display()));
 +        cmd.arg("-Cdebuginfo=2");
 +        cmd.args(args);
 +        cmd
 +    }
 +
 +    fn run_rustc<I, S>(&self, args: I)
 +    where
 +        I: IntoIterator<Item = S>,
 +        S: AsRef<OsStr>,
 +    {
 +        spawn_and_wait(self.rustc_command(args));
 +    }
 +
 +    fn run_out_command<'a, I>(&self, name: &str, args: I)
 +    where
 +        I: IntoIterator<Item = &'a str>,
 +    {
 +        let mut full_cmd = vec![];
 +
 +        // Prepend the RUN_WRAPPER's
-         full_cmd.push({
-             let mut out_path = self.out_dir.clone();
-             out_path.push(name);
-             out_path.to_str().unwrap().to_string()
-         });
++        if !self.target_compiler.runner.is_empty() {
++            full_cmd.extend(self.target_compiler.runner.iter().cloned());
 +        }
 +
-     fn cargo_command<'a, I>(&self, subcommand: &str, args: I) -> Command
-     where
-         I: IntoIterator<Item = &'a str>,
-     {
-         let mut cargo_clif = self.root_dir.clone();
-         cargo_clif.push("build");
-         cargo_clif.push(get_wrapper_file_name("cargo-clif", "bin"));
-         let mut cmd = cargo_command(
-             cargo_clif,
-             subcommand,
-             if subcommand == "clean" { None } else { Some(&self.target_triple) },
-             Path::new("."),
-         );
-         cmd.args(args);
-         cmd.env("RUSTFLAGS", &self.rust_flags);
-         cmd
-     }
-     fn run_cargo<'a, I>(&self, subcommand: &str, args: I)
-     where
-         I: IntoIterator<Item = &'a str>,
-     {
-         spawn_and_wait(self.cargo_command(subcommand, args));
-     }
++        full_cmd.push(
++            BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).join(name).to_str().unwrap().to_string(),
++        );
 +
 +        for arg in args.into_iter() {
 +            full_cmd.push(arg.to_string());
 +        }
 +
 +        let mut cmd_iter = full_cmd.into_iter();
 +        let first = cmd_iter.next().unwrap();
 +
 +        let mut cmd = Command::new(first);
 +        cmd.args(cmd_iter);
 +
 +        spawn_and_wait(cmd);
 +    }
 +}
index c627af4e62fe14073bc539d195cb5623afd7dabe,0000000000000000000000000000000000000000..2be70e8e421b2961e97b53c99f38c83256940365
mode 100644,000000..100644
--- /dev/null
@@@ -1,108 -1,0 +1,211 @@@
- use std::path::Path;
 +use std::env;
 +use std::fs;
 +use std::io::Write;
- pub(crate) fn cargo_command(
-     cargo: impl AsRef<Path>,
-     subcommand: &str,
-     triple: Option<&str>,
-     source_dir: &Path,
- ) -> Command {
-     let mut cmd = Command::new(cargo.as_ref());
-     cmd.arg(subcommand)
-         .arg("--manifest-path")
-         .arg(source_dir.join("Cargo.toml"))
-         .arg("--target-dir")
-         .arg(source_dir.join("target"));
++use std::path::{Path, PathBuf};
 +use std::process::{self, Command, Stdio};
 +
-     if let Some(triple) = triple {
-         cmd.arg("--target").arg(triple);
++use super::path::{Dirs, RelPath};
++use super::rustc_info::{get_cargo_path, get_host_triple, get_rustc_path, get_rustdoc_path};
++
++pub(crate) struct Compiler {
++    pub(crate) cargo: PathBuf,
++    pub(crate) rustc: PathBuf,
++    pub(crate) rustdoc: PathBuf,
++    pub(crate) rustflags: String,
++    pub(crate) rustdocflags: String,
++    pub(crate) triple: String,
++    pub(crate) runner: Vec<String>,
++}
++
++impl Compiler {
++    pub(crate) fn host() -> Compiler {
++        Compiler {
++            cargo: get_cargo_path(),
++            rustc: get_rustc_path(),
++            rustdoc: get_rustdoc_path(),
++            rustflags: String::new(),
++            rustdocflags: String::new(),
++            triple: get_host_triple(),
++            runner: vec![],
++        }
++    }
++
++    pub(crate) fn with_triple(triple: String) -> Compiler {
++        Compiler {
++            cargo: get_cargo_path(),
++            rustc: get_rustc_path(),
++            rustdoc: get_rustdoc_path(),
++            rustflags: String::new(),
++            rustdocflags: String::new(),
++            triple,
++            runner: vec![],
++        }
++    }
++}
++
++pub(crate) struct CargoProject {
++    source: &'static RelPath,
++    target: &'static str,
++}
++
++impl CargoProject {
++    pub(crate) const fn new(path: &'static RelPath, target: &'static str) -> CargoProject {
++        CargoProject { source: path, target }
++    }
++
++    pub(crate) fn source_dir(&self, dirs: &Dirs) -> PathBuf {
++        self.source.to_path(dirs)
++    }
++
++    pub(crate) fn manifest_path(&self, dirs: &Dirs) -> PathBuf {
++        self.source_dir(dirs).join("Cargo.toml")
++    }
++
++    pub(crate) fn target_dir(&self, dirs: &Dirs) -> PathBuf {
++        RelPath::BUILD.join(self.target).to_path(dirs)
++    }
 +
-     cmd
++    fn base_cmd(&self, command: &str, cargo: &Path, dirs: &Dirs) -> Command {
++        let mut cmd = Command::new(cargo);
++
++        cmd.arg(command)
++            .arg("--manifest-path")
++            .arg(self.manifest_path(dirs))
++            .arg("--target-dir")
++            .arg(self.target_dir(dirs));
++
++        cmd
++    }
++
++    fn build_cmd(&self, command: &str, compiler: &Compiler, dirs: &Dirs) -> Command {
++        let mut cmd = self.base_cmd(command, &compiler.cargo, dirs);
++
++        cmd.arg("--target").arg(&compiler.triple);
++
++        cmd.env("RUSTC", &compiler.rustc);
++        cmd.env("RUSTDOC", &compiler.rustdoc);
++        cmd.env("RUSTFLAGS", &compiler.rustflags);
++        cmd.env("RUSTDOCFLAGS", &compiler.rustdocflags);
++        if !compiler.runner.is_empty() {
++            cmd.env(
++                format!("CARGO_TARGET_{}_RUNNER", compiler.triple.to_uppercase().replace('-', "_")),
++                compiler.runner.join(" "),
++            );
++        }
++
++        cmd
++    }
++
++    #[must_use]
++    pub(crate) fn fetch(&self, cargo: impl AsRef<Path>, dirs: &Dirs) -> Command {
++        let mut cmd = Command::new(cargo.as_ref());
++
++        cmd.arg("fetch").arg("--manifest-path").arg(self.manifest_path(dirs));
++
++        cmd
 +    }
 +
-     prepare: Option<Command>,
-     a: Command,
-     b: Command,
++    #[must_use]
++    pub(crate) fn clean(&self, cargo: &Path, dirs: &Dirs) -> Command {
++        self.base_cmd("clean", cargo, dirs)
++    }
++
++    #[must_use]
++    pub(crate) fn build(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
++        self.build_cmd("build", compiler, dirs)
++    }
++
++    #[must_use]
++    pub(crate) fn test(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
++        self.build_cmd("test", compiler, dirs)
++    }
++
++    #[must_use]
++    pub(crate) fn run(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
++        self.build_cmd("run", compiler, dirs)
++    }
 +}
 +
++#[must_use]
 +pub(crate) fn hyperfine_command(
 +    warmup: u64,
 +    runs: u64,
-         bench.arg("--prepare").arg(format!("{:?}", prepare));
++    prepare: Option<&str>,
++    a: &str,
++    b: &str,
 +) -> Command {
 +    let mut bench = Command::new("hyperfine");
 +
 +    if warmup != 0 {
 +        bench.arg("--warmup").arg(warmup.to_string());
 +    }
 +
 +    if runs != 0 {
 +        bench.arg("--runs").arg(runs.to_string());
 +    }
 +
 +    if let Some(prepare) = prepare {
-     bench.arg(format!("{:?}", a)).arg(format!("{:?}", b));
++        bench.arg("--prepare").arg(prepare);
 +    }
 +
++    bench.arg(a).arg(b);
 +
 +    bench
 +}
 +
 +#[track_caller]
 +pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
 +    let src = src.as_ref();
 +    let dst = dst.as_ref();
 +    if let Err(_) = fs::hard_link(src, dst) {
 +        fs::copy(src, dst).unwrap(); // Fallback to copying if hardlinking failed
 +    }
 +}
 +
 +#[track_caller]
 +pub(crate) fn spawn_and_wait(mut cmd: Command) {
 +    if !cmd.spawn().unwrap().wait().unwrap().success() {
 +        process::exit(1);
 +    }
 +}
 +
 +#[track_caller]
 +pub(crate) fn spawn_and_wait_with_input(mut cmd: Command, input: String) -> String {
 +    let mut child = cmd
 +        .stdin(Stdio::piped())
 +        .stdout(Stdio::piped())
 +        .spawn()
 +        .expect("Failed to spawn child process");
 +
 +    let mut stdin = child.stdin.take().expect("Failed to open stdin");
 +    std::thread::spawn(move || {
 +        stdin.write_all(input.as_bytes()).expect("Failed to write to stdin");
 +    });
 +
 +    let output = child.wait_with_output().expect("Failed to read stdout");
 +    if !output.status.success() {
 +        process::exit(1);
 +    }
 +
 +    String::from_utf8(output.stdout).unwrap()
 +}
 +
 +pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
 +    for entry in fs::read_dir(from).unwrap() {
 +        let entry = entry.unwrap();
 +        let filename = entry.file_name();
 +        if filename == "." || filename == ".." {
 +            continue;
 +        }
 +        if entry.metadata().unwrap().is_dir() {
 +            fs::create_dir(to.join(&filename)).unwrap();
 +            copy_dir_recursively(&from.join(&filename), &to.join(&filename));
 +        } else {
 +            fs::copy(from.join(&filename), to.join(&filename)).unwrap();
 +        }
 +    }
 +}
 +
 +pub(crate) fn is_ci() -> bool {
 +    env::var("CI").as_deref() == Ok("true")
 +}
index fedab2433aa05f66f33949f35ea62cd88ec3f4ed,0000000000000000000000000000000000000000..1760e5836ecce00a48f7a0236748dfb94329fce0
mode 100755,000000..100755
--- /dev/null
@@@ -1,10 -1,0 +1,10 @@@
- rm -rf target/ build/ perf.data{,.old} y.bin
 +#!/usr/bin/env bash
 +set -e
 +
 +rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
++rm -rf target/ build/ dist/ perf.data{,.old} y.bin
 +rm -rf download/
 +
 +# Kept for now in case someone updates their checkout of cg_clif before running clean_all.sh
 +# FIXME remove at some point in the future
 +rm -rf rand/ regex/ simple-raytracer/ portable-simd/ abi-checker/ abi-cafe/
index 0d539191b12f95936b65e6d134d460b31d96c3e6,0000000000000000000000000000000000000000..258b67e931476850a25cab17bbc2c3300a243821
mode 100644,000000..100644
--- /dev/null
@@@ -1,52 -1,0 +1,53 @@@
 +# This file allows configuring the build system.
 +
 +# Which triple to produce a compiler toolchain for.
 +#
 +# Defaults to the default triple of rustc on the host system.
 +#host = x86_64-unknown-linux-gnu
 +
 +# Which triple to build libraries (core/alloc/std/test/proc_macro) for.
 +#
 +# Defaults to `host`.
 +#target = x86_64-unknown-linux-gnu
 +
 +# Disables cleaning of the sysroot dir. This will cause old compiled artifacts to be re-used when
 +# the sysroot source hasn't changed. This is useful when the codegen backend hasn't been modified.
 +# This option can be changed while the build system is already running for as long as sysroot
 +# building hasn't started yet.
 +#keep_sysroot
 +
 +
 +# Testsuite
 +#
 +# Each test suite item has a corresponding key here. The default is to run all tests.
 +# Comment any of these lines to skip individual tests.
 +
 +testsuite.no_sysroot
 +build.mini_core
 +build.example
 +jit.mini_core_hello_world
 +aot.mini_core_hello_world
 +
 +testsuite.base_sysroot
 +aot.arbitrary_self_types_pointers_and_wrappers
 +aot.issue_91827_extern_types
 +build.alloc_system
 +aot.alloc_example
 +jit.std_example
 +aot.std_example
 +aot.dst_field_align
 +aot.subslice-patterns-const-eval
 +aot.track-caller-attribute
 +aot.float-minmax-pass
 +aot.mod_bench
++aot.issue-72793
 +
 +testsuite.extended_sysroot
 +test.rust-random/rand
 +bench.simple-raytracer
 +test.libcore
 +test.regex-shootout-regex-dna
 +test.regex
 +test.portable-simd
 +
 +testsuite.abi-cafe
index 33f146e7ba27aec13e57e55d40ebb79e3f28e359,0000000000000000000000000000000000000000..4c2b0fa170498812b976b67041cc62ab73ff3f81
mode 100644,000000..100644
--- /dev/null
@@@ -1,67 -1,0 +1,67 @@@
- $ $cg_clif_dir/build/cargo-clif build
 +# Usage
 +
 +rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
 +
 +Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`y.rs prepare` and `y.rs build` or `test.sh`).
 +
 +## Cargo
 +
 +In the directory with your project (where you can do the usual `cargo build`), run:
 +
 +```bash
- $ $cg_clif_dir/build/rustc-clif my_crate.rs
++$ $cg_clif_dir/dist/cargo-clif build
 +```
 +
 +This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
 +
 +## Rustc
 +
 +> You should prefer using the Cargo method.
 +
 +```bash
- $ $cg_clif_dir/build/cargo-clif jit
++$ $cg_clif_dir/dist/rustc-clif my_crate.rs
 +```
 +
 +## Jit mode
 +
 +> ⚠⚠⚠ The JIT mode is highly experimental. It may be slower than AOT compilation due to lack of incremental compilation. It may also be hard to set up if you have cargo dependencies. ⚠⚠⚠
 +
 +In jit mode cg_clif will immediately execute your code without creating an executable file.
 +
 +> This requires all dependencies to be available as dynamic libraries.
 +> The jit mode will probably need cargo integration to make this possible.
 +
 +```bash
- $ $cg_clif_dir/build/rustc-clif -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
++$ $cg_clif_dir/dist/cargo-clif jit
 +```
 +
 +or
 +
 +```bash
- $ $cg_clif_dir/build/cargo-clif lazy-jit
++$ $cg_clif_dir/dist/rustc-clif -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
 +```
 +
 +There is also an experimental lazy jit mode. In this mode functions are only compiled once they are
 +first called.
 +
 +```bash
-     echo "$@" | $cg_clif_dir/build/rustc-clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
++$ $cg_clif_dir/dist/cargo-clif lazy-jit
 +```
 +
 +## Shell
 +
 +These are a few functions that allow you to easily run rust code from the shell using cg_clif as jit.
 +
 +```bash
 +function jit_naked() {
++    echo "$@" | $cg_clif_dir/dist/rustc-clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
 +}
 +
 +function jit() {
 +    jit_naked "fn main() { $@ }"
 +}
 +
 +function jit_calc() {
 +    jit 'println!("0x{:x}", ' $@ ');';
 +}
 +```
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..b1bb9b8e1e73034927ae8f094d1fd997baa80b3a
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,24 @@@
++// Adapted from rustc ui test suite (ui/type-alias-impl-trait/issue-72793.rs)
++
++#![feature(type_alias_impl_trait)]
++
++trait T { type Item; }
++
++type Alias<'a> = impl T<Item = &'a ()>;
++
++struct S;
++impl<'a> T for &'a S {
++    type Item = &'a ();
++}
++
++fn filter_positive<'a>() -> Alias<'a> {
++    &S
++}
++
++fn with_positive(fun: impl Fn(Alias<'_>)) {
++    fun(filter_positive());
++}
++
++fn main() {
++    with_positive(|_| ());
++}
index 7f85b52f083a7c2a8de18913a937a65e711474b7,0000000000000000000000000000000000000000..1f9db1eb2a97affce42fe9395435a0b5cd0b8341
mode 100644,000000..100644
--- /dev/null
@@@ -1,671 -1,0 +1,674 @@@
- pub trait FnOnce<Args> {
 +#![feature(
 +    no_core,
 +    lang_items,
 +    intrinsics,
 +    unboxed_closures,
 +    extern_types,
 +    decl_macro,
 +    rustc_attrs,
 +    transparent_unions,
 +    auto_traits,
 +    thread_local
 +)]
 +#![no_core]
 +#![allow(dead_code)]
 +
 +#[lang = "sized"]
 +pub trait Sized {}
 +
 +#[lang = "destruct"]
 +pub trait Destruct {}
 +
++#[lang = "tuple_trait"]
++pub trait Tuple {}
++
 +#[lang = "unsize"]
 +pub trait Unsize<T: ?Sized> {}
 +
 +#[lang = "coerce_unsized"]
 +pub trait CoerceUnsized<T> {}
 +
 +impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
 +impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
 +
 +#[lang = "dispatch_from_dyn"]
 +pub trait DispatchFromDyn<T> {}
 +
 +// &T -> &U
 +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
 +// &mut T -> &mut U
 +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
 +// *const T -> *const U
 +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
 +// *mut T -> *mut U
 +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
 +
 +#[lang = "receiver"]
 +pub trait Receiver {}
 +
 +impl<T: ?Sized> Receiver for &T {}
 +impl<T: ?Sized> Receiver for &mut T {}
 +impl<T: ?Sized> Receiver for Box<T> {}
 +
 +#[lang = "copy"]
 +pub unsafe trait Copy {}
 +
 +unsafe impl Copy for bool {}
 +unsafe impl Copy for u8 {}
 +unsafe impl Copy for u16 {}
 +unsafe impl Copy for u32 {}
 +unsafe impl Copy for u64 {}
 +unsafe impl Copy for u128 {}
 +unsafe impl Copy for usize {}
 +unsafe impl Copy for i8 {}
 +unsafe impl Copy for i16 {}
 +unsafe impl Copy for i32 {}
 +unsafe impl Copy for isize {}
 +unsafe impl Copy for f32 {}
 +unsafe impl Copy for f64 {}
 +unsafe impl Copy for char {}
 +unsafe impl<'a, T: ?Sized> Copy for &'a T {}
 +unsafe impl<T: ?Sized> Copy for *const T {}
 +unsafe impl<T: ?Sized> Copy for *mut T {}
 +unsafe impl<T: Copy> Copy for Option<T> {}
 +
 +#[lang = "sync"]
 +pub unsafe trait Sync {}
 +
 +unsafe impl Sync for bool {}
 +unsafe impl Sync for u8 {}
 +unsafe impl Sync for u16 {}
 +unsafe impl Sync for u32 {}
 +unsafe impl Sync for u64 {}
 +unsafe impl Sync for usize {}
 +unsafe impl Sync for i8 {}
 +unsafe impl Sync for i16 {}
 +unsafe impl Sync for i32 {}
 +unsafe impl Sync for isize {}
 +unsafe impl Sync for char {}
 +unsafe impl<'a, T: ?Sized> Sync for &'a T {}
 +unsafe impl Sync for [u8; 16] {}
 +
 +#[lang = "freeze"]
 +unsafe auto trait Freeze {}
 +
 +unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
 +unsafe impl<T: ?Sized> Freeze for *const T {}
 +unsafe impl<T: ?Sized> Freeze for *mut T {}
 +unsafe impl<T: ?Sized> Freeze for &T {}
 +unsafe impl<T: ?Sized> Freeze for &mut T {}
 +
 +#[lang = "structural_peq"]
 +pub trait StructuralPartialEq {}
 +
 +#[lang = "structural_teq"]
 +pub trait StructuralEq {}
 +
 +#[lang = "not"]
 +pub trait Not {
 +    type Output;
 +
 +    fn not(self) -> Self::Output;
 +}
 +
 +impl Not for bool {
 +    type Output = bool;
 +
 +    fn not(self) -> bool {
 +        !self
 +    }
 +}
 +
 +#[lang = "mul"]
 +pub trait Mul<RHS = Self> {
 +    type Output;
 +
 +    #[must_use]
 +    fn mul(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Mul for u8 {
 +    type Output = Self;
 +
 +    fn mul(self, rhs: Self) -> Self::Output {
 +        self * rhs
 +    }
 +}
 +
 +impl Mul for usize {
 +    type Output = Self;
 +
 +    fn mul(self, rhs: Self) -> Self::Output {
 +        self * rhs
 +    }
 +}
 +
 +#[lang = "add"]
 +pub trait Add<RHS = Self> {
 +    type Output;
 +
 +    fn add(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Add for u8 {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +impl Add for i8 {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +impl Add for usize {
 +    type Output = Self;
 +
 +    fn add(self, rhs: Self) -> Self {
 +        self + rhs
 +    }
 +}
 +
 +#[lang = "sub"]
 +pub trait Sub<RHS = Self> {
 +    type Output;
 +
 +    fn sub(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Sub for usize {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for u8 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for i8 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +impl Sub for i16 {
 +    type Output = Self;
 +
 +    fn sub(self, rhs: Self) -> Self {
 +        self - rhs
 +    }
 +}
 +
 +#[lang = "rem"]
 +pub trait Rem<RHS = Self> {
 +    type Output;
 +
 +    fn rem(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Rem for usize {
 +    type Output = Self;
 +
 +    fn rem(self, rhs: Self) -> Self {
 +        self % rhs
 +    }
 +}
 +
 +#[lang = "bitor"]
 +pub trait BitOr<RHS = Self> {
 +    type Output;
 +
 +    #[must_use]
 +    fn bitor(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl BitOr for bool {
 +    type Output = bool;
 +
 +    fn bitor(self, rhs: bool) -> bool {
 +        self | rhs
 +    }
 +}
 +
 +impl<'a> BitOr<bool> for &'a bool {
 +    type Output = bool;
 +
 +    fn bitor(self, rhs: bool) -> bool {
 +        *self | rhs
 +    }
 +}
 +
 +#[lang = "eq"]
 +pub trait PartialEq<Rhs: ?Sized = Self> {
 +    fn eq(&self, other: &Rhs) -> bool;
 +    fn ne(&self, other: &Rhs) -> bool;
 +}
 +
 +impl PartialEq for u8 {
 +    fn eq(&self, other: &u8) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u8) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for u16 {
 +    fn eq(&self, other: &u16) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u16) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for u32 {
 +    fn eq(&self, other: &u32) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u32) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +
 +impl PartialEq for u64 {
 +    fn eq(&self, other: &u64) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u64) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for u128 {
 +    fn eq(&self, other: &u128) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &u128) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for usize {
 +    fn eq(&self, other: &usize) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &usize) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for i8 {
 +    fn eq(&self, other: &i8) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &i8) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for i32 {
 +    fn eq(&self, other: &i32) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &i32) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for isize {
 +    fn eq(&self, other: &isize) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &isize) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl PartialEq for char {
 +    fn eq(&self, other: &char) -> bool {
 +        (*self) == (*other)
 +    }
 +    fn ne(&self, other: &char) -> bool {
 +        (*self) != (*other)
 +    }
 +}
 +
 +impl<T: ?Sized> PartialEq for *const T {
 +    fn eq(&self, other: &*const T) -> bool {
 +        *self == *other
 +    }
 +    fn ne(&self, other: &*const T) -> bool {
 +        *self != *other
 +    }
 +}
 +
 +impl <T: PartialEq> PartialEq for Option<T> {
 +    fn eq(&self, other: &Self) -> bool {
 +        match (self, other) {
 +            (Some(lhs), Some(rhs)) => *lhs == *rhs,
 +            (None, None) => true,
 +            _ => false,
 +        }
 +    }
 +
 +    fn ne(&self, other: &Self) -> bool {
 +        match (self, other) {
 +            (Some(lhs), Some(rhs)) => *lhs != *rhs,
 +            (None, None) => false,
 +            _ => true,
 +        }
 +    }
 +}
 +
 +#[lang = "shl"]
 +pub trait Shl<RHS = Self> {
 +    type Output;
 +
 +    #[must_use]
 +    fn shl(self, rhs: RHS) -> Self::Output;
 +}
 +
 +impl Shl for u128 {
 +    type Output = u128;
 +
 +    fn shl(self, rhs: u128) -> u128 {
 +        self << rhs
 +    }
 +}
 +
 +#[lang = "neg"]
 +pub trait Neg {
 +    type Output;
 +
 +    fn neg(self) -> Self::Output;
 +}
 +
 +impl Neg for i8 {
 +    type Output = i8;
 +
 +    fn neg(self) -> i8 {
 +        -self
 +    }
 +}
 +
 +impl Neg for i16 {
 +    type Output = i16;
 +
 +    fn neg(self) -> i16 {
 +        self
 +    }
 +}
 +
 +impl Neg for isize {
 +    type Output = isize;
 +
 +    fn neg(self) -> isize {
 +        -self
 +    }
 +}
 +
 +impl Neg for f32 {
 +    type Output = f32;
 +
 +    fn neg(self) -> f32 {
 +        -self
 +    }
 +}
 +
 +pub enum Option<T> {
 +    Some(T),
 +    None,
 +}
 +
 +pub use Option::*;
 +
 +#[lang = "phantom_data"]
 +pub struct PhantomData<T: ?Sized>;
 +
 +#[lang = "fn_once"]
 +#[rustc_paren_sugar]
- pub trait FnMut<Args>: FnOnce<Args> {
++pub trait FnOnce<Args: Tuple> {
 +    #[lang = "fn_once_output"]
 +    type Output;
 +
 +    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
 +}
 +
 +#[lang = "fn_mut"]
 +#[rustc_paren_sugar]
++pub trait FnMut<Args: Tuple>: FnOnce<Args> {
 +    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
 +}
 +
 +#[lang = "panic"]
 +#[track_caller]
 +pub fn panic(_msg: &'static str) -> ! {
 +    unsafe {
 +        libc::puts("Panicking\n\0" as *const str as *const i8);
 +        intrinsics::abort();
 +    }
 +}
 +
 +#[lang = "panic_bounds_check"]
 +#[track_caller]
 +fn panic_bounds_check(index: usize, len: usize) -> ! {
 +    unsafe {
 +        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
 +        intrinsics::abort();
 +    }
 +}
 +
 +#[lang = "eh_personality"]
 +fn eh_personality() -> ! {
 +    loop {}
 +}
 +
 +#[lang = "drop_in_place"]
 +#[allow(unconditional_recursion)]
 +pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
 +    // Code here does not matter - this is replaced by the
 +    // real drop glue by the compiler.
 +    drop_in_place(to_drop);
 +}
 +
 +#[lang = "deref"]
 +pub trait Deref {
 +    type Target: ?Sized;
 +
 +    fn deref(&self) -> &Self::Target;
 +}
 +
 +#[repr(transparent)]
 +#[rustc_layout_scalar_valid_range_start(1)]
 +#[rustc_nonnull_optimization_guaranteed]
 +pub struct NonNull<T: ?Sized>(pub *const T);
 +
 +impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
 +impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
 +
 +pub struct Unique<T: ?Sized> {
 +    pub pointer: NonNull<T>,
 +    pub _marker: PhantomData<T>,
 +}
 +
 +impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
 +impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
 +
 +#[lang = "owned_box"]
 +pub struct Box<T: ?Sized>(Unique<T>, ());
 +
 +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
 +
 +impl<T: ?Sized> Drop for Box<T> {
 +    fn drop(&mut self) {
 +        // drop is currently performed by compiler.
 +    }
 +}
 +
 +impl<T: ?Sized> Deref for Box<T> {
 +    type Target = T;
 +
 +    fn deref(&self) -> &Self::Target {
 +        &**self
 +    }
 +}
 +
 +#[lang = "exchange_malloc"]
 +unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
 +    libc::malloc(size)
 +}
 +
 +#[lang = "box_free"]
 +unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, _alloc: ()) {
 +    libc::free(ptr.pointer.0 as *mut u8);
 +}
 +
 +#[lang = "drop"]
 +pub trait Drop {
 +    fn drop(&mut self);
 +}
 +
 +#[lang = "manually_drop"]
 +#[repr(transparent)]
 +pub struct ManuallyDrop<T: ?Sized> {
 +    pub value: T,
 +}
 +
 +#[lang = "maybe_uninit"]
 +#[repr(transparent)]
 +pub union MaybeUninit<T> {
 +    pub uninit: (),
 +    pub value: ManuallyDrop<T>,
 +}
 +
 +pub mod intrinsics {
 +    extern "rust-intrinsic" {
 +        #[rustc_safe_intrinsic]
 +        pub fn abort() -> !;
 +        #[rustc_safe_intrinsic]
 +        pub fn size_of<T>() -> usize;
 +        pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
 +        #[rustc_safe_intrinsic]
 +        pub fn min_align_of<T>() -> usize;
 +        pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
 +        pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
 +        pub fn transmute<T, U>(e: T) -> U;
 +        pub fn ctlz_nonzero<T>(x: T) -> T;
 +        #[rustc_safe_intrinsic]
 +        pub fn needs_drop<T: ?::Sized>() -> bool;
 +        #[rustc_safe_intrinsic]
 +        pub fn bitreverse<T>(x: T) -> T;
 +        #[rustc_safe_intrinsic]
 +        pub fn bswap<T>(x: T) -> T;
 +        pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
 +    }
 +}
 +
 +pub mod libc {
 +    // With the new Universal CRT, msvc has switched to all the printf functions being inline wrapper
 +    // functions. legacy_stdio_definitions.lib which provides the printf wrapper functions as normal
 +    // symbols to link against.
 +    #[cfg_attr(unix, link(name = "c"))]
 +    #[cfg_attr(target_env="msvc", link(name="legacy_stdio_definitions"))]
 +    extern "C" {
 +        pub fn printf(format: *const i8, ...) -> i32;
 +    }
 +
 +    #[cfg_attr(unix, link(name = "c"))]
 +    #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
 +    extern "C" {
 +        pub fn puts(s: *const i8) -> i32;
 +        pub fn malloc(size: usize) -> *mut u8;
 +        pub fn free(ptr: *mut u8);
 +        pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
 +        pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
 +        pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
 +    }
 +}
 +
 +#[lang = "index"]
 +pub trait Index<Idx: ?Sized> {
 +    type Output: ?Sized;
 +    fn index(&self, index: Idx) -> &Self::Output;
 +}
 +
 +impl<T> Index<usize> for [T; 3] {
 +    type Output = T;
 +
 +    fn index(&self, index: usize) -> &Self::Output {
 +        &self[index]
 +    }
 +}
 +
 +impl<T> Index<usize> for [T] {
 +    type Output = T;
 +
 +    fn index(&self, index: usize) -> &Self::Output {
 +        &self[index]
 +    }
 +}
 +
 +extern {
 +    type VaListImpl;
 +}
 +
 +#[lang = "va_list"]
 +#[repr(transparent)]
 +pub struct VaList<'a>(&'a mut VaListImpl);
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro stringify($($t:tt)*) { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro file() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro line() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro cfg() { /* compiler built-in */ }
 +
 +#[rustc_builtin_macro]
 +#[rustc_macro_transparency = "semitransparent"]
 +pub macro global_asm() { /* compiler built-in */ }
 +
 +pub static A_STATIC: u8 = 42;
 +
 +#[lang = "panic_location"]
 +struct PanicLocation {
 +    file: &'static str,
 +    line: u32,
 +    column: u32,
 +}
 +
 +#[no_mangle]
 +#[cfg(not(windows))]
 +pub fn get_tls() -> u8 {
 +    #[thread_local]
 +    static A: u8 = 42;
 +
 +    A
 +}
index 215d3556a17ca8cf2b5eb7a7673b5a090612da70,0000000000000000000000000000000000000000..c00f8a2e0cdad3229ad1362047bffeb8a4be2ad9
mode 100644,000000..100644
--- /dev/null
@@@ -1,601 -1,0 +1,599 @@@
-     //return;
 +#![feature(no_core, lang_items, never_type, linkage, extern_types, thread_local, box_syntax)]
 +#![no_core]
 +#![allow(dead_code, non_camel_case_types)]
 +
 +extern crate mini_core;
 +
 +use mini_core::*;
 +use mini_core::libc::*;
 +
 +macro_rules! assert {
 +    ($e:expr) => {
 +        if !$e {
 +            panic(stringify!(! $e));
 +        }
 +    };
 +}
 +
 +macro_rules! assert_eq {
 +    ($l:expr, $r: expr) => {
 +        if $l != $r {
 +            panic(stringify!($l != $r));
 +        }
 +    }
 +}
 +
 +#[lang = "termination"]
 +trait Termination {
 +    fn report(self) -> i32;
 +}
 +
 +impl Termination for () {
 +    fn report(self) -> i32 {
 +        unsafe {
 +            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
 +            assert_eq!(*NUM_REF as i32, 44);
 +        }
 +        0
 +    }
 +}
 +
 +trait SomeTrait {
 +    fn object_safe(&self);
 +}
 +
 +impl SomeTrait for &'static str {
 +    fn object_safe(&self) {
 +        unsafe {
 +            puts(*self as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +struct NoisyDrop {
 +    text: &'static str,
 +    inner: NoisyDropInner,
 +}
 +
 +struct NoisyDropUnsized {
 +    inner: NoisyDropInner,
 +    text: str,
 +}
 +
 +struct NoisyDropInner;
 +
 +impl Drop for NoisyDrop {
 +    fn drop(&mut self) {
 +        unsafe {
 +            puts(self.text as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +impl Drop for NoisyDropInner {
 +    fn drop(&mut self) {
 +        unsafe {
 +            puts("Inner got dropped!\0" as *const str as *const i8);
 +        }
 +    }
 +}
 +
 +impl SomeTrait for NoisyDrop {
 +    fn object_safe(&self) {}
 +}
 +
 +enum Ordering {
 +    Less = -1,
 +    Equal = 0,
 +    Greater = 1,
 +}
 +
 +#[lang = "start"]
 +fn start<T: Termination + 'static>(
 +    main: fn() -> T,
 +    argc: isize,
 +    argv: *const *const u8,
 +    _sigpipe: u8,
 +) -> isize {
 +    if argc == 3 {
 +        unsafe { puts(*argv as *const i8); }
 +        unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
 +        unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
 +    }
 +
 +    main().report() as isize
 +}
 +
 +static mut NUM: u8 = 6 * 7;
 +static NUM_REF: &'static u8 = unsafe { &NUM };
 +
 +
 +unsafe fn zeroed<T>() -> T {
 +    let mut uninit = MaybeUninit { uninit: () };
 +    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
 +    uninit.value.value
 +}
 +
 +fn take_f32(_f: f32) {}
 +fn take_unique(_u: Unique<()>) {}
 +
 +fn return_u128_pair() -> (u128, u128) {
 +    (0, 0)
 +}
 +
 +fn call_return_u128_pair() {
 +    return_u128_pair();
 +}
 +
 +#[repr(C)]
 +pub struct bool_11 {
 +    field0: bool,
 +    field1: bool,
 +    field2: bool,
 +    field3: bool,
 +    field4: bool,
 +    field5: bool,
 +    field6: bool,
 +    field7: bool,
 +    field8: bool,
 +    field9: bool,
 +    field10: bool,
 +}
 +
 +extern "C" fn bool_struct_in_11(_arg0: bool_11) {}
 +
 +#[allow(unreachable_code)] // FIXME false positive
 +fn main() {
 +    take_unique(Unique {
 +        pointer: unsafe { NonNull(1 as *mut ()) },
 +        _marker: PhantomData,
 +    });
 +    take_f32(0.1);
 +
 +    call_return_u128_pair();
 +
 +    bool_struct_in_11(bool_11 {
 +        field0: true,
 +        field1: true,
 +        field2: true,
 +        field3: true,
 +        field4: true,
 +        field5: true,
 +        field6: true,
 +        field7: true,
 +        field8: true,
 +        field9: true,
 +        field10: true,
 +    });
 +
 +    let slice = &[0, 1] as &[i32];
 +    let slice_ptr = slice as *const [i32] as *const i32;
 +
 +    assert_eq!(slice_ptr as usize % 4, 0);
 +
 +    unsafe {
 +        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
 +
 +        let hello: &[u8] = b"Hello\0" as &[u8; 6];
 +        let ptr: *const i8 = hello as *const [u8] as *const i8;
 +        puts(ptr);
 +
 +        let world: Box<&str> = box "World!\0";
 +        puts(*world as *const str as *const i8);
 +        world as Box<dyn SomeTrait>;
 +
 +        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
 +
 +        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
 +        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
 +        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
 +        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
 +
 +        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
 +
 +        let chars = &['C', 'h', 'a', 'r', 's'];
 +        let chars = chars as &[char];
 +        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
 +
 +        let a: &dyn SomeTrait = &"abc\0";
 +        a.object_safe();
 +
 +        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
 +        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
 +
 +        assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
 +        assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
 +
 +        assert!(!intrinsics::needs_drop::<u8>());
 +        assert!(!intrinsics::needs_drop::<[u8]>());
 +        assert!(intrinsics::needs_drop::<NoisyDrop>());
 +        assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
 +
 +        Unique {
 +            pointer: NonNull(1 as *mut &str),
 +            _marker: PhantomData,
 +        } as Unique<dyn SomeTrait>;
 +
 +        struct MyDst<T: ?Sized>(T);
 +
 +        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
 +
 +        struct Foo {
 +            x: u8,
 +            y: !,
 +        }
 +
 +        unsafe fn uninitialized<T>() -> T {
 +            MaybeUninit { uninit: () }.value.value
 +        }
 +
 +        zeroed::<(u8, u8)>();
 +        #[allow(unreachable_code)]
 +        {
 +            if false {
 +                zeroed::<!>();
 +                zeroed::<Foo>();
 +                uninitialized::<Foo>();
 +            }
 +        }
 +    }
 +
 +    let _ = box NoisyDrop {
 +        text: "Boxed outer got dropped!\0",
 +        inner: NoisyDropInner,
 +    } as Box<dyn SomeTrait>;
 +
 +    const FUNC_REF: Option<fn()> = Some(main);
 +    match FUNC_REF {
 +        Some(_) => {},
 +        None => assert!(false),
 +    }
 +
 +    match Ordering::Less {
 +        Ordering::Less => {},
 +        _ => assert!(false),
 +    }
 +
 +    [NoisyDropInner, NoisyDropInner];
 +
 +    let x = &[0u32, 42u32] as &[u32];
 +    match x {
 +        [] => assert_eq!(0u32, 1),
 +        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
 +    }
 +
 +    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
 +
 +    #[cfg(not(any(jit, windows)))]
 +    {
 +        extern {
 +            #[linkage = "extern_weak"]
 +            static ABC: *const u8;
 +        }
 +
 +        {
 +            extern {
 +                #[linkage = "extern_weak"]
 +                static ABC: *const u8;
 +            }
 +        }
 +
 +        unsafe { assert_eq!(ABC as usize, 0); }
 +    }
 +
 +    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
 +
 +    let f = 1000.0;
 +    assert_eq!(f as u8, 255);
 +    let f2 = -1000.0;
 +    assert_eq!(f2 as i8, -128);
 +    assert_eq!(f2 as u8, 0);
 +
 +    let amount = 0;
 +    assert_eq!(1u128 << amount, 1);
 +
 +    static ANOTHER_STATIC: &u8 = &A_STATIC;
 +    assert_eq!(*ANOTHER_STATIC, 42);
 +
 +    check_niche_behavior();
 +
 +    extern "C" {
 +        type ExternType;
 +    }
 +
 +    struct ExternTypeWrapper {
 +        _a: ExternType,
 +    }
 +
 +    let nullptr = 0 as *const ();
 +    let extern_nullptr = nullptr as *const ExternTypeWrapper;
 +    extern_nullptr as *const ();
 +    let slice_ptr = &[] as *const [u8];
 +    slice_ptr as *const u8;
 +
 +    let repeat = [Some(42); 2];
 +    assert_eq!(repeat[0], Some(42));
 +    assert_eq!(repeat[1], Some(42));
 +
 +    from_decimal_string();
 +
 +    #[cfg(not(any(jit, windows)))]
 +    test_tls();
 +
 +    #[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "darwin")))]
 +    unsafe {
 +        global_asm_test();
 +    }
 +
 +    // Both statics have a reference that points to the same anonymous allocation.
 +    static REF1: &u8 = &42;
 +    static REF2: &u8 = REF1;
 +    assert_eq!(*REF1, *REF2);
 +
 +    extern "C" {
 +        type A;
 +    }
 +
 +    fn main() {
 +        let x: &A = unsafe { &*(1usize as *const A) };
 +
 +        assert_eq!(unsafe { intrinsics::size_of_val(x) }, 0);
 +        assert_eq!(unsafe { intrinsics::min_align_of_val(x) }, 1);
 +}
 +}
 +
 +#[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "darwin")))]
 +extern "C" {
 +    fn global_asm_test();
 +}
 +
 +#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
 +global_asm! {
 +    "
 +    .global global_asm_test
 +    global_asm_test:
 +    // comment that would normally be removed by LLVM
 +    ret
 +    "
 +}
 +
 +#[cfg(all(not(jit), target_arch = "x86_64", target_os = "darwin"))]
 +global_asm! {
 +    "
 +    .global _global_asm_test
 +    _global_asm_test:
 +    // comment that would normally be removed by LLVM
 +    ret
 +    "
 +}
 +
 +#[repr(C)]
 +enum c_void {
 +    _1,
 +    _2,
 +}
 +
 +type c_int = i32;
 +type c_ulong = u64;
 +
 +type pthread_t = c_ulong;
 +
 +#[repr(C)]
 +struct pthread_attr_t {
 +    __size: [u64; 7],
 +}
 +
 +#[link(name = "pthread")]
 +#[cfg(unix)]
 +extern "C" {
 +    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
 +
 +    fn pthread_create(
 +        native: *mut pthread_t,
 +        attr: *const pthread_attr_t,
 +        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
 +        value: *mut c_void
 +    ) -> c_int;
 +
 +    fn pthread_join(
 +        native: pthread_t,
 +        value: *mut *mut c_void
 +    ) -> c_int;
 +}
 +
 +type DWORD = u32;
 +type LPDWORD = *mut u32;
 +
 +type LPVOID = *mut c_void;
 +type HANDLE = *mut c_void;
 +
 +#[link(name = "msvcrt")]
 +#[cfg(windows)]
 +extern "C" {
 +    fn WaitForSingleObject(
 +        hHandle: LPVOID,
 +        dwMilliseconds: DWORD
 +    ) -> DWORD;
 +
 +    fn CreateThread(
 +        lpThreadAttributes: LPVOID, // Technically LPSECURITY_ATTRIBUTES, but we don't use it anyway
 +        dwStackSize: usize,
 +        lpStartAddress: extern "C" fn(_: *mut c_void) -> *mut c_void,
 +        lpParameter: LPVOID,
 +        dwCreationFlags: DWORD,
 +        lpThreadId: LPDWORD
 +    ) -> HANDLE;
 +}
 +
 +struct Thread {
 +    #[cfg(windows)]
 +    handle: HANDLE,
 +    #[cfg(unix)]
 +    handle: pthread_t,
 +}
 +
 +impl Thread {
 +    unsafe fn create(f: extern "C" fn(_: *mut c_void) -> *mut c_void) -> Self {
 +        #[cfg(unix)]
 +        {
 +            let mut attr: pthread_attr_t = zeroed();
 +            let mut thread: pthread_t = 0;
 +
 +            if pthread_attr_init(&mut attr) != 0 {
 +                assert!(false);
 +            }
 +
 +            if pthread_create(&mut thread, &attr, f, 0 as *mut c_void) != 0 {
 +                assert!(false);
 +            }
 +
 +            Thread {
 +                handle: thread,
 +            }
 +        }
 +
 +        #[cfg(windows)]
 +        {
 +            let handle = CreateThread(0 as *mut c_void, 0, f, 0 as *mut c_void, 0, 0 as *mut u32);
 +
 +            if (handle as u64) == 0 {
 +                assert!(false);
 +            }
 +
 +            Thread {
 +                handle,
 +            }
 +        }
 +    }
 +
 +
 +    unsafe fn join(self) {
 +        #[cfg(unix)]
 +        {
 +            let mut res = 0 as *mut c_void;
 +            pthread_join(self.handle, &mut res);
 +        }
 +
 +        #[cfg(windows)]
 +        {
 +            // The INFINITE macro is used to signal operations that do not timeout.
 +            let infinite = 0xffffffff;
 +            assert!(WaitForSingleObject(self.handle, infinite) == 0);
 +        }
 +    }
 +}
 +
 +
 +
 +
 +#[thread_local]
 +#[cfg(not(jit))]
 +static mut TLS: u8 = 42;
 +
 +#[cfg(not(jit))]
 +extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
 +    unsafe { TLS = 0; }
 +    0 as *mut c_void
 +}
 +
 +#[cfg(not(jit))]
 +fn test_tls() {
 +    unsafe {
 +        assert_eq!(TLS, 42);
 +
 +        let thread = Thread::create(mutate_tls);
 +        thread.join();
 +
 +        // TLS of main thread must not have been changed by the other thread.
 +        assert_eq!(TLS, 42);
 +
 +        puts("TLS works!\n\0" as *const str as *const i8);
 +    }
 +}
 +
 +// Copied ui/issues/issue-61696.rs
 +
 +pub enum Infallible {}
 +
 +// The check that the `bool` field of `V1` is encoding a "niche variant"
 +// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
 +// causing valid `V1` values to be interpreted as other variants.
 +pub enum E1 {
 +    V1 { f: bool },
 +    V2 { f: Infallible },
 +    V3,
 +    V4,
 +}
 +
 +// Computing the discriminant used to be done using the niche type (here `u8`,
 +// from the `bool` field of `V1`), overflowing for variants with large enough
 +// indices (`V3` and `V4`), causing them to be interpreted as other variants.
 +pub enum E2<X> {
 +    V1 { f: bool },
 +
 +    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
 +    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
 +    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
 +    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
 +    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
 +    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
 +    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
 +    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
 +    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
 +    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
 +    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
 +    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
 +    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
 +    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
 +    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
 +    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
 +    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
 +    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
 +    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
 +    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
 +    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
 +    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
 +    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
 +    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
 +    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
 +    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
 +    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
 +    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
 +    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
 +    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
 +    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
 +    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
 +
 +    V3,
 +    V4,
 +}
 +
 +fn check_niche_behavior () {
 +    if let E1::V2 { .. } = (E1::V1 { f: true }) {
 +        intrinsics::abort();
 +    }
 +
 +    if let E2::V1 { .. } = E2::V3::<Infallible> {
 +        intrinsics::abort();
 +    }
 +}
 +
 +fn from_decimal_string() {
 +    loop {
 +        let multiplier = 1;
 +
 +        take_multiplier_ref(&multiplier);
 +
 +        if multiplier == 1 {
 +            break;
 +        }
 +
 +        unreachable();
 +    }
 +}
 +
 +fn take_multiplier_ref(_multiplier: &u128) {}
 +
 +fn unreachable() -> ! {
 +    panic("unreachable")
 +}
index ad108c34992e30efa101dc5d71fa5f832e45788f,0000000000000000000000000000000000000000..8481d9c39a3cf672e83f85b03220fabdacf61817
mode 100644,000000..100644
--- /dev/null
@@@ -1,356 -1,0 +1,358 @@@
 +#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
 +
 +#[cfg(target_arch = "x86_64")]
 +use std::arch::x86_64::*;
 +use std::hint::black_box;
 +use std::io::Write;
 +use std::ops::Generator;
 +
 +fn main() {
 +    println!("{:?}", std::env::args().collect::<Vec<_>>());
 +
 +    let mutex = std::sync::Mutex::new(());
 +    let _guard = mutex.lock().unwrap();
 +
 +    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
 +    let stderr = ::std::io::stderr();
 +    let mut stderr = stderr.lock();
 +
 +    std::thread::spawn(move || {
 +        println!("Hello from another thread!");
 +    });
 +
 +    writeln!(stderr, "some {} text", "<unknown>").unwrap();
 +
 +    let _ = std::process::Command::new("true").env("c", "d").spawn();
 +
 +    println!("cargo:rustc-link-lib=z");
 +
 +    static ONCE: std::sync::Once = std::sync::Once::new();
 +    ONCE.call_once(|| {});
 +
 +    let _eq = LoopState::Continue(()) == LoopState::Break(());
 +
 +    // Make sure ByValPair values with differently sized components are correctly passed
 +    map(None::<(u8, Box<Instruction>)>);
 +
 +    println!("{}", 2.3f32.exp());
 +    println!("{}", 2.3f32.exp2());
 +    println!("{}", 2.3f32.abs());
 +    println!("{}", 2.3f32.sqrt());
 +    println!("{}", 2.3f32.floor());
 +    println!("{}", 2.3f32.ceil());
 +    println!("{}", 2.3f32.min(1.0));
 +    println!("{}", 2.3f32.max(1.0));
 +    println!("{}", 2.3f32.powi(2));
 +    println!("{}", 2.3f32.log2());
 +    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
 +    println!("{}", 2.3f32.powf(2.0));
 +
 +    assert_eq!(i64::MAX.checked_mul(2), None);
 +
 +    assert_eq!(-128i8, (-128i8).saturating_sub(1));
 +    assert_eq!(127i8, 127i8.saturating_sub(-128));
 +    assert_eq!(-128i8, (-128i8).saturating_add(-128));
 +    assert_eq!(127i8, 127i8.saturating_add(1));
 +
 +    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
 +    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
 +    assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
 +
 +    let _d = 0i128.checked_div(2i128);
 +    let _d = 0u128.checked_div(2u128);
 +    assert_eq!(1u128 + 2, 3);
 +
 +    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
 +
 +    let tmp = 353985398u128;
 +    assert_eq!(tmp * 932490u128, 330087843781020u128);
 +
 +    let tmp = -0x1234_5678_9ABC_DEF0i64;
 +    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
 +
 +    // Check that all u/i128 <-> float casts work correctly.
 +    let houndred_u128 = 100u128;
 +    let houndred_i128 = 100i128;
 +    let houndred_f32 = 100.0f32;
 +    let houndred_f64 = 100.0f64;
 +    assert_eq!(houndred_u128 as f32, 100.0);
 +    assert_eq!(houndred_u128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as u128, 100);
 +    assert_eq!(houndred_f64 as u128, 100);
 +    assert_eq!(houndred_i128 as f32, 100.0);
 +    assert_eq!(houndred_i128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as i128, 100);
 +    assert_eq!(houndred_f64 as i128, 100);
 +    assert_eq!(1u128.rotate_left(2), 4);
 +
 +    assert_eq!(black_box(f32::NAN) as i128, 0);
 +    assert_eq!(black_box(f32::NAN) as u128, 0);
 +
 +    // Test signed 128bit comparing
 +    let max = usize::MAX as i128;
 +    if 100i128 < 0i128 || 100i128 > max {
 +        panic!();
 +    }
 +
 +    test_checked_mul();
 +
 +    let _a = 1u32 << 2u8;
 +
 +    let empty: [i32; 0] = [];
 +    assert!(empty.is_sorted());
 +
 +    println!("{:?}", std::intrinsics::caller_location());
 +
 +    #[cfg(target_arch = "x86_64")]
 +    unsafe {
 +        test_simd();
 +    }
 +
 +    Box::pin(move |mut _task_context| {
 +        yield ();
 +    }).as_mut().resume(0);
 +
 +    #[derive(Copy, Clone)]
 +    enum Nums {
 +        NegOne = -1,
 +    }
 +
 +    let kind = Nums::NegOne;
 +    assert_eq!(-1i128, kind as i128);
 +
 +    let options = [1u128];
 +    match options[0] {
 +        1 => (),
 +        0 => loop {},
 +        v => panic(v),
 +    };
 +
 +    if black_box(false) {
 +        // Based on https://github.com/rust-lang/rust/blob/2f320a224e827b400be25966755a621779f797cc/src/test/ui/debuginfo/debuginfo_with_uninhabitable_field_and_unsized.rs
 +        let _ = Foo::<dyn Send>::new();
 +
 +        #[allow(dead_code)]
 +        struct Foo<T: ?Sized> {
 +            base: Never,
 +            value: T,
 +        }
 +
 +        impl<T: ?Sized> Foo<T> {
 +            pub fn new() -> Box<Foo<T>> {
 +                todo!()
 +            }
 +        }
 +
 +        enum Never {}
 +    }
 +}
 +
 +fn panic(_: u128) {
 +    panic!();
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_simd() {
 +    assert!(is_x86_feature_detected!("sse2"));
 +
 +    let x = _mm_setzero_si128();
 +    let y = _mm_set1_epi16(7);
 +    let or = _mm_or_si128(x, y);
 +    let cmp_eq = _mm_cmpeq_epi8(y, y);
 +    let cmp_lt = _mm_cmplt_epi8(y, y);
 +
++    let (zero0, zero1) = std::mem::transmute::<_, (u64, u64)>(x);
++    assert_eq!((zero0, zero1), (0, 0));
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
 +
 +    test_mm_slli_si128();
 +    test_mm_movemask_epi8();
 +    test_mm256_movemask_epi8();
 +    test_mm_add_epi8();
 +    test_mm_add_pd();
 +    test_mm_cvtepi8_epi16();
 +    test_mm_cvtsi128_si64();
 +
 +    test_mm_extract_epi8();
 +    test_mm_insert_epi16();
 +
 +    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
 +    assert_eq!(mask1, 1);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_slli_si128() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 1);
 +    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 15);
 +    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 16);
 +    assert_eq_m128i(r, _mm_set1_epi8(0));
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_movemask_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
 +        0b0101, 0b1111_0000u8 as i8, 0, 0,
 +        0, 0, 0b1111_0000u8 as i8, 0b0101,
 +        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
 +    );
 +    let r = _mm_movemask_epi8(a);
 +    assert_eq!(r, 0b10100100_00100101);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "avx2")]
 +unsafe fn test_mm256_movemask_epi8() {
 +    let a = _mm256_set1_epi8(-1);
 +    let r = _mm256_movemask_epi8(a);
 +    let e = -1;
 +    assert_eq!(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_epi8() {
 +    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    #[rustfmt::skip]
 +    let b = _mm_setr_epi8(
 +        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 +    );
 +    let r = _mm_add_epi8(a, b);
 +    #[rustfmt::skip]
 +    let e = _mm_setr_epi8(
 +        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
 +    );
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_pd() {
 +    let a = _mm_setr_pd(1.0, 2.0);
 +    let b = _mm_setr_pd(5.0, 10.0);
 +    let r = _mm_add_pd(a, b);
 +    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
 +    unsafe {
 +        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
 +    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
 +        panic!("{:?} != {:?}", a, b);
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_cvtsi128_si64() {
 +    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
 +    assert_eq!(r, 5);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_cvtepi8_epi16() {
 +    let a = _mm_set1_epi8(10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(10);
 +    assert_eq_m128i(r, e);
 +    let a = _mm_set1_epi8(-10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(-10);
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_extract_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        -1, 1, 2, 3, 4, 5, 6, 7,
 +        8, 9, 10, 11, 12, 13, 14, 15
 +    );
 +    let r1 = _mm_extract_epi8(a, 0);
 +    let r2 = _mm_extract_epi8(a, 3);
 +    assert_eq!(r1, 0xFF);
 +    assert_eq!(r2, 3);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_insert_epi16() {
 +    let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
 +    let r = _mm_insert_epi16(a, 9, 0);
 +    let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
 +    assert_eq_m128i(r, e);
 +}
 +
 +fn test_checked_mul() {
 +    let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
 +    assert_eq!(u, None);
 +
 +    assert_eq!(1u8.checked_mul(255u8), Some(255u8));
 +    assert_eq!(255u8.checked_mul(255u8), None);
 +    assert_eq!(1i8.checked_mul(127i8), Some(127i8));
 +    assert_eq!(127i8.checked_mul(127i8), None);
 +    assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
 +    assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
 +    assert_eq!((-128i8).checked_mul(-128i8), None);
 +
 +    assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
 +    assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
 +    assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
 +    assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
 +    assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
 +    assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
 +    assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
 +}
 +
 +#[derive(PartialEq)]
 +enum LoopState {
 +    Continue(()),
 +    Break(())
 +}
 +
 +pub enum Instruction {
 +    Increment,
 +    Loop,
 +}
 +
 +fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
 +    match a {
 +        None => None,
 +        Some((_, instr)) => Some(instr),
 +    }
 +}
index c0a2e7a7883fcb14031a9ea5a38289d564d30d4a,0000000000000000000000000000000000000000..d8f28dbcc15c8c43d9631834b087f34ed547cebb
mode 100644,000000..100644
--- /dev/null
@@@ -1,3 -1,0 +1,3 @@@
- channel = "nightly-2022-10-23"
 +[toolchain]
++channel = "nightly-2022-12-13"
 +components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
index 2bd8f7d1bc15d7a6690a34aad18f968f7af9bd3c,0000000000000000000000000000000000000000..ebeca8662a5195c6408e8707a06248c189ffab99
mode 100644,000000..100644
--- /dev/null
@@@ -1,4 -1,0 +1,6 @@@
++ignore = ["y.rs"]
++
 +# Matches rustfmt.toml of rustc
 +version = "Two"
 +use_small_heuristics = "Max"
 +merge_derives = false
index e6f60d1c0cb230985bf85311076b15199291a807,0000000000000000000000000000000000000000..f782671fe36f9b45fab05a5c6f335d9880211c23
mode 100755,000000..100755
--- /dev/null
@@@ -1,125 -1,0 +1,125 @@@
- RUSTC="$(pwd)/build/rustc-clif"
 +#!/usr/bin/env bash
 +#![forbid(unsafe_code)]/* This line is ignored by bash
 +# This block is ignored by rustc
 +pushd $(dirname "$0")/../
++RUSTC="$(pwd)/dist/rustc-clif"
 +popd
 +PROFILE=$1 OUTPUT=$2 exec $RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic $0
 +#*/
 +
 +//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
 +//! profiles.
 +//!
 +//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
 +//!
 +//! This file is specially crafted to be both a valid bash script and valid rust source file. If
 +//! executed as bash script this will run the rust source using cg_clif in JIT mode.
 +
 +use std::io::Write;
 +
 +fn main() -> Result<(), Box<dyn std::error::Error>> {
 +    let profile_name = std::env::var("PROFILE").unwrap();
 +    let output_name = std::env::var("OUTPUT").unwrap();
 +    if profile_name.is_empty() || output_name.is_empty() {
 +        println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
 +        std::process::exit(1);
 +    }
 +    let profile = std::fs::read_to_string(profile_name)
 +        .map_err(|err| format!("Failed to read profile {}", err))?;
 +    let mut output = std::fs::OpenOptions::new()
 +        .create(true)
 +        .write(true)
 +        .truncate(true)
 +        .open(output_name)?;
 +
 +    for line in profile.lines() {
 +        let mut stack = &line[..line.rfind(" ").unwrap()];
 +        let count = &line[line.rfind(" ").unwrap() + 1..];
 +
 +        // Filter away uninteresting samples
 +        if !stack.contains("rustc_codegen_cranelift") {
 +            continue;
 +        }
 +
 +        if stack.contains("rustc_monomorphize::partitioning::collect_and_partition_mono_items")
 +            || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
 +            || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
 +        {
 +            continue;
 +        }
 +
 +        // Trim start
 +        if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
 +            stack = &stack[index..];
 +        } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
 +            stack = &stack[index..];
 +        }
 +
 +        if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
 +            stack = &stack[index..];
 +        }
 +
 +        // Trim end
 +        const MALLOC: &str = "malloc";
 +        if let Some(index) = stack.find(MALLOC) {
 +            stack = &stack[..index + MALLOC.len()];
 +        }
 +
 +        const FREE: &str = "free";
 +        if let Some(index) = stack.find(FREE) {
 +            stack = &stack[..index + FREE.len()];
 +        }
 +
 +        const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
 +        if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
 +            stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
 +        }
 +
 +        const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
 +            "rustc_monomorphize::partitioning::collect_and_partition_mono_items";
 +        if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
 +            stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
 +        }
 +
 +        const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
 +        if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
 +            stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
 +        }
 +
 +        const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
 +        if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
 +            stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
 +        }
 +
 +        const ENCODE_METADATA: &str = "rustc_metadata::rmeta::encoder::encode_metadata";
 +        if let Some(index) = stack.find(ENCODE_METADATA) {
 +            stack = &stack[..index + ENCODE_METADATA.len()];
 +        }
 +
 +        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
 +        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
 +            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
 +        }
 +
 +        const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
 +        if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
 +            stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
 +        }
 +
 +        const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
 +        if let Some(index) = stack.find(INST_BUILD) {
 +            stack = &stack[..index + INST_BUILD.len()];
 +        }
 +
 +        output.write_all(stack.as_bytes())?;
 +        output.write_all(&*b" ")?;
 +        output.write_all(count.as_bytes())?;
 +        output.write_all(&*b"\n")?;
 +    }
 +
 +    Ok(())
 +}
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..a19d72acfa83e037ac857e1719c3ca1a988895f9
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,36 @@@
++use std::env;
++use std::ffi::OsString;
++#[cfg(unix)]
++use std::os::unix::process::CommandExt;
++use std::path::PathBuf;
++use std::process::Command;
++
++fn main() {
++    let sysroot = PathBuf::from(env::current_exe().unwrap().parent().unwrap());
++
++    let cg_clif_dylib_path = sysroot.join(if cfg!(windows) { "bin" } else { "lib" }).join(
++        env::consts::DLL_PREFIX.to_string() + "rustc_codegen_cranelift" + env::consts::DLL_SUFFIX,
++    );
++
++    let mut args = std::env::args_os().skip(1).collect::<Vec<_>>();
++    args.push(OsString::from("-Cpanic=abort"));
++    args.push(OsString::from("-Zpanic-abort-tests"));
++    let mut codegen_backend_arg = OsString::from("-Zcodegen-backend=");
++    codegen_backend_arg.push(cg_clif_dylib_path);
++    args.push(codegen_backend_arg);
++    if !args.contains(&OsString::from("--sysroot")) {
++        args.push(OsString::from("--sysroot"));
++        args.push(OsString::from(sysroot.to_str().unwrap()));
++    }
++
++    // Ensure that the right toolchain is used
++    env::set_var("RUSTUP_TOOLCHAIN", env!("RUSTUP_TOOLCHAIN"));
++
++    #[cfg(unix)]
++    Command::new("rustdoc").args(args).exec();
++
++    #[cfg(not(unix))]
++    std::process::exit(
++        Command::new("rustdoc").args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
++    );
++}
index d6a37789599fe8d0621de8a2577fc77aa45b1be3,0000000000000000000000000000000000000000..6c64b7de7daa10d863a903a186e0ee9c4846aeed
mode 100644,000000..100644
--- /dev/null
@@@ -1,74 -1,0 +1,58 @@@
- diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
- index 8431aa7b818..a3ff7e68ce5 100644
- --- a/src/tools/compiletest/src/runtest.rs
- +++ b/src/tools/compiletest/src/runtest.rs
- @@ -3489,12 +3489,7 @@ fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> S
-          let compiler_src_dir = base_dir.join("compiler");
-          normalize_path(&compiler_src_dir, "$(echo '$COMPILER_DIR')");
- -        if let Some(virtual_rust_source_base_dir) =
- -            option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
- -        {
- -            normalize_path(&virtual_rust_source_base_dir.join("library"), "$(echo '$SRC_DIR')");
- -            normalize_path(&virtual_rust_source_base_dir.join("compiler"), "$(echo '$COMPILER_DIR')");
- -        }
- +        normalize_path(&Path::new("$(cd ../build_sysroot/sysroot_src/library; pwd)"), "$(echo '$SRC_DIR')");
-          // Paths into the build directory
-          let test_build_dir = &self.config.build_base;
 +#!/usr/bin/env bash
 +set -e
 +
 +./y.rs build --no-unstable-features
 +
 +echo "[SETUP] Rust fork"
 +git clone https://github.com/rust-lang/rust.git || true
 +pushd rust
 +git fetch
 +git checkout -- .
 +git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
 +
 +git am ../patches/*-sysroot-*.patch
 +
 +git apply - <<EOF
 +diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
 +index d95b5b7f17f..00b6f0e3635 100644
 +--- a/library/alloc/Cargo.toml
 ++++ b/library/alloc/Cargo.toml
 +@@ -8,7 +8,7 @@ edition = "2018"
 +
 + [dependencies]
 + core = { path = "../core" }
 +-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
 ++compiler_builtins = { version = "0.1.66", features = ['rustc-dep-of-std', 'no-asm'] }
 +
 + [dev-dependencies]
 + rand = "0.7"
 + rand_xorshift = "0.2"
- rustc = "$(pwd)/../build/rustc-clif"
 +EOF
 +
 +cat > config.toml <<EOF
 +changelog-seen = 2
 +
 +[llvm]
 +ninja = false
 +
 +[build]
++rustc = "$(pwd)/../dist/rustc-clif"
 +cargo = "$(rustup which cargo)"
 +full-bootstrap = true
 +local-rebuild = true
 +
 +[rust]
 +codegen-backends = ["cranelift"]
 +deny-warnings = false
 +verbose-tests = false
 +EOF
 +popd
 +
 +# FIXME remove once inline asm is fully supported
 +export RUSTFLAGS="$RUSTFLAGS --cfg=rustix_use_libc"
 +
++export CFG_VIRTUAL_RUST_SOURCE_BASE_DIR="$(cd build_sysroot/sysroot_src; pwd)"
++
 +# Allow the testsuite to use llvm tools
 +host_triple=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
 +export LLVM_BIN_DIR="$(rustc --print sysroot)/lib/rustlib/$host_triple/bin"
index 9b5db3cf81f0e3341f553398f6354c9320cf0640,0000000000000000000000000000000000000000..04ad77ec97eac3a76d01166d875cf18bc073f76f
mode 100755,000000..100755
--- /dev/null
@@@ -1,119 -1,0 +1,127 @@@
- rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
 +#!/usr/bin/env bash
 +set -e
 +
 +cd $(dirname "$0")/../
 +
 +source ./scripts/setup_rust_fork.sh
 +
 +echo "[TEST] Test suite of rustc"
 +pushd rust
 +
 +command -v rg >/dev/null 2>&1 || cargo install ripgrep
 +
 +rm -r src/test/ui/{extern/,unsized-locals/,lto/,linkage*} || true
 +for test in $(rg --files-with-matches "lto|// needs-asm-support|// needs-unwind" src/test/{ui,incremental}); do
 +  rm $test
 +done
 +
 +for test in $(rg -i --files-with-matches "//(\[\w+\])?~[^\|]*\s*ERR|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
 +  rm $test
 +done
 +
 +git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
++git checkout -- src/test/ui/proc-macro/pretty-print-hack/
 +
 +# missing features
 +# ================
 +
 +# requires stack unwinding
 +rm src/test/incremental/change_crate_dep_kind.rs
 +rm src/test/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
 +
 +# requires compiling with -Cpanic=unwind
 +rm -r src/test/ui/macros/rfc-2011-nicer-assert-messages/
++rm -r src/test/run-make/test-benches
 +
 +# vendor intrinsics
 +rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
 +rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # requires x86_64 vendor intrinsics
 +rm src/test/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
 +rm src/test/ui/simd/intrinsic/generic-bitmask-pass.rs # simd_bitmask unimplemented
 +rm src/test/ui/simd/intrinsic/generic-as.rs # simd_as unimplemented
 +rm src/test/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs # simd_saturating_add unimplemented
 +rm src/test/ui/simd/intrinsic/float-math-pass.rs # simd_fcos unimplemented
 +rm src/test/ui/simd/intrinsic/generic-gather-pass.rs # simd_gather unimplemented
 +rm src/test/ui/simd/intrinsic/generic-select-pass.rs # simd_select_bitmask unimplemented
 +rm src/test/ui/simd/issue-85915-simd-ptrs.rs # simd_gather unimplemented
 +rm src/test/ui/simd/issue-89193.rs # simd_gather unimplemented
 +rm src/test/ui/simd/simd-bitmask.rs # simd_bitmask unimplemented
 +
 +# exotic linkages
 +rm src/test/ui/issues/issue-33992.rs # unsupported linkages
 +rm src/test/incremental/hashes/function_interfaces.rs # same
 +rm src/test/incremental/hashes/statics.rs # same
 +
 +# variadic arguments
 +rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
 +rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
 +
 +# unsized locals
 +rm -r src/test/run-pass-valgrind/unsized-locals
 +
 +# misc unimplemented things
 +rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
 +rm src/test/ui/target-feature/missing-plusminus.rs # error not implemented
 +rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
 +rm -r src/test/run-make/emit-named-files # requires full --emit support
 +rm src/test/ui/abi/stack-probes.rs # stack probes not yet implemented
 +rm src/test/ui/simd/intrinsic/ptr-cast.rs # simd_expose_addr intrinsic unimplemented
++rm -r src/test/run-make/repr128-dwarf # debuginfo test
++rm src/test/codegen-units/item-collection/asm-sym.rs # requires support for sym in asm!()
 +
 +# optimization tests
 +# ==================
 +rm src/test/ui/codegen/issue-28950.rs # depends on stack size optimizations
 +rm src/test/ui/codegen/init-large-type.rs # same
 +rm src/test/ui/issues/issue-40883.rs # same
 +rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
 +
 +# backend specific tests
 +# ======================
 +rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
 +rm src/test/ui/abi/stack-protector.rs # requires stack protector support
 +
 +# giving different but possibly correct results
 +# =============================================
 +rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
 +rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
 +rm src/test/ui/consts/issue-33537.rs # same
++rm src/test/ui/layout/valid_range_oob.rs # different ICE message
 +
 +# doesn't work due to the way the rustc test suite is invoked.
 +# should work when using ./x.py test the way it is intended
 +# ============================================================
- rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
++rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in dist/bin/
 +rm -r src/test/run-make/unstable-flag-required # same
 +rm -r src/test/run-make/rustdoc-* # same
 +rm -r src/test/run-make/issue-88756-default-output # same
 +rm -r src/test/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump
++rm -r src/test/ui/consts/missing_span_in_backtrace.rs # expects sysroot source to be elsewhere
 +
 +# genuine bugs
 +# ============
 +rm src/test/incremental/spike-neg1.rs # errors out for some reason
 +rm src/test/incremental/spike-neg2.rs # same
 +rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs
 +rm src/test/ui/mir/ssa-analysis-regression-50041.rs # produces ICE
 +rm src/test/ui/type-alias-impl-trait/assoc-projection-ice.rs # produces ICE
 +
 +rm src/test/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors
 +
++rm src/test/ui/runtime/out-of-stack.rs # SIGSEGV instead of SIGABRT for some reason (#1301)
++
 +# bugs in the test suite
 +# ======================
 +rm src/test/ui/backtrace.rs # TODO warning
 +rm src/test/ui/simple_global_asm.rs # TODO add needs-asm-support
 +rm src/test/ui/test-attrs/test-type.rs # TODO panic message on stderr. correct stdout
 +# not sure if this is actually a bug in the test suite, but the symbol list shows the function without leading _ for some reason
 +rm -r src/test/run-make/native-link-modifier-bundle
++rm src/test/ui/process/nofile-limit.rs # TODO some AArch64 linking issue
++rm src/test/ui/dyn-star/dispatch-on-pin-mut.rs # TODO failed assertion in vtable::get_ptr_and_method_ref
 +
 +rm src/test/ui/stdio-is-blocking.rs # really slow with unoptimized libstd
 +
 +echo "[TEST] rustc test suite"
 +RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
 +popd
index 98b5fb1cce28531eb856e758cde8061b8ae15426,0000000000000000000000000000000000000000..65cc6b4376713d8c7e855c2fca1f833894a3cd09
mode 100644,000000..100644
--- /dev/null
@@@ -1,664 -1,0 +1,663 @@@
-     triple: &target_lexicon::Triple,
 +//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
 +
 +mod comments;
 +mod pass_mode;
 +mod returning;
 +
 +use cranelift_module::ModuleError;
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_middle::ty::layout::FnAbiOf;
 +use rustc_target::abi::call::{Conv, FnAbi};
 +use rustc_target::spec::abi::Abi;
 +
 +use cranelift_codegen::ir::{AbiParam, SigRef};
 +
 +use self::pass_mode::*;
 +use crate::prelude::*;
 +
 +pub(crate) use self::returning::codegen_return;
 +
 +fn clif_sig_from_fn_abi<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    default_call_conv: CallConv,
 +    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
 +) -> Signature {
 +    let call_conv = conv_to_call_conv(fn_abi.conv, default_call_conv);
 +
 +    let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
 +
 +    let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
 +    // Sometimes the first param is a pointer to the place where the return value needs to be stored.
 +    let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
 +
 +    Signature { params, returns, call_conv }
 +}
 +
 +pub(crate) fn conv_to_call_conv(c: Conv, default_call_conv: CallConv) -> CallConv {
 +    match c {
 +        Conv::Rust | Conv::C => default_call_conv,
 +        Conv::RustCold => CallConv::Cold,
 +        Conv::X86_64SysV => CallConv::SystemV,
 +        Conv::X86_64Win64 => CallConv::WindowsFastcall,
 +        Conv::ArmAapcs
 +        | Conv::CCmseNonSecureCall
 +        | Conv::Msp430Intr
 +        | Conv::PtxKernel
 +        | Conv::X86Fastcall
 +        | Conv::X86Intr
 +        | Conv::X86Stdcall
 +        | Conv::X86ThisCall
 +        | Conv::X86VectorCall
 +        | Conv::AmdGpuKernel
 +        | Conv::AvrInterrupt
 +        | Conv::AvrNonBlockingInterrupt => todo!("{:?}", c),
 +    }
 +}
 +
 +pub(crate) fn get_function_sig<'tcx>(
 +    tcx: TyCtxt<'tcx>,
-         CallConv::triple_default(triple),
++    default_call_conv: CallConv,
 +    inst: Instance<'tcx>,
 +) -> Signature {
 +    assert!(!inst.substs.needs_infer());
 +    clif_sig_from_fn_abi(
 +        tcx,
-     let sig = get_function_sig(tcx, module.isa().triple(), inst);
++        default_call_conv,
 +        &RevealAllLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
 +    )
 +}
 +
 +/// Instance must be monomorphized
 +pub(crate) fn import_function<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    module: &mut dyn Module,
 +    inst: Instance<'tcx>,
 +) -> FuncId {
 +    let name = tcx.symbol_name(inst).name;
-     let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
-     let fn_sig =
-         fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
++    let sig = get_function_sig(tcx, module.target_config().default_call_conv, inst);
 +    match module.declare_function(name, Linkage::Import, &sig) {
 +        Ok(func_id) => func_id,
 +        Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
 +            "attempt to declare `{name}` as function, but it was already declared as static"
 +        )),
 +        Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
 +            "attempt to declare `{name}` with signature {new_sig:?}, \
 +             but it was already declared with signature {prev_sig:?}"
 +        )),
 +        Err(err) => Err::<_, _>(err).unwrap(),
 +    }
 +}
 +
 +impl<'tcx> FunctionCx<'_, '_, 'tcx> {
 +    /// Instance must be monomorphized
 +    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
 +        let func_id = import_function(self.tcx, self.module, inst);
 +        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
 +
 +        if self.clif_comments.enabled() {
 +            self.add_comment(func_ref, format!("{:?}", inst));
 +        }
 +
 +        func_ref
 +    }
 +
 +    pub(crate) fn lib_call(
 +        &mut self,
 +        name: &str,
 +        params: Vec<AbiParam>,
 +        returns: Vec<AbiParam>,
 +        args: &[Value],
 +    ) -> &[Value] {
 +        let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
 +        let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
 +        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
 +        if self.clif_comments.enabled() {
 +            self.add_comment(func_ref, format!("{:?}", name));
 +        }
 +        let call_inst = self.bcx.ins().call(func_ref, args);
 +        if self.clif_comments.enabled() {
 +            self.add_comment(call_inst, format!("easy_call {}", name));
 +        }
 +        let results = self.bcx.inst_results(call_inst);
 +        assert!(results.len() <= 2, "{}", results.len());
 +        results
 +    }
 +
 +    pub(crate) fn easy_call(
 +        &mut self,
 +        name: &str,
 +        args: &[CValue<'tcx>],
 +        return_ty: Ty<'tcx>,
 +    ) -> CValue<'tcx> {
 +        let (input_tys, args): (Vec<_>, Vec<_>) = args
 +            .iter()
 +            .map(|arg| {
 +                (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
 +            })
 +            .unzip();
 +        let return_layout = self.layout_of(return_ty);
 +        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
 +            tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
 +        } else {
 +            vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
 +        };
 +        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
 +        match *ret_vals {
 +            [] => CValue::by_ref(
 +                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
 +                return_layout,
 +            ),
 +            [val] => CValue::by_val(val, return_layout),
 +            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
 +            _ => unreachable!(),
 +        }
 +    }
 +}
 +
 +/// Make a [`CPlace`] capable of holding value of the specified type.
 +fn make_local_place<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    local: Local,
 +    layout: TyAndLayout<'tcx>,
 +    is_ssa: bool,
 +) -> CPlace<'tcx> {
 +    let place = if is_ssa {
 +        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
 +            CPlace::new_var_pair(fx, local, layout)
 +        } else {
 +            CPlace::new_var(fx, local, layout)
 +        }
 +    } else {
 +        CPlace::new_stack_slot(fx, layout)
 +    };
 +
 +    self::comments::add_local_place_comments(fx, place, local);
 +
 +    place
 +}
 +
 +pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
 +    fx.bcx.append_block_params_for_function_params(start_block);
 +
 +    fx.bcx.switch_to_block(start_block);
 +    fx.bcx.ins().nop();
 +
 +    let ssa_analyzed = crate::analyze::analyze(fx);
 +
 +    self::comments::add_args_header_comment(fx);
 +
 +    let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
 +    let ret_place =
 +        self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
 +    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
 +
 +    // None means pass_mode == NoPass
 +    enum ArgKind<'tcx> {
 +        Normal(Option<CValue<'tcx>>),
 +        Spread(Vec<Option<CValue<'tcx>>>),
 +    }
 +
 +    let fn_abi = fx.fn_abi.take().unwrap();
 +
 +    // FIXME implement variadics in cranelift
 +    if fn_abi.c_variadic {
 +        fx.tcx.sess.span_fatal(
 +            fx.mir.span,
 +            "Defining variadic functions is not yet supported by Cranelift",
 +        );
 +    }
 +
 +    let mut arg_abis_iter = fn_abi.args.iter();
 +
 +    let func_params = fx
 +        .mir
 +        .args_iter()
 +        .map(|local| {
 +            let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
 +
 +            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
 +            if Some(local) == fx.mir.spread_arg {
 +                // This argument (e.g. the last argument in the "rust-call" ABI)
 +                // is a tuple that was spread at the ABI level and now we have
 +                // to reconstruct it into a tuple local variable, from multiple
 +                // individual function arguments.
 +
 +                let tupled_arg_tys = match arg_ty.kind() {
 +                    ty::Tuple(ref tys) => tys,
 +                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
 +                };
 +
 +                let mut params = Vec::new();
 +                for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
 +                    let arg_abi = arg_abis_iter.next().unwrap();
 +                    let param =
 +                        cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
 +                    params.push(param);
 +                }
 +
 +                (local, ArgKind::Spread(params), arg_ty)
 +            } else {
 +                let arg_abi = arg_abis_iter.next().unwrap();
 +                let param =
 +                    cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
 +                (local, ArgKind::Normal(param), arg_ty)
 +            }
 +        })
 +        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
 +
 +    assert!(fx.caller_location.is_none());
 +    if fx.instance.def.requires_caller_location(fx.tcx) {
 +        // Store caller location for `#[track_caller]`.
 +        let arg_abi = arg_abis_iter.next().unwrap();
 +        fx.caller_location =
 +            Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
 +    }
 +
 +    assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
 +    fx.fn_abi = Some(fn_abi);
 +    assert!(block_params_iter.next().is_none(), "arg_value left behind");
 +
 +    self::comments::add_locals_header_comment(fx);
 +
 +    for (local, arg_kind, ty) in func_params {
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        // While this is normally an optimization to prevent an unnecessary copy when an argument is
 +        // not mutated by the current function, this is necessary to support unsized arguments.
 +        if let ArgKind::Normal(Some(val)) = arg_kind {
 +            if let Some((addr, meta)) = val.try_to_ptr() {
 +                // Ownership of the value at the backing storage for an argument is passed to the
 +                // callee per the ABI, so it is fine to borrow the backing storage of this argument
 +                // to prevent a copy.
 +
 +                let place = if let Some(meta) = meta {
 +                    CPlace::for_ptr_with_extra(addr, meta, val.layout())
 +                } else {
 +                    CPlace::for_ptr(addr, val.layout())
 +                };
 +
 +                self::comments::add_local_place_comments(fx, place, local);
 +
 +                assert_eq!(fx.local_map.push(place), local);
 +                continue;
 +            }
 +        }
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +
 +        match arg_kind {
 +            ArgKind::Normal(param) => {
 +                if let Some(param) = param {
 +                    place.write_cvalue(fx, param);
 +                }
 +            }
 +            ArgKind::Spread(params) => {
 +                for (i, param) in params.into_iter().enumerate() {
 +                    if let Some(param) = param {
 +                        place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
 +                    }
 +                }
 +            }
 +        }
 +    }
 +
 +    for local in fx.mir.vars_and_temps_iter() {
 +        let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
 +        let layout = fx.layout_of(ty);
 +
 +        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
 +
 +        let place = make_local_place(fx, local, layout, is_ssa);
 +        assert_eq!(fx.local_map.push(place), local);
 +    }
 +
 +    fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
 +}
 +
 +struct CallArgument<'tcx> {
 +    value: CValue<'tcx>,
 +    is_owned: bool,
 +}
 +
 +// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
 +fn codegen_call_argument_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> CallArgument<'tcx> {
 +    CallArgument {
 +        value: codegen_operand(fx, operand),
 +        is_owned: matches!(operand, Operand::Move(_)),
 +    }
 +}
 +
 +pub(crate) fn codegen_terminator_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    source_info: mir::SourceInfo,
 +    func: &Operand<'tcx>,
 +    args: &[Operand<'tcx>],
 +    destination: Place<'tcx>,
 +    target: Option<BasicBlock>,
 +) {
-     let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
++    let func = codegen_operand(fx, func);
++    let fn_sig = func.layout().ty.fn_sig(fx.tcx);
 +
 +    let ret_place = codegen_place(fx, destination);
 +
 +    // Handle special calls like intrinsics and empty drop glue.
-     let extra_args = &args[fn_sig.inputs().len()..];
++    let instance = if let ty::FnDef(def_id, substs) = *func.layout().ty.kind() {
 +        let instance =
 +            ty::Instance::expect_resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
 +                .polymorphize(fx.tcx);
 +
 +        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
 +            crate::intrinsics::codegen_llvm_intrinsic_call(
 +                fx,
 +                &fx.tcx.symbol_name(instance).name,
 +                substs,
 +                args,
 +                ret_place,
 +                target,
 +            );
 +            return;
 +        }
 +
 +        match instance.def {
 +            InstanceDef::Intrinsic(_) => {
 +                crate::intrinsics::codegen_intrinsic_call(
 +                    fx,
 +                    instance,
 +                    args,
 +                    ret_place,
 +                    target,
 +                    source_info,
 +                );
 +                return;
 +            }
 +            InstanceDef::DropGlue(_, None) => {
 +                // empty drop glue - a nop.
 +                let dest = target.expect("Non terminating drop_in_place_real???");
 +                let ret_block = fx.get_block(dest);
 +                fx.bcx.ins().jump(ret_block, &[]);
 +                return;
 +            }
 +            _ => Some(instance),
 +        }
 +    } else {
 +        None
 +    };
 +
-         RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
++    let extra_args = &args[fn_sig.inputs().skip_binder().len()..];
 +    let extra_args = fx
 +        .tcx
 +        .mk_type_list(extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx))));
 +    let fn_abi = if let Some(instance) = instance {
 +        RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
 +    } else {
-     let is_cold = if fn_sig.abi == Abi::RustCold {
++        RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_sig, extra_args)
 +    };
 +
-     let mut args = if fn_sig.abi == Abi::RustCall {
++    let is_cold = if fn_sig.abi() == Abi::RustCold {
 +        true
 +    } else {
 +        instance
 +            .map(|inst| {
 +                fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
 +            })
 +            .unwrap_or(false)
 +    };
 +    if is_cold {
 +        fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
 +        if let Some(destination_block) = target {
 +            fx.bcx.set_cold_block(fx.get_block(destination_block));
 +        }
 +    }
 +
 +    // Unpack arguments tuple for closures
-             let func = codegen_operand(fx, func).load_scalar(fx);
++    let mut args = if fn_sig.abi() == Abi::RustCall {
 +        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
 +        let self_arg = codegen_call_argument_operand(fx, &args[0]);
 +        let pack_arg = codegen_call_argument_operand(fx, &args[1]);
 +
 +        let tupled_arguments = match pack_arg.value.layout().ty.kind() {
 +            ty::Tuple(ref tupled_arguments) => tupled_arguments,
 +            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
 +        };
 +
 +        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
 +        args.push(self_arg);
 +        for i in 0..tupled_arguments.len() {
 +            args.push(CallArgument {
 +                value: pack_arg.value.value_field(fx, mir::Field::new(i)),
 +                is_owned: pack_arg.is_owned,
 +            });
 +        }
 +        args
 +    } else {
 +        args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
 +    };
 +
 +    // Pass the caller location for `#[track_caller]`.
 +    if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
 +        let caller_location = fx.get_caller_location(source_info);
 +        args.push(CallArgument { value: caller_location, is_owned: false });
 +    }
 +
 +    let args = args;
 +    assert_eq!(fn_abi.args.len(), args.len());
 +
 +    enum CallTarget {
 +        Direct(FuncRef),
 +        Indirect(SigRef, Value),
 +    }
 +
 +    let (func_ref, first_arg_override) = match instance {
 +        // Trait object call
 +        Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
 +            if fx.clif_comments.enabled() {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(
 +                    nop_inst,
 +                    format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
 +                );
 +            }
 +
 +            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
 +            let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
 +            let sig = fx.bcx.import_signature(sig);
 +
 +            (CallTarget::Indirect(sig, method), Some(ptr.get_addr(fx)))
 +        }
 +
 +        // Normal call
 +        Some(instance) => {
 +            let func_ref = fx.get_function_ref(instance);
 +            (CallTarget::Direct(func_ref), None)
 +        }
 +
 +        // Indirect call
 +        None => {
 +            if fx.clif_comments.enabled() {
 +                let nop_inst = fx.bcx.ins().nop();
 +                fx.add_comment(nop_inst, "indirect call");
 +            }
 +
-         if fn_sig.c_variadic {
-             if !matches!(fn_sig.abi, Abi::C { .. }) {
++            let func = func.load_scalar(fx);
 +            let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
 +            let sig = fx.bcx.import_signature(sig);
 +
 +            (CallTarget::Indirect(sig, func), None)
 +        }
 +    };
 +
 +    self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
 +        let call_args = return_ptr
 +            .into_iter()
 +            .chain(first_arg_override.into_iter())
 +            .chain(
 +                args.into_iter()
 +                    .enumerate()
 +                    .skip(if first_arg_override.is_some() { 1 } else { 0 })
 +                    .map(|(i, arg)| {
 +                        adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
 +                    })
 +                    .flatten(),
 +            )
 +            .collect::<Vec<Value>>();
 +
 +        let call_inst = match func_ref {
 +            CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
 +            CallTarget::Indirect(sig, func_ptr) => {
 +                fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
 +            }
 +        };
 +
 +        // FIXME find a cleaner way to support varargs
-                     &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
++        if fn_sig.c_variadic() {
++            if !matches!(fn_sig.abi(), Abi::C { .. }) {
 +                fx.tcx.sess.span_fatal(
 +                    source_info.span,
++                    &format!("Variadic call for non-C abi {:?}", fn_sig.abi()),
 +                );
 +            }
 +            let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
 +            let abi_params = call_args
 +                .into_iter()
 +                .map(|arg| {
 +                    let ty = fx.bcx.func.dfg.value_type(arg);
 +                    if !ty.is_int() {
 +                        // FIXME set %al to upperbound on float args once floats are supported
 +                        fx.tcx.sess.span_fatal(
 +                            source_info.span,
 +                            &format!("Non int ty {:?} for variadic call", ty),
 +                        );
 +                    }
 +                    AbiParam::new(ty)
 +                })
 +                .collect::<Vec<AbiParam>>();
 +            fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
 +        }
 +
 +        call_inst
 +    });
 +
 +    if let Some(dest) = target {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +    }
 +}
 +
 +pub(crate) fn codegen_drop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    source_info: mir::SourceInfo,
 +    drop_place: CPlace<'tcx>,
 +) {
 +    let ty = drop_place.layout().ty;
 +    let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
 +
 +    if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
 +        // we don't actually need to drop anything
 +    } else {
 +        match ty.kind() {
 +            ty::Dynamic(_, _, ty::Dyn) => {
 +                // IN THIS ARM, WE HAVE:
 +                // ty = *mut (dyn Trait)
 +                // which is: exists<T> ( *mut T,    Vtable<T: Trait> )
 +                //                       args[0]    args[1]
 +                //
 +                // args = ( Data, Vtable )
 +                //                  |
 +                //                  v
 +                //                /-------\
 +                //                | ...   |
 +                //                \-------/
 +                //
 +                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
 +                let ptr = ptr.get_addr(fx);
 +                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
 +
 +                // FIXME(eddyb) perhaps move some of this logic into
 +                // `Instance::resolve_drop_in_place`?
 +                let virtual_drop = Instance {
 +                    def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
 +                    substs: drop_instance.substs,
 +                };
 +                let fn_abi =
 +                    RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
 +
 +                let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
 +                let sig = fx.bcx.import_signature(sig);
 +                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
 +            }
 +            ty::Dynamic(_, _, ty::DynStar) => {
 +                // IN THIS ARM, WE HAVE:
 +                // ty = *mut (dyn* Trait)
 +                // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
 +                //
 +                // args = [ * ]
 +                //          |
 +                //          v
 +                //      ( Data, Vtable )
 +                //                |
 +                //                v
 +                //              /-------\
 +                //              | ...   |
 +                //              \-------/
 +                //
 +                //
 +                // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
 +                //
 +                // data = &(*args[0]).0    // gives a pointer to Data above (really the same pointer)
 +                // vtable = (*args[0]).1   // loads the vtable out
 +                // (data, vtable)          // an equivalent Rust `*mut dyn Trait`
 +                //
 +                // SO THEN WE CAN USE THE ABOVE CODE.
 +                let (data, vtable) = drop_place.to_cvalue(fx).dyn_star_force_data_on_stack(fx);
 +                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
 +
 +                let virtual_drop = Instance {
 +                    def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
 +                    substs: drop_instance.substs,
 +                };
 +                let fn_abi =
 +                    RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
 +
 +                let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
 +                let sig = fx.bcx.import_signature(sig);
 +                fx.bcx.ins().call_indirect(sig, drop_fn, &[data]);
 +            }
 +            _ => {
 +                assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
 +
 +                let fn_abi =
 +                    RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(drop_instance, ty::List::empty());
 +
 +                let arg_value = drop_place.place_ref(
 +                    fx,
 +                    fx.layout_of(fx.tcx.mk_ref(
 +                        fx.tcx.lifetimes.re_erased,
 +                        TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
 +                    )),
 +                );
 +                let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
 +
 +                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
 +
 +                if drop_instance.def.requires_caller_location(fx.tcx) {
 +                    // Pass the caller location for `#[track_caller]`.
 +                    let caller_location = fx.get_caller_location(source_info);
 +                    call_args.extend(
 +                        adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
 +                    );
 +                }
 +
 +                let func_ref = fx.get_function_ref(drop_instance);
 +                fx.bcx.ins().call(func_ref, &call_args);
 +            }
 +        }
 +    }
 +}
index 12bb00d346db42c42c88adfad2d5939df371c1f6,0000000000000000000000000000000000000000..8508227179ac611fb77c350c541e5835e0313936
mode 100644,000000..100644
--- /dev/null
@@@ -1,148 -1,0 +1,148 @@@
-             call_conv: CallConv::triple_default(module.isa().triple()),
 +//! Allocator shim
 +// Adapted from rustc
 +
 +use crate::prelude::*;
 +
 +use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 +use rustc_session::config::OomStrategy;
 +use rustc_span::symbol::sym;
 +
 +/// Returns whether an allocator shim was created
 +pub(crate) fn codegen(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +) -> bool {
 +    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
 +        use rustc_middle::middle::dependency_format::Linkage;
 +        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
 +    });
 +    if any_dynamic_crate {
 +        false
 +    } else if let Some(kind) = tcx.allocator_kind(()) {
 +        codegen_inner(
 +            module,
 +            unwind_context,
 +            kind,
 +            tcx.alloc_error_handler_kind(()).unwrap(),
 +            tcx.sess.opts.unstable_opts.oom,
 +        );
 +        true
 +    } else {
 +        false
 +    }
 +}
 +
 +fn codegen_inner(
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +    kind: AllocatorKind,
 +    alloc_error_handler_kind: AllocatorKind,
 +    oom_strategy: OomStrategy,
 +) {
 +    let usize_ty = module.target_config().pointer_type();
 +
 +    for method in ALLOCATOR_METHODS {
 +        let mut arg_tys = Vec::with_capacity(method.inputs.len());
 +        for ty in method.inputs.iter() {
 +            match *ty {
 +                AllocatorTy::Layout => {
 +                    arg_tys.push(usize_ty); // size
 +                    arg_tys.push(usize_ty); // align
 +                }
 +                AllocatorTy::Ptr => arg_tys.push(usize_ty),
 +                AllocatorTy::Usize => arg_tys.push(usize_ty),
 +
 +                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
 +            }
 +        }
 +        let output = match method.output {
 +            AllocatorTy::ResultPtr => Some(usize_ty),
 +            AllocatorTy::Unit => None,
 +
 +            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
 +                panic!("invalid allocator output")
 +            }
 +        };
 +
 +        let sig = Signature {
-         call_conv: CallConv::triple_default(module.isa().triple()),
++            call_conv: module.target_config().default_call_conv,
 +            params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
 +            returns: output.into_iter().map(AbiParam::new).collect(),
 +        };
 +
 +        let caller_name = format!("__rust_{}", method.name);
 +        let callee_name = kind.fn_name(method.name);
 +
 +        let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
 +
 +        let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
 +
 +        let mut ctx = Context::new();
 +        ctx.func.signature = sig.clone();
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let args = arg_tys
 +                .into_iter()
 +                .map(|ty| bcx.append_block_param(block, ty))
 +                .collect::<Vec<Value>>();
 +
 +            let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +            let call_inst = bcx.ins().call(callee_func_ref, &args);
 +            let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
 +
 +            bcx.ins().return_(&results);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        module.define_function(func_id, &mut ctx).unwrap();
 +        unwind_context.add_function(func_id, &ctx, module.isa());
 +    }
 +
 +    let sig = Signature {
++        call_conv: module.target_config().default_call_conv,
 +        params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
 +        returns: vec![],
 +    };
 +
 +    let callee_name = alloc_error_handler_kind.fn_name(sym::oom);
 +
 +    let func_id =
 +        module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
 +
 +    let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
 +
 +    let mut ctx = Context::new();
 +    ctx.func.signature = sig;
 +    {
 +        let mut func_ctx = FunctionBuilderContext::new();
 +        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +        let block = bcx.create_block();
 +        bcx.switch_to_block(block);
 +        let args = (&[usize_ty, usize_ty])
 +            .iter()
 +            .map(|&ty| bcx.append_block_param(block, ty))
 +            .collect::<Vec<Value>>();
 +
 +        let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
 +        bcx.ins().call(callee_func_ref, &args);
 +
 +        bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +        bcx.seal_all_blocks();
 +        bcx.finalize();
 +    }
 +    module.define_function(func_id, &mut ctx).unwrap();
 +    unwind_context.add_function(func_id, &ctx, module.isa());
 +
 +    let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
 +    let mut data_ctx = DataContext::new();
 +    data_ctx.set_align(1);
 +    let val = oom_strategy.should_panic();
 +    data_ctx.define(Box::new([val]));
 +    module.define_data(data_id, &data_ctx).unwrap();
 +}
index 06813d7ec953f558a44eec3eb059723167e45131,0000000000000000000000000000000000000000..89d955e8bf2e1d84c1a40045d0b063140d325525
mode 100644,000000..100644
--- /dev/null
@@@ -1,970 -1,0 +1,962 @@@
-     let sig = get_function_sig(tcx, module.isa().triple(), instance);
 +//! Codegen of a single function
 +
 +use rustc_ast::InlineAsmOptions;
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::adjustment::PointerCast;
 +use rustc_middle::ty::layout::FnAbiOf;
 +use rustc_middle::ty::print::with_no_trimmed_paths;
 +
 +use cranelift_codegen::ir::UserFuncName;
 +
 +use crate::constant::ConstantCx;
 +use crate::debuginfo::FunctionDebugContext;
 +use crate::prelude::*;
 +use crate::pretty_clif::CommentWriter;
 +
 +/// The output of codegenning a single function: everything `compile_fn`
 +/// needs to later compile the IR and define the function in the module.
 +pub(crate) struct CodegenedFunction {
 +    // Mangled symbol name; also used to name the written .clif/.vcode files.
 +    symbol_name: String,
 +    // Id under which the function was declared in the module.
 +    func_id: FuncId,
 +    // The codegenned Cranelift IR for the function body.
 +    func: Function,
 +    // Per-instruction comments collected for pretty-printed IR output.
 +    clif_comments: CommentWriter,
 +    // Per-function debuginfo state; `None` when debuginfo is disabled.
 +    func_debug_cx: Option<FunctionDebugContext>,
 +}
 +
 +/// Codegens `instance` to Cranelift IR and immediately compiles and defines it
 +/// in `module`, reusing the allocations of `cached_context` between functions.
 +#[cfg_attr(not(feature = "jit"), allow(dead_code))]
 +pub(crate) fn codegen_and_compile_fn<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    cx: &mut crate::CodegenCx,
 +    cached_context: &mut Context,
 +    module: &mut dyn Module,
 +    instance: Instance<'tcx>,
 +) {
 +    // On panic, print which instance was being codegenned to aid debugging.
 +    let _inst_guard =
 +        crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
 +
 +    // Take the cached Function out of the context so codegen_fn can reuse its allocations.
 +    let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
 +    let codegened_func = codegen_fn(tcx, cx, cached_func, module, instance);
 +
 +    compile_fn(cx, cached_context, module, codegened_func);
 +}
 +
 +/// Codegens the MIR of `instance` into a Cranelift [`Function`].
 +///
 +/// The returned [`CodegenedFunction`] contains everything [`compile_fn`]
 +/// needs to actually compile the IR and define the function in the module.
 +pub(crate) fn codegen_fn<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    cx: &mut crate::CodegenCx,
 +    cached_func: Function,
 +    module: &mut dyn Module,
 +    instance: Instance<'tcx>,
 +) -> CodegenedFunction {
 +    debug_assert!(!instance.substs.needs_infer());
 +
 +    let mir = tcx.instance_mir(instance.def);
 +    // On panic, print the MIR of the function being codegenned to aid debugging.
 +    let _mir_guard = crate::PrintOnPanic(|| {
 +        let mut buf = Vec::new();
 +        with_no_trimmed_paths!({
 +            rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
 +                .unwrap();
 +        });
 +        String::from_utf8_lossy(&buf).into_owned()
 +    });
 +
 +    // Declare function
 +    let symbol_name = tcx.symbol_name(instance).name.to_string();
-                     let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
++    let sig = get_function_sig(tcx, module.target_config().default_call_conv, instance);
 +    let func_id = module.declare_function(&symbol_name, Linkage::Local, &sig).unwrap();
 +
 +    // Make the FunctionBuilder
 +    let mut func_ctx = FunctionBuilderContext::new();
 +    // Reuse the cached function's allocations, but start from a clean slate.
 +    let mut func = cached_func;
 +    func.clear();
 +    func.name = UserFuncName::user(0, func_id.as_u32());
 +    func.signature = sig;
 +    func.collect_debug_info();
 +
 +    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
 +
 +    // Predefine blocks: a start block plus one block per MIR basic block.
 +    let start_block = bcx.create_block();
 +    let block_map: IndexVec<BasicBlock, Block> =
 +        (0..mir.basic_blocks.len()).map(|_| bcx.create_block()).collect();
 +
 +    // Make FunctionCx
 +    let target_config = module.target_config();
 +    let pointer_type = target_config.pointer_type();
 +    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
 +
 +    let func_debug_cx = if let Some(debug_context) = &mut cx.debug_context {
 +        Some(debug_context.define_function(tcx, &symbol_name, mir.span))
 +    } else {
 +        None
 +    };
 +
 +    let mut fx = FunctionCx {
 +        cx,
 +        module,
 +        tcx,
 +        target_config,
 +        pointer_type,
 +        constants_cx: ConstantCx::new(),
 +        func_debug_cx,
 +
 +        instance,
 +        symbol_name,
 +        mir,
 +        fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
 +
 +        bcx,
 +        block_map,
 +        local_map: IndexVec::with_capacity(mir.local_decls.len()),
 +        caller_location: None, // set by `codegen_fn_prelude`
 +
 +        clif_comments,
 +        last_source_file: None,
 +        next_ssa_var: 0,
 +    };
 +
 +    tcx.sess.time("codegen clif ir", || codegen_fn_body(&mut fx, start_block));
 +
 +    // Recover all necessary data from fx before `func` (borrowed by fx.bcx) is used again below.
 +    let symbol_name = fx.symbol_name;
 +    let clif_comments = fx.clif_comments;
 +    let func_debug_cx = fx.func_debug_cx;
 +
 +    // Flush any constants created during codegen into the module.
 +    fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
 +
 +    if cx.should_write_ir {
 +        // Write the unoptimized IR to file for debugging.
 +        crate::pretty_clif::write_clif_file(
 +            tcx.output_filenames(()),
 +            &symbol_name,
 +            "unopt",
 +            module.isa(),
 +            &func,
 +            &clif_comments,
 +        );
 +    }
 +
 +    // Verify function
 +    verify_func(tcx, &clif_comments, &func);
 +
 +    CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx }
 +}
 +
 +/// Compiles previously codegenned IR with Cranelift, defines the function in
 +/// `module`, and emits optimized-IR/disasm files and debug/unwind info as
 +/// configured on `cx`.
 +pub(crate) fn compile_fn(
 +    cx: &mut crate::CodegenCx,
 +    cached_context: &mut Context,
 +    module: &mut dyn Module,
 +    codegened_func: CodegenedFunction,
 +) {
 +    let clif_comments = codegened_func.clif_comments;
 +
 +    // Store function in context
 +    let context = cached_context;
 +    context.clear();
 +    context.func = codegened_func.func;
 +
 +    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
 +    // instruction, which doesn't have an encoding.
 +    context.compute_cfg();
 +    context.compute_domtree();
 +    context.eliminate_unreachable_code(module.isa()).unwrap();
 +    context.dce(module.isa()).unwrap();
 +    // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
 +    // invalidate it when it would change.
 +    context.domtree.clear();
 +
 +    // Debugging aid: when manually enabled, prints the decorated CLIF on panic.
 +    #[cfg(any())] // This is never true
 +    let _clif_guard = {
 +        use std::fmt::Write;
 +
 +        let func_clone = context.func.clone();
 +        let clif_comments_clone = clif_comments.clone();
 +        let mut clif = String::new();
 +        for flag in module.isa().flags().iter() {
 +            writeln!(clif, "set {}", flag).unwrap();
 +        }
 +        write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
 +        for isa_flag in module.isa().isa_flags().iter() {
 +            write!(clif, " {}", isa_flag).unwrap();
 +        }
 +        writeln!(clif, "\n").unwrap();
 +        crate::PrintOnPanic(move || {
 +            let mut clif = clif.clone();
 +            ::cranelift_codegen::write::decorate_function(
 +                &mut &clif_comments_clone,
 +                &mut clif,
 +                &func_clone,
 +            )
 +            .unwrap();
 +            clif
 +        })
 +    };
 +
 +    // Define function
 +    cx.profiler.verbose_generic_activity("define function").run(|| {
 +        // Only ask Cranelift for a disassembly when we are going to write it out.
 +        context.want_disasm = cx.should_write_ir;
 +        module.define_function(codegened_func.func_id, context).unwrap();
 +    });
 +
 +    if cx.should_write_ir {
 +        // Write optimized function to file for debugging
 +        crate::pretty_clif::write_clif_file(
 +            &cx.output_filenames,
 +            &codegened_func.symbol_name,
 +            "opt",
 +            module.isa(),
 +            &context.func,
 +            &clif_comments,
 +        );
 +
 +        if let Some(disasm) = &context.compiled_code().unwrap().disasm {
 +            crate::pretty_clif::write_ir_file(
 +                &cx.output_filenames,
 +                &format!("{}.vcode", codegened_func.symbol_name),
 +                |file| file.write_all(disasm.as_bytes()),
 +            )
 +        }
 +    }
 +
 +    // Define debuginfo for function
 +    let isa = module.isa();
 +    let debug_context = &mut cx.debug_context;
 +    let unwind_context = &mut cx.unwind_context;
 +    cx.profiler.verbose_generic_activity("generate debug info").run(|| {
 +        if let Some(debug_context) = debug_context {
 +            codegened_func.func_debug_cx.unwrap().finalize(
 +                debug_context,
 +                codegened_func.func_id,
 +                context,
 +            );
 +        }
 +        unwind_context.add_function(codegened_func.func_id, &context, isa);
 +    });
 +}
 +
 +/// Runs the Cranelift IR verifier on `func`.
 +///
 +/// On a verifier error, reports the raw error, then aborts compilation with a
 +/// pretty-printed error decorated using the collected clif comments.
 +pub(crate) fn verify_func(
 +    tcx: TyCtxt<'_>,
 +    writer: &crate::pretty_clif::CommentWriter,
 +    func: &Function,
 +) {
 +    tcx.sess.time("verify clif ir", || {
 +        // Default flags are sufficient for verification; no ISA-specific settings needed.
 +        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
 +        match cranelift_codegen::verify_function(&func, &flags) {
 +            Ok(_) => {}
 +            Err(err) => {
 +                tcx.sess.err(&format!("{:?}", err));
 +                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
 +                    &func,
 +                    Some(Box::new(writer)),
 +                    err,
 +                );
 +                tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
 +            }
 +        }
 +    });
 +}
 +
 +/// Codegens the MIR body of the current function into the pre-created
 +/// Cranelift blocks: first the ABI prelude, then every basic block's
 +/// statements and terminator.
 +fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
 +    // If a required constant failed to evaluate, emit a trap body instead of codegenning.
 +    if !crate::constant::check_constants(fx) {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        // compilation should have been aborted
 +        fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +        return;
 +    }
 +
 +    // A function taking an uninhabited argument can never actually be called;
 +    // emit a trap body instead of codegenning it.
 +    let arg_uninhabited = fx
 +        .mir
 +        .args_iter()
 +        .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
 +    if arg_uninhabited {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +        return;
 +    }
 +    fx.tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(fx, start_block));
 +
 +    for (bb, bb_data) in fx.mir.basic_blocks.iter_enumerated() {
 +        let block = fx.get_block(bb);
 +        fx.bcx.switch_to_block(block);
 +
 +        if bb_data.is_cleanup {
 +            // Unwinding after panicking is not supported
 +            continue;
 +
 +            // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
 +            // so for cleanup blocks.
 +        }
 +
 +        // NOTE(review): the nop presumably guarantees the block has an instruction
 +        // to anchor comments/debug locations to — confirm.
 +        fx.bcx.ins().nop();
 +        for stmt in &bb_data.statements {
 +            fx.set_debug_loc(stmt.source_info);
 +            codegen_stmt(fx, block, stmt);
 +        }
 +
 +        // Attach a comment describing the terminator to the last emitted instruction.
 +        if fx.clif_comments.enabled() {
 +            let mut terminator_head = "\n".to_string();
 +            with_no_trimmed_paths!({
 +                bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
 +            });
 +            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
 +            fx.add_comment(inst, terminator_head);
 +        }
 +
 +        let source_info = bb_data.terminator().source_info;
 +        fx.set_debug_loc(source_info);
 +
 +        match &bb_data.terminator().kind {
 +            TerminatorKind::Goto { target } => {
 +                // If the target block only frees storage and returns, return directly
 +                // instead of emitting a jump to it.
 +                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
 +                    let mut can_immediately_return = true;
 +                    for stmt in &fx.mir[*target].statements {
 +                        if let StatementKind::StorageDead(_) = stmt.kind {
 +                        } else {
 +                            // FIXME Can sometimes happen, see rust-lang/rust#70531
 +                            can_immediately_return = false;
 +                            break;
 +                        }
 +                    }
 +
 +                    if can_immediately_return {
 +                        crate::abi::codegen_return(fx);
 +                        continue;
 +                    }
 +                }
 +
 +                let block = fx.get_block(*target);
 +                fx.bcx.ins().jump(block, &[]);
 +            }
 +            TerminatorKind::Return => {
 +                crate::abi::codegen_return(fx);
 +            }
 +            TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
 +                // With overflow checks disabled, negation-overflow asserts always succeed.
 +                if !fx.tcx.sess.overflow_checks() {
 +                    if let mir::AssertKind::OverflowNeg(_) = *msg {
 +                        let target = fx.get_block(*target);
 +                        fx.bcx.ins().jump(target, &[]);
 +                        continue;
 +                    }
 +                }
 +                let cond = codegen_operand(fx, cond).load_scalar(fx);
 +
 +                let target = fx.get_block(*target);
 +                let failure = fx.bcx.create_block();
 +                fx.bcx.set_cold_block(failure);
 +
 +                // Branch to the failure block when the condition differs from `expected`.
 +                if *expected {
 +                    fx.bcx.ins().brz(cond, failure, &[]);
 +                } else {
 +                    fx.bcx.ins().brnz(cond, failure, &[]);
 +                };
 +                fx.bcx.ins().jump(target, &[]);
 +
 +                fx.bcx.switch_to_block(failure);
 +                fx.bcx.ins().nop();
 +
 +                match msg {
 +                    AssertKind::BoundsCheck { ref len, ref index } => {
 +                        let len = codegen_operand(fx, len).load_scalar(fx);
 +                        let index = codegen_operand(fx, index).load_scalar(fx);
 +                        let location = fx.get_caller_location(source_info).load_scalar(fx);
 +
 +                        codegen_panic_inner(
 +                            fx,
 +                            rustc_hir::LangItem::PanicBoundsCheck,
 +                            &[index, len, location],
 +                            source_info.span,
 +                        );
 +                    }
 +                    _ => {
 +                        let msg_str = msg.description();
 +                        codegen_panic(fx, msg_str, source_info);
 +                    }
 +                }
 +            }
 +
 +            TerminatorKind::SwitchInt { discr, targets } => {
 +                let discr = codegen_operand(fx, discr);
 +                let switch_ty = discr.layout().ty;
 +                let discr = discr.load_scalar(fx);
 +
 +                // A switch over a bool, or with a single target on value 0, can be
 +                // lowered to a conditional branch instead of a jump table.
 +                let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
 +                    || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
 +                if use_bool_opt {
 +                    assert_eq!(targets.iter().count(), 1);
 +                    let (then_value, then_block) = targets.iter().next().unwrap();
 +                    let then_block = fx.get_block(then_block);
 +                    let else_block = fx.get_block(targets.otherwise());
 +                    let test_zero = match then_value {
 +                        0 => true,
 +                        1 => false,
 +                        _ => unreachable!("{:?}", targets),
 +                    };
 +
-                     let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    // Peephole: see through a `bnot` on the discriminant by inverting the test.
 +                    let (discr, is_inverted) =
 +                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
 +                    let test_zero = if is_inverted { !test_zero } else { test_zero };
-                                 CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
 +                    // If the branch direction is statically known, emit an unconditional jump.
 +                    if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
 +                        &fx.bcx, discr, test_zero,
 +                    ) {
 +                        if taken {
 +                            fx.bcx.ins().jump(then_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    } else {
 +                        if test_zero {
 +                            fx.bcx.ins().brz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().brnz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    }
 +                } else {
 +                    // General case: lower to a cranelift_frontend Switch.
 +                    let mut switch = ::cranelift_frontend::Switch::new();
 +                    for (value, block) in targets.iter() {
 +                        let block = fx.get_block(block);
 +                        switch.set_entry(value, block);
 +                    }
 +                    let otherwise_block = fx.get_block(targets.otherwise());
 +                    switch.emit(&mut fx.bcx, discr, otherwise_block);
 +                }
 +            }
 +            TerminatorKind::Call {
 +                func,
 +                args,
 +                destination,
 +                target,
 +                fn_span,
 +                cleanup: _,
 +                from_hir_call: _,
 +            } => {
 +                fx.tcx.sess.time("codegen call", || {
 +                    crate::abi::codegen_terminator_call(
 +                        fx,
 +                        mir::SourceInfo { span: *fn_span, ..source_info },
 +                        func,
 +                        args,
 +                        *destination,
 +                        *target,
 +                    )
 +                });
 +            }
 +            TerminatorKind::InlineAsm {
 +                template,
 +                operands,
 +                options,
 +                destination,
 +                line_spans: _,
 +                cleanup: _,
 +            } => {
 +                if options.contains(InlineAsmOptions::MAY_UNWIND) {
 +                    fx.tcx.sess.span_fatal(
 +                        source_info.span,
 +                        "cranelift doesn't support unwinding from inline assembly.",
 +                    );
 +                }
 +
 +                crate::inline_asm::codegen_inline_asm(
 +                    fx,
 +                    source_info.span,
 +                    template,
 +                    operands,
 +                    *options,
 +                    *destination,
 +                );
 +            }
 +            TerminatorKind::Resume | TerminatorKind::Abort => {
 +                // FIXME implement unwinding
 +                fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +            }
 +            TerminatorKind::Unreachable => {
 +                fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +            }
 +            // These terminators are removed by earlier MIR passes and must not
 +            // reach codegen.
 +            TerminatorKind::Yield { .. }
 +            | TerminatorKind::FalseEdge { .. }
 +            | TerminatorKind::FalseUnwind { .. }
 +            | TerminatorKind::DropAndReplace { .. }
 +            | TerminatorKind::GeneratorDrop => {
 +                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
 +            }
 +            TerminatorKind::Drop { place, target, unwind: _ } => {
 +                let drop_place = codegen_place(fx, *place);
 +                crate::abi::codegen_drop(fx, source_info, drop_place);
 +
 +                let target_block = fx.get_block(*target);
 +                fx.bcx.ins().jump(target_block, &[]);
 +            }
 +        };
 +    }
 +
 +    fx.bcx.seal_all_blocks();
 +    fx.bcx.finalize();
 +}
 +
 +fn codegen_stmt<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    #[allow(unused_variables)] cur_block: Block,
 +    stmt: &Statement<'tcx>,
 +) {
 +    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
 +
 +    fx.set_debug_loc(stmt.source_info);
 +
 +    #[cfg(any())] // This is never true
 +    match &stmt.kind {
 +        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
 +        _ => {
 +            if fx.clif_comments.enabled() {
 +                let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
 +                fx.add_comment(inst, format!("{:?}", stmt));
 +            }
 +        }
 +    }
 +
 +    match &stmt.kind {
 +        StatementKind::SetDiscriminant { place, variant_index } => {
 +            let place = codegen_place(fx, **place);
 +            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
 +        }
 +        StatementKind::Assign(to_place_and_rval) => {
 +            let lval = codegen_place(fx, to_place_and_rval.0);
 +            let dest_layout = lval.layout();
 +            match to_place_and_rval.1 {
 +                Rvalue::Use(ref operand) => {
 +                    let val = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::CopyForDeref(place) => {
 +                    let cplace = codegen_place(fx, place);
 +                    let val = cplace.to_cvalue(fx);
 +                    lval.write_cvalue(fx, val)
 +                }
 +                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
 +                    let place = codegen_place(fx, place);
 +                    let ref_ = place.place_ref(fx, lval.layout());
 +                    lval.write_cvalue(fx, ref_);
 +                }
 +                Rvalue::ThreadLocalRef(def_id) => {
 +                    let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = if !fx.tcx.sess.overflow_checks() {
 +                        let val =
 +                            crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
 +                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
 +                        CValue::by_val_pair(val, is_overflow, lval.layout())
 +                    } else {
 +                        crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
 +                    };
 +
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::UnaryOp(un_op, ref operand) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let layout = operand.layout();
 +                    let val = operand.load_scalar(fx);
 +                    let res = match un_op {
 +                        UnOp::Not => match layout.ty.kind() {
 +                            ty::Bool => {
 +                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
-                             ty::Int(IntTy::I128) => {
-                                 // FIXME remove this case once ineg.i128 works
-                                 let zero =
-                                     CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
-                                 crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
-                             }
++                                CValue::by_val(res, layout)
 +                            }
 +                            ty::Uint(_) | ty::Int(_) => {
 +                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
 +                            }
 +                            _ => unreachable!("un op Not for {:?}", layout.ty),
 +                        },
 +                        UnOp::Neg => match layout.ty.kind() {
 +                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
 +                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
 +                            _ => unreachable!("un op Neg for {:?}", layout.ty),
 +                        },
 +                    };
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ReifyFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    match *from_ty.kind() {
 +                        ty::FnDef(def_id, substs) => {
 +                            let func_ref = fx.get_function_ref(
 +                                Instance::resolve_for_fn_ptr(
 +                                    fx.tcx,
 +                                    ParamEnv::reveal_all(),
 +                                    def_id,
 +                                    substs,
 +                                )
 +                                .unwrap()
 +                                .polymorphize(fx.tcx),
 +                            );
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
 +                        }
 +                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::UnsafeFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::MutToConstPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ArrayToPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    let operand = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
 +                }
 +                Rvalue::Cast(
 +                    CastKind::IntToInt
 +                    | CastKind::FloatToFloat
 +                    | CastKind::FloatToInt
 +                    | CastKind::IntToFloat
 +                    | CastKind::FnPtrToPtr
 +                    | CastKind::PtrToPtr
 +                    | CastKind::PointerExposeAddress
 +                    | CastKind::PointerFromExposedAddress,
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let from_ty = operand.layout().ty;
 +                    let to_ty = fx.monomorphize(to_ty);
 +
 +                    fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
 +                        ty.builtin_deref(true)
 +                            .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
 +                                has_ptr_meta(fx.tcx, pointee_ty)
 +                            })
 +                            .unwrap_or(false)
 +                    }
 +
 +                    if is_fat_ptr(fx, from_ty) {
 +                        if is_fat_ptr(fx, to_ty) {
 +                            // fat-ptr -> fat-ptr
 +                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
 +                        } else {
 +                            // fat-ptr -> thin-ptr
 +                            let (ptr, _extra) = operand.load_scalar_pair(fx);
 +                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
 +                        }
 +                    } else {
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +                        let from = operand.load_scalar(fx);
 +
 +                        let res = clif_int_or_float_cast(
 +                            fx,
 +                            from,
 +                            type_sign(from_ty),
 +                            to_clif_ty,
 +                            type_sign(to_ty),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
 +                    ref operand,
 +                    _to_ty,
 +                ) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    match *operand.layout().ty.kind() {
 +                        ty::Closure(def_id, substs) => {
 +                            let instance = Instance::resolve_closure(
 +                                fx.tcx,
 +                                def_id,
 +                                substs,
 +                                ty::ClosureKind::FnOnce,
 +                            )
 +                            .expect("failed to normalize and resolve closure during codegen")
 +                            .polymorphize(fx.tcx);
 +                            let func_ref = fx.get_function_ref(instance);
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
 +                        }
 +                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
 +                    }
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    operand.unsize_value(fx, lval);
 +                }
 +                Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    operand.coerce_dyn_star(fx, lval);
 +                }
 +                Rvalue::Discriminant(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let value = place.to_cvalue(fx);
 +                    crate::discriminant::codegen_get_discriminant(fx, lval, value, dest_layout);
 +                }
 +                Rvalue::Repeat(ref operand, times) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let times = fx
 +                        .monomorphize(times)
 +                        .eval(fx.tcx, ParamEnv::reveal_all())
 +                        .kind()
 +                        .try_to_bits(fx.tcx.data_layout.pointer_size)
 +                        .unwrap();
 +                    if operand.layout().size.bytes() == 0 {
 +                        // Do nothing for ZST's
 +                    } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
 +                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
 +                        // FIXME use emit_small_memset where possible
 +                        let addr = lval.to_ptr().get_addr(fx);
 +                        let val = operand.load_scalar(fx);
 +                        fx.bcx.call_memset(fx.target_config, addr, val, times);
 +                    } else {
 +                        let loop_block = fx.bcx.create_block();
 +                        let loop_block2 = fx.bcx.create_block();
 +                        let done_block = fx.bcx.create_block();
 +                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
 +                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                        fx.bcx.ins().jump(loop_block, &[zero]);
 +
 +                        fx.bcx.switch_to_block(loop_block);
 +                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
 +                        fx.bcx.ins().brnz(done, done_block, &[]);
 +                        fx.bcx.ins().jump(loop_block2, &[]);
 +
 +                        fx.bcx.switch_to_block(loop_block2);
 +                        let to = lval.place_index(fx, index);
 +                        to.write_cvalue(fx, operand);
 +                        let index = fx.bcx.ins().iadd_imm(index, 1);
 +                        fx.bcx.ins().jump(loop_block, &[index]);
 +
 +                        fx.bcx.switch_to_block(done_block);
 +                        fx.bcx.ins().nop();
 +                    }
 +                }
 +                Rvalue::Len(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +                    let len = codegen_array_len(fx, place);
 +                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
 +                }
 +                Rvalue::ShallowInitBox(ref operand, content_ty) => {
 +                    let content_ty = fx.monomorphize(content_ty);
 +                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
 +                    let operand = codegen_operand(fx, operand);
 +                    let operand = operand.load_scalar(fx);
 +                    lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
 +                }
 +                Rvalue::NullaryOp(null_op, ty) => {
 +                    assert!(lval.layout().ty.is_sized(fx.tcx, ParamEnv::reveal_all()));
 +                    let layout = fx.layout_of(fx.monomorphize(ty));
 +                    let val = match null_op {
 +                        NullOp::SizeOf => layout.size.bytes(),
 +                        NullOp::AlignOf => layout.align.abi.bytes(),
 +                    };
 +                    let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
 +                    AggregateKind::Array(_ty) => {
 +                        for (i, operand) in operands.iter().enumerate() {
 +                            let operand = codegen_operand(fx, operand);
 +                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
 +                            let to = lval.place_index(fx, index);
 +                            to.write_cvalue(fx, operand);
 +                        }
 +                    }
 +                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
 +                },
 +            }
 +        }
 +        StatementKind::StorageLive(_)
 +        | StatementKind::StorageDead(_)
 +        | StatementKind::Deinit(_)
 +        | StatementKind::Nop
 +        | StatementKind::FakeRead(..)
 +        | StatementKind::Retag { .. }
 +        | StatementKind::AscribeUserType(..) => {}
 +
 +        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
 +        StatementKind::Intrinsic(ref intrinsic) => match &**intrinsic {
 +            // We ignore `assume` intrinsics, they are only useful for optimizations
 +            NonDivergingIntrinsic::Assume(_) => {}
 +            NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
 +                src,
 +                dst,
 +                count,
 +            }) => {
 +                let dst = codegen_operand(fx, dst);
 +                let pointee = dst
 +                    .layout()
 +                    .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
 +                    .expect("Expected pointer");
 +                let dst = dst.load_scalar(fx);
 +                let src = codegen_operand(fx, src).load_scalar(fx);
 +                let count = codegen_operand(fx, count).load_scalar(fx);
 +                let elem_size: u64 = pointee.size.bytes();
 +                let bytes = if elem_size != 1 {
 +                    fx.bcx.ins().imul_imm(count, elem_size as i64)
 +                } else {
 +                    count
 +                };
 +                fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
 +            }
 +        },
 +    }
 +}
 +
 +fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
 +    match *place.layout().ty.kind() {
 +        ty::Array(_elem_ty, len) => {
 +            let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
 +            fx.bcx.ins().iconst(fx.pointer_type, len)
 +        }
 +        ty::Slice(_elem_ty) => {
 +            place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
 +        }
 +        _ => bug!("Rvalue::Len({:?})", place),
 +    }
 +}
 +
 +pub(crate) fn codegen_place<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    place: Place<'tcx>,
 +) -> CPlace<'tcx> {
 +    let mut cplace = fx.get_local_place(place.local);
 +
 +    for elem in place.projection {
 +        match elem {
 +            PlaceElem::Deref => {
 +                cplace = cplace.place_deref(fx);
 +            }
 +            PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
 +            PlaceElem::Field(field, _ty) => {
 +                cplace = cplace.place_field(fx, field);
 +            }
 +            PlaceElem::Index(local) => {
 +                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
 +                let offset: u64 = offset;
 +                let index = if !from_end {
 +                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
 +                } else {
 +                    let len = codegen_array_len(fx, cplace);
 +                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
 +                };
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::Subslice { from, to, from_end } => {
 +                // These indices are generated by slice patterns.
 +                // slice[from:-to] in Python terms.
 +
 +                let from: u64 = from;
 +                let to: u64 = to;
 +
 +                match cplace.layout().ty.kind() {
 +                    ty::Array(elem_ty, _len) => {
 +                        assert!(!from_end, "array subslices are never `from_end`");
 +                        let elem_layout = fx.layout_of(*elem_ty);
 +                        let ptr = cplace.to_ptr();
 +                        cplace = CPlace::for_ptr(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
 +                        );
 +                    }
 +                    ty::Slice(elem_ty) => {
 +                        assert!(from_end, "slice subslices should be `from_end`");
 +                        let elem_layout = fx.layout_of(*elem_ty);
 +                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
 +                        let len = len.unwrap();
 +                        cplace = CPlace::for_ptr_with_extra(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
 +                            cplace.layout(),
 +                        );
 +                    }
 +                    _ => unreachable!(),
 +                }
 +            }
 +            PlaceElem::Downcast(_adt_def, variant) => {
 +                cplace = cplace.downcast_variant(fx, variant);
 +            }
 +        }
 +    }
 +
 +    cplace
 +}
 +
 +pub(crate) fn codegen_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> CValue<'tcx> {
 +    match operand {
 +        Operand::Move(place) | Operand::Copy(place) => {
 +            let cplace = codegen_place(fx, *place);
 +            cplace.to_cvalue(fx)
 +        }
 +        Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
 +    }
 +}
 +
 +pub(crate) fn codegen_panic<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    msg_str: &str,
 +    source_info: mir::SourceInfo,
 +) {
 +    let location = fx.get_caller_location(source_info).load_scalar(fx);
 +
 +    let msg_ptr = fx.anonymous_str(msg_str);
 +    let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
 +    let args = [msg_ptr, msg_len, location];
 +
 +    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
 +}
 +
 +pub(crate) fn codegen_panic_inner<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    lang_item: rustc_hir::LangItem,
 +    args: &[Value],
 +    span: Span,
 +) {
 +    let def_id = fx
 +        .tcx
 +        .lang_items()
 +        .require(lang_item)
 +        .unwrap_or_else(|e| fx.tcx.sess.span_fatal(span, e.to_string()));
 +
 +    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +    let symbol_name = fx.tcx.symbol_name(instance).name;
 +
 +    fx.lib_call(
 +        &*symbol_name,
 +        vec![
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +        ],
 +        vec![],
 +        args,
 +    );
 +
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
index bad5d1f08a9cf5454766b36de3f2614e5a6020a4,0000000000000000000000000000000000000000..5091c5a9fedacbb7f4dda7b4d5ab7f086f17d1d7
mode 100644,000000..100644
--- /dev/null
@@@ -1,164 -1,0 +1,164 @@@
-         let zero = fx.bcx.ins().iconst(to_ty, 0);
 +//! Various number casting functions
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn clif_intcast(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    val: Value,
 +    to: Type,
 +    signed: bool,
 +) -> Value {
 +    let from = fx.bcx.func.dfg.value_type(val);
 +    match (from, to) {
 +        // equal
 +        (_, _) if from == to => val,
 +
 +        // extend
 +        (_, _) if to.wider_or_equal(from) => {
 +            if signed {
 +                fx.bcx.ins().sextend(to, val)
 +            } else {
 +                fx.bcx.ins().uextend(to, val)
 +            }
 +        }
 +
 +        // reduce
 +        (_, _) => fx.bcx.ins().ireduce(to, val),
 +    }
 +}
 +
 +pub(crate) fn clif_int_or_float_cast(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    from: Value,
 +    from_signed: bool,
 +    to_ty: Type,
 +    to_signed: bool,
 +) -> Value {
 +    let from_ty = fx.bcx.func.dfg.value_type(from);
 +
 +    if from_ty.is_int() && to_ty.is_int() {
 +        // int-like -> int-like
 +        clif_intcast(
 +            fx,
 +            from,
 +            to_ty,
 +            // This is correct as either from_signed == to_signed (=> this is trivially correct)
 +            // Or from_clif_ty == to_clif_ty, which means this is a no-op.
 +            from_signed,
 +        )
 +    } else if from_ty.is_int() && to_ty.is_float() {
 +        if from_ty == types::I128 {
 +            // _______ss__f_
 +            // __float  tisf: i128 -> f32
 +            // __float  tidf: i128 -> f64
 +            // __floatuntisf: u128 -> f32
 +            // __floatuntidf: u128 -> f64
 +
 +            let name = format!(
 +                "__float{sign}ti{flt}f",
 +                sign = if from_signed { "" } else { "un" },
 +                flt = match to_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", to_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
 +
 +            let to_rust_ty = match to_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            return fx
 +                .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
 +                .load_scalar(fx);
 +        }
 +
 +        // int-like -> float
 +        if from_signed {
 +            fx.bcx.ins().fcvt_from_sint(to_ty, from)
 +        } else {
 +            fx.bcx.ins().fcvt_from_uint(to_ty, from)
 +        }
 +    } else if from_ty.is_float() && to_ty.is_int() {
 +        let val = if to_ty == types::I128 {
 +            // _____sssf___
 +            // __fix   sfti: f32 -> i128
 +            // __fix   dfti: f64 -> i128
 +            // __fixunssfti: f32 -> u128
 +            // __fixunsdfti: f64 -> u128
 +
 +            let name = format!(
 +                "__fix{sign}{flt}fti",
 +                sign = if to_signed { "" } else { "uns" },
 +                flt = match from_ty {
 +                    types::F32 => "s",
 +                    types::F64 => "d",
 +                    _ => unreachable!("{:?}", to_ty),
 +                },
 +            );
 +
 +            let from_rust_ty = match from_ty {
 +                types::F32 => fx.tcx.types.f32,
 +                types::F64 => fx.tcx.types.f64,
 +                _ => unreachable!(),
 +            };
 +
 +            let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
 +
 +            fx.easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
 +                .load_scalar(fx)
 +        } else if to_ty == types::I8 || to_ty == types::I16 {
 +            // FIXME implement fcvt_to_*int_sat.i8/i16
 +            let val = if to_signed {
 +                fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
 +            } else {
 +                fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
 +            };
 +            let (min, max) = match (to_ty, to_signed) {
 +                (types::I8, false) => (0, i64::from(u8::MAX)),
 +                (types::I16, false) => (0, i64::from(u16::MAX)),
 +                (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
 +                (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
 +                _ => unreachable!(),
 +            };
 +            let min_val = fx.bcx.ins().iconst(types::I32, min);
 +            let max_val = fx.bcx.ins().iconst(types::I32, max);
 +
 +            let val = if to_signed {
 +                let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
 +                let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
 +                fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
 +            } else {
 +                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
 +                fx.bcx.ins().select(has_overflow, max_val, val)
 +            };
 +            fx.bcx.ins().ireduce(to_ty, val)
 +        } else if to_signed {
 +            fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
 +        } else {
 +            fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
 +        };
 +
 +        if let Some(false) = fx.tcx.sess.opts.unstable_opts.saturating_float_casts {
 +            return val;
 +        }
 +
 +        let is_not_nan = fx.bcx.ins().fcmp(FloatCC::Equal, from, from);
++        let zero = type_zero_value(&mut fx.bcx, to_ty);
 +        fx.bcx.ins().select(is_not_nan, val, zero)
 +    } else if from_ty.is_float() && to_ty.is_float() {
 +        // float -> float
 +        match (from_ty, to_ty) {
 +            (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
 +            (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
 +            _ => from,
 +        }
 +    } else {
 +        unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
 +    }
 +}
index 589594465783e1611c688cd17f5c82325ae9576f,0000000000000000000000000000000000000000..2dcd42fbd8f431833ac46b414c539e40c828f0e8
mode 100644,000000..100644
--- /dev/null
@@@ -1,507 -1,0 +1,516 @@@
-         let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
 +use cranelift_codegen::isa::TargetFrontendConfig;
 +use gimli::write::FileId;
 +
 +use rustc_data_structures::sync::Lrc;
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::layout::{
 +    FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers,
 +};
 +use rustc_span::SourceFile;
 +use rustc_target::abi::call::FnAbi;
 +use rustc_target::abi::{Integer, Primitive};
 +use rustc_target::spec::{HasTargetSpec, Target};
 +
 +use crate::constant::ConstantCx;
 +use crate::debuginfo::FunctionDebugContext;
 +use crate::prelude::*;
 +
 +pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
 +    match tcx.data_layout.pointer_size.bits() {
 +        16 => types::I16,
 +        32 => types::I32,
 +        64 => types::I64,
 +        bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
 +    }
 +}
 +
 +pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
 +    match scalar.primitive() {
 +        Primitive::Int(int, _sign) => match int {
 +            Integer::I8 => types::I8,
 +            Integer::I16 => types::I16,
 +            Integer::I32 => types::I32,
 +            Integer::I64 => types::I64,
 +            Integer::I128 => types::I128,
 +        },
 +        Primitive::F32 => types::F32,
 +        Primitive::F64 => types::F64,
 +        Primitive::Pointer => pointer_ty(tcx),
 +    }
 +}
 +
 +fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
 +    Some(match ty.kind() {
 +        ty::Bool => types::I8,
 +        ty::Uint(size) => match size {
 +            UintTy::U8 => types::I8,
 +            UintTy::U16 => types::I16,
 +            UintTy::U32 => types::I32,
 +            UintTy::U64 => types::I64,
 +            UintTy::U128 => types::I128,
 +            UintTy::Usize => pointer_ty(tcx),
 +        },
 +        ty::Int(size) => match size {
 +            IntTy::I8 => types::I8,
 +            IntTy::I16 => types::I16,
 +            IntTy::I32 => types::I32,
 +            IntTy::I64 => types::I64,
 +            IntTy::I128 => types::I128,
 +            IntTy::Isize => pointer_ty(tcx),
 +        },
 +        ty::Char => types::I32,
 +        ty::Float(size) => match size {
 +            FloatTy::F32 => types::F32,
 +            FloatTy::F64 => types::F64,
 +        },
 +        ty::FnPtr(_) => pointer_ty(tcx),
 +        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, *pointee_ty) {
 +                return None;
 +            } else {
 +                pointer_ty(tcx)
 +            }
 +        }
 +        ty::Adt(adt_def, _) if adt_def.repr().simd() => {
 +            let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
 +            {
 +                Abi::Vector { element, count } => (element.clone(), *count),
 +                _ => unreachable!(),
 +            };
 +
 +            match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
 +                // Cranelift currently only implements icmp for 128bit vectors.
 +                Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
 +                _ => return None,
 +            }
 +        }
 +        ty::Param(_) => bug!("ty param {:?}", ty),
 +        _ => return None,
 +    })
 +}
 +
 +fn clif_pair_type_from_ty<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> Option<(types::Type, types::Type)> {
 +    Some(match ty.kind() {
 +        ty::Tuple(types) if types.len() == 2 => {
 +            let a = clif_type_from_ty(tcx, types[0])?;
 +            let b = clif_type_from_ty(tcx, types[1])?;
 +            if a.is_vector() || b.is_vector() {
 +                return None;
 +            }
 +            (a, b)
 +        }
 +        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, *pointee_ty) {
 +                (pointer_ty(tcx), pointer_ty(tcx))
 +            } else {
 +                return None;
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +/// Is a pointer to this type a fat ptr?
 +pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
 +    let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
 +    match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
 +        Abi::Scalar(_) => false,
 +        Abi::ScalarPair(_, _) => true,
 +        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
 +    }
 +}
 +
 +pub(crate) fn codegen_icmp_imm(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    intcc: IntCC,
 +    lhs: Value,
 +    rhs: i128,
 +) -> Value {
 +    let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +    if lhs_ty == types::I128 {
 +        // FIXME legalize `icmp_imm.i128` in Cranelift
 +
 +        let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
 +        let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
 +
 +        match intcc {
 +            IntCC::Equal => {
 +                let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                fx.bcx.ins().band(lsb_eq, msb_eq)
 +            }
 +            IntCC::NotEqual => {
 +                let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
 +                let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
 +                fx.bcx.ins().bor(lsb_ne, msb_ne)
 +            }
 +            _ => {
 +                // if msb_eq {
 +                //     lsb_cc
 +                // } else {
 +                //     msb_cc
 +                // }
 +
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
 +                let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
 +
 +                fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
 +            }
 +        }
 +    } else {
++        let rhs = rhs as i64; // Truncates on purpose in case rhs is actually an unsigned value
 +        fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
 +    }
 +}
 +
++pub(crate) fn type_zero_value(bcx: &mut FunctionBuilder<'_>, ty: Type) -> Value {
++    if ty == types::I128 {
++        let zero = bcx.ins().iconst(types::I64, 0);
++        bcx.ins().iconcat(zero, zero)
++    } else {
++        bcx.ins().iconst(ty, 0)
++    }
++}
++
 +pub(crate) fn type_min_max_value(
 +    bcx: &mut FunctionBuilder<'_>,
 +    ty: Type,
 +    signed: bool,
 +) -> (Value, Value) {
 +    assert!(ty.is_int());
 +
 +    if ty == types::I128 {
 +        if signed {
 +            let min = i128::MIN as u128;
 +            let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
 +            let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
 +            let min = bcx.ins().iconcat(min_lsb, min_msb);
 +
 +            let max = i128::MAX as u128;
 +            let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
 +            let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
 +            let max = bcx.ins().iconcat(max_lsb, max_msb);
 +
 +            return (min, max);
 +        } else {
 +            let min_half = bcx.ins().iconst(types::I64, 0);
 +            let min = bcx.ins().iconcat(min_half, min_half);
 +
 +            let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
 +            let max = bcx.ins().iconcat(max_half, max_half);
 +
 +            return (min, max);
 +        }
 +    }
 +
 +    let min = match (ty, signed) {
 +        (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
 +            0i64
 +        }
 +        (types::I8, true) => i64::from(i8::MIN),
 +        (types::I16, true) => i64::from(i16::MIN),
 +        (types::I32, true) => i64::from(i32::MIN),
 +        (types::I64, true) => i64::MIN,
 +        _ => unreachable!(),
 +    };
 +
 +    let max = match (ty, signed) {
 +        (types::I8, false) => i64::from(u8::MAX),
 +        (types::I16, false) => i64::from(u16::MAX),
 +        (types::I32, false) => i64::from(u32::MAX),
 +        (types::I64, false) => u64::MAX as i64,
 +        (types::I8, true) => i64::from(i8::MAX),
 +        (types::I16, true) => i64::from(i16::MAX),
 +        (types::I32, true) => i64::from(i32::MAX),
 +        (types::I64, true) => i64::MAX,
 +        _ => unreachable!(),
 +    };
 +
 +    let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
 +
 +    (min, max)
 +}
 +
 +pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
 +    match ty.kind() {
 +        ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
 +        ty::Int(..) => true,
 +        ty::Float(..) => false, // `signed` is unused for floats
 +        _ => panic!("{}", ty),
 +    }
 +}
 +
 +pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
 +    pub(crate) cx: &'clif mut crate::CodegenCx,
 +    pub(crate) module: &'m mut dyn Module,
 +    pub(crate) tcx: TyCtxt<'tcx>,
 +    pub(crate) target_config: TargetFrontendConfig, // Cached from module
 +    pub(crate) pointer_type: Type,                  // Cached from module
 +    pub(crate) constants_cx: ConstantCx,
 +    pub(crate) func_debug_cx: Option<FunctionDebugContext>,
 +
 +    pub(crate) instance: Instance<'tcx>,
 +    pub(crate) symbol_name: String,
 +    pub(crate) mir: &'tcx Body<'tcx>,
 +    pub(crate) fn_abi: Option<&'tcx FnAbi<'tcx, Ty<'tcx>>>,
 +
 +    pub(crate) bcx: FunctionBuilder<'clif>,
 +    pub(crate) block_map: IndexVec<BasicBlock, Block>,
 +    pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
 +
 +    /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
 +    pub(crate) caller_location: Option<CValue<'tcx>>,
 +
 +    pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
 +
 +    /// Last accessed source file and it's debuginfo file id.
 +    ///
 +    /// For optimization purposes only
 +    pub(crate) last_source_file: Option<(Lrc<SourceFile>, FileId)>,
 +
 +    /// This should only be accessed by `CPlace::new_var`.
 +    pub(crate) next_ssa_var: u32,
 +}
 +
 +impl<'tcx> LayoutOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    type LayoutOfResult = TyAndLayout<'tcx>;
 +
 +    #[inline]
 +    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
 +        RevealAllLayoutCx(self.tcx).handle_layout_err(err, span, ty)
 +    }
 +}
 +
 +impl<'tcx> FnAbiOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
 +
 +    #[inline]
 +    fn handle_fn_abi_err(
 +        &self,
 +        err: FnAbiError<'tcx>,
 +        span: Span,
 +        fn_abi_request: FnAbiRequest<'tcx>,
 +    ) -> ! {
 +        RevealAllLayoutCx(self.tcx).handle_fn_abi_err(err, span, fn_abi_request)
 +    }
 +}
 +
 +impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
 +        self.tcx
 +    }
 +}
 +
 +impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
 +    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
 +        &self.tcx.data_layout
 +    }
 +}
 +
 +impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.tcx.sess.target
 +    }
 +}
 +
 +impl<'tcx> FunctionCx<'_, '_, 'tcx> {
 +    pub(crate) fn monomorphize<T>(&self, value: T) -> T
 +    where
 +        T: TypeFoldable<'tcx> + Copy,
 +    {
 +        self.instance.subst_mir_and_normalize_erasing_regions(
 +            self.tcx,
 +            ty::ParamEnv::reveal_all(),
 +            value,
 +        )
 +    }
 +
 +    pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
 +        clif_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
 +        clif_pair_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
 +        *self.block_map.get(bb).unwrap()
 +    }
 +
 +    pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
 +        *self.local_map.get(local).unwrap_or_else(|| {
 +            panic!("Local {:?} doesn't exist", local);
 +        })
 +    }
 +
 +    pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
 +        if let Some(debug_context) = &mut self.cx.debug_context {
 +            let (file, line, column) =
 +                DebugContext::get_span_loc(self.tcx, self.mir.span, source_info.span);
 +
 +            // add_source_file is very slow.
 +            // Optimize for the common case of the current file not being changed.
 +            let mut cached_file_id = None;
 +            if let Some((ref last_source_file, last_file_id)) = self.last_source_file {
 +                // If the allocations are not equal, the files may still be equal, but that
 +                // doesn't matter, as this is just an optimization.
 +                if rustc_data_structures::sync::Lrc::ptr_eq(last_source_file, &file) {
 +                    cached_file_id = Some(last_file_id);
 +                }
 +            }
 +
 +            let file_id = if let Some(file_id) = cached_file_id {
 +                file_id
 +            } else {
 +                debug_context.add_source_file(&file)
 +            };
 +
 +            let source_loc =
 +                self.func_debug_cx.as_mut().unwrap().add_dbg_loc(file_id, line, column);
 +            self.bcx.set_srcloc(source_loc);
 +        }
 +    }
 +
 +    // Note: must be kept in sync with get_caller_location from cg_ssa
 +    pub(crate) fn get_caller_location(&mut self, mut source_info: mir::SourceInfo) -> CValue<'tcx> {
 +        let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, span: Span| {
 +            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
 +            let caller = fx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
 +            let const_loc = fx.tcx.const_caller_location((
 +                rustc_span::symbol::Symbol::intern(
 +                    &caller.file.name.prefer_remapped().to_string_lossy(),
 +                ),
 +                caller.line as u32,
 +                caller.col_display as u32 + 1,
 +            ));
 +            crate::constant::codegen_const_value(fx, const_loc, fx.tcx.caller_location_ty())
 +        };
 +
 +        // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
 +        // If so, the starting `source_info.span` is in the innermost inlined
 +        // function, and will be replaced with outer callsite spans as long
 +        // as the inlined functions were `#[track_caller]`.
 +        loop {
 +            let scope_data = &self.mir.source_scopes[source_info.scope];
 +
 +            if let Some((callee, callsite_span)) = scope_data.inlined {
 +                // Stop inside the most nested non-`#[track_caller]` function,
 +                // before ever reaching its caller (which is irrelevant).
 +                if !callee.def.requires_caller_location(self.tcx) {
 +                    return span_to_caller_location(self, source_info.span);
 +                }
 +                source_info.span = callsite_span;
 +            }
 +
 +            // Skip past all of the parents with `inlined: None`.
 +            match scope_data.inlined_parent_scope {
 +                Some(parent) => source_info.scope = parent,
 +                None => break,
 +            }
 +        }
 +
 +        // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
 +        self.caller_location.unwrap_or_else(|| span_to_caller_location(self, source_info.span))
 +    }
 +
 +    pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
 +        let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
 +
 +        // Ignore DuplicateDefinition error, as the data will be the same
 +        let _ = self.module.define_data(msg_id, &data_ctx);
 +
 +        let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
 +        if self.clif_comments.enabled() {
 +            self.add_comment(local_msg_id, msg);
 +        }
 +        self.bcx.ins().global_value(self.pointer_type, local_msg_id)
 +    }
 +}
 +
 +pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
 +
 +impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
 +    type LayoutOfResult = TyAndLayout<'tcx>;
 +
 +    #[inline]
 +    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
 +        if let layout::LayoutError::SizeOverflow(_) = err {
 +            self.0.sess.span_fatal(span, &err.to_string())
 +        } else {
 +            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
 +        }
 +    }
 +}
 +
 +impl<'tcx> FnAbiOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
 +    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
 +
 +    #[inline]
 +    fn handle_fn_abi_err(
 +        &self,
 +        err: FnAbiError<'tcx>,
 +        span: Span,
 +        fn_abi_request: FnAbiRequest<'tcx>,
 +    ) -> ! {
 +        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
 +            self.0.sess.span_fatal(span, &err.to_string())
 +        } else {
 +            match fn_abi_request {
 +                FnAbiRequest::OfFnPtr { sig, extra_args } => {
 +                    span_bug!(
 +                        span,
 +                        "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
 +                        sig,
 +                        extra_args,
 +                        err
 +                    );
 +                }
 +                FnAbiRequest::OfInstance { instance, extra_args } => {
 +                    span_bug!(
 +                        span,
 +                        "`fn_abi_of_instance({}, {:?})` failed: {}",
 +                        instance,
 +                        extra_args,
 +                        err
 +                    );
 +                }
 +            }
 +        }
 +    }
 +}
 +
 +impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
 +    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
 +        self.0
 +    }
 +}
 +
 +impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
 +    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
 +        &self.0.data_layout
 +    }
 +}
 +
 +impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.0.sess.target
 +    }
 +}
index a6bde88408497ed9d12a7cd28a82966d7fc52439,0000000000000000000000000000000000000000..dee6fb5b5130d1f27abaf1fda1581605a82973b5
mode 100644,000000..100644
--- /dev/null
@@@ -1,544 -1,0 +1,562 @@@
-         //println!("todo {:?}", self.todo);
 +//! Handling of `static`s, `const`s and promoted allocations
 +
 +use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_middle::mir::interpret::{
 +    read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
 +};
 +
 +use cranelift_module::*;
 +
 +use crate::prelude::*;
 +
 +pub(crate) struct ConstantCx {
 +    todo: Vec<TodoItem>,
 +    done: FxHashSet<DataId>,
 +    anon_allocs: FxHashMap<AllocId, DataId>,
 +}
 +
 +#[derive(Copy, Clone, Debug)]
 +enum TodoItem {
 +    Alloc(AllocId),
 +    Static(DefId),
 +}
 +
 +impl ConstantCx {
 +    pub(crate) fn new() -> Self {
 +        ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
 +    }
 +
 +    pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
-         //println!("done {:?}", self.done);
 +        define_all_allocs(tcx, module, &mut self);
-     let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
-     let linkage = if definition {
-         crate::linkage::get_static_linkage(tcx, def_id)
-     } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
-         || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
-     {
-         Linkage::Preemptible
-     } else {
-         Linkage::Import
-     };
 +        self.done.clear();
 +    }
 +}
 +
 +pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
 +    let mut all_constants_ok = true;
 +    for constant in &fx.mir.required_consts {
 +        if eval_mir_constant(fx, constant).is_none() {
 +            all_constants_ok = false;
 +        }
 +    }
 +    all_constants_ok
 +}
 +
 +pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
 +    let mut constants_cx = ConstantCx::new();
 +    constants_cx.todo.push(TodoItem::Static(def_id));
 +    constants_cx.finalize(tcx, module);
 +}
 +
 +pub(crate) fn codegen_tls_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    def_id: DefId,
 +    layout: TyAndLayout<'tcx>,
 +) -> CValue<'tcx> {
 +    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("tls {:?}", def_id));
 +    }
 +    let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
 +    CValue::by_val(tls_ptr, layout)
 +}
 +
 +pub(crate) fn eval_mir_constant<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    constant: &Constant<'tcx>,
 +) -> Option<(ConstValue<'tcx>, Ty<'tcx>)> {
 +    let constant_kind = fx.monomorphize(constant.literal);
 +    let uv = match constant_kind {
 +        ConstantKind::Ty(const_) => match const_.kind() {
 +            ty::ConstKind::Unevaluated(uv) => uv.expand(),
 +            ty::ConstKind::Value(val) => {
 +                return Some((fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty()));
 +            }
 +            err => span_bug!(
 +                constant.span,
 +                "encountered bad ConstKind after monomorphizing: {:?}",
 +                err
 +            ),
 +        },
 +        ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
 +            if fx.tcx.is_static(def.did) =>
 +        {
 +            span_bug!(constant.span, "MIR constant refers to static");
 +        }
 +        ConstantKind::Unevaluated(uv, _) => uv,
 +        ConstantKind::Val(val, _) => return Some((val, constant_kind.ty())),
 +    };
 +
 +    let val = fx
 +        .tcx
 +        .const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None)
 +        .map_err(|err| match err {
 +            ErrorHandled::Reported(_) => {
 +                fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
 +            }
 +            ErrorHandled::TooGeneric => {
 +                span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
 +            }
 +        })
 +        .ok();
 +    val.map(|val| (val, constant_kind.ty()))
 +}
 +
 +pub(crate) fn codegen_constant_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    constant: &Constant<'tcx>,
 +) -> CValue<'tcx> {
 +    let (const_val, ty) = eval_mir_constant(fx, constant).unwrap_or_else(|| {
 +        span_bug!(constant.span, "erroneous constant not captured by required_consts")
 +    });
 +
 +    codegen_const_value(fx, const_val, ty)
 +}
 +
 +pub(crate) fn codegen_const_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    const_val: ConstValue<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> CValue<'tcx> {
 +    let layout = fx.layout_of(ty);
 +    assert!(layout.is_sized(), "unsized const value");
 +
 +    if layout.is_zst() {
 +        return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
 +    }
 +
 +    match const_val {
 +        ConstValue::ZeroSized => unreachable!(), // we already handles ZST above
 +        ConstValue::Scalar(x) => match x {
 +            Scalar::Int(int) => {
 +                if fx.clif_type(layout.ty).is_some() {
 +                    return CValue::const_val(fx, layout, int);
 +                } else {
 +                    let raw_val = int.to_bits(int.size()).unwrap();
 +                    let val = match int.size().bytes() {
 +                        1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
 +                        2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
 +                        4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
 +                        8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
 +                        16 => {
 +                            let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
 +                            let msb =
 +                                fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
 +                            fx.bcx.ins().iconcat(lsb, msb)
 +                        }
 +                        _ => unreachable!(),
 +                    };
 +
 +                    let place = CPlace::new_stack_slot(fx, layout);
 +                    place.to_ptr().store(fx, val, MemFlags::trusted());
 +                    place.to_cvalue(fx)
 +                }
 +            }
 +            Scalar::Ptr(ptr, _size) => {
 +                let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
 +                let base_addr = match fx.tcx.global_alloc(alloc_id) {
 +                    GlobalAlloc::Memory(alloc) => {
 +                        let data_id = data_id_for_alloc_id(
 +                            &mut fx.constants_cx,
 +                            fx.module,
 +                            alloc_id,
 +                            alloc.inner().mutability,
 +                        );
 +                        let local_data_id =
 +                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                        if fx.clif_comments.enabled() {
 +                            fx.add_comment(local_data_id, format!("{:?}", alloc_id));
 +                        }
 +                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                    }
 +                    GlobalAlloc::Function(instance) => {
 +                        let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
 +                        let local_func_id =
 +                            fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
 +                        fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
 +                    }
 +                    GlobalAlloc::VTable(ty, trait_ref) => {
 +                        let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
 +                        let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
 +                        // FIXME: factor this common code with the `Memory` arm into a function?
 +                        let data_id = data_id_for_alloc_id(
 +                            &mut fx.constants_cx,
 +                            fx.module,
 +                            alloc_id,
 +                            alloc.inner().mutability,
 +                        );
 +                        let local_data_id =
 +                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                    }
 +                    GlobalAlloc::Static(def_id) => {
 +                        assert!(fx.tcx.is_static(def_id));
 +                        let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
 +                        let local_data_id =
 +                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                        if fx.clif_comments.enabled() {
 +                            fx.add_comment(local_data_id, format!("{:?}", def_id));
 +                        }
 +                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                    }
 +                };
 +                let val = if offset.bytes() != 0 {
 +                    fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
 +                } else {
 +                    base_addr
 +                };
 +                CValue::by_val(val, layout)
 +            }
 +        },
 +        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
 +            pointer_for_allocation(fx, alloc)
 +                .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
 +            layout,
 +        ),
 +        ConstValue::Slice { data, start, end } => {
 +            let ptr = pointer_for_allocation(fx, data)
 +                .offset_i64(fx, i64::try_from(start).unwrap())
 +                .get_addr(fx);
 +            let len = fx
 +                .bcx
 +                .ins()
 +                .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
 +            CValue::by_val_pair(ptr, len, layout)
 +        }
 +    }
 +}
 +
 +fn pointer_for_allocation<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    alloc: ConstAllocation<'tcx>,
 +) -> crate::pointer::Pointer {
 +    let alloc_id = fx.tcx.create_memory_alloc(alloc);
 +    let data_id = data_id_for_alloc_id(
 +        &mut fx.constants_cx,
 +        &mut *fx.module,
 +        alloc_id,
 +        alloc.inner().mutability,
 +    );
 +
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("{:?}", alloc_id));
 +    }
 +    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
 +    crate::pointer::Pointer::new(global_ptr)
 +}
 +
 +pub(crate) fn data_id_for_alloc_id(
 +    cx: &mut ConstantCx,
 +    module: &mut dyn Module,
 +    alloc_id: AllocId,
 +    mutability: rustc_hir::Mutability,
 +) -> DataId {
 +    cx.todo.push(TodoItem::Alloc(alloc_id));
 +    *cx.anon_allocs
 +        .entry(alloc_id)
 +        .or_insert_with(|| module.declare_anonymous_data(mutability.is_mut(), false).unwrap())
 +}
 +
 +fn data_id_for_static(
 +    tcx: TyCtxt<'_>,
 +    module: &mut dyn Module,
 +    def_id: DefId,
 +    definition: bool,
 +) -> DataId {
-     let attrs = tcx.codegen_fn_attrs(def_id);
++    let attrs = tcx.codegen_fn_attrs(def_id);
 +
 +    let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
 +    let symbol_name = tcx.symbol_name(instance).name;
 +    let ty = instance.ty(tcx, ParamEnv::reveal_all());
 +    let is_mutable = if tcx.is_mutable_static(def_id) {
 +        true
 +    } else {
 +        !ty.is_freeze(tcx, ParamEnv::reveal_all())
 +    };
 +    let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
 +
-     let data_id = match module.declare_data(
-         &*symbol_name,
-         linkage,
-         is_mutable,
-         attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
-     ) {
-         Ok(data_id) => data_id,
-         Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
-             "attempt to declare `{symbol_name}` as static, but it was already declared as function"
-         )),
-         Err(err) => Err::<_, _>(err).unwrap(),
-     };
++    if let Some(import_linkage) = attrs.import_linkage {
++        assert!(!definition);
 +
-     if rlinkage.is_some() {
++        let linkage = if import_linkage == rustc_middle::mir::mono::Linkage::ExternalWeak
++            || import_linkage == rustc_middle::mir::mono::Linkage::WeakAny
++        {
++            Linkage::Preemptible
++        } else {
++            Linkage::Import
++        };
++
++        let data_id = match module.declare_data(
++            &*symbol_name,
++            linkage,
++            is_mutable,
++            attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
++        ) {
++            Ok(data_id) => data_id,
++            Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
++                "attempt to declare `{symbol_name}` as static, but it was already declared as function"
++            )),
++            Err(err) => Err::<_, _>(err).unwrap(),
++        };
 +
-         ref_data_id
-     } else {
-         data_id
 +        // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
 +        // Declare an internal global `extern_with_linkage_foo` which
 +        // is initialized with the address of `foo`.  If `foo` is
 +        // discarded during linking (for example, if `foo` has weak
 +        // linkage and there are no definitions), then
 +        // `extern_with_linkage_foo` will instead be initialized to
 +        // zero.
 +
 +        let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
 +        let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.set_align(align);
 +        let data = module.declare_data_in_data(data_id, &mut data_ctx);
 +        data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
 +        data_ctx.write_data_addr(0, data, 0);
 +        match module.define_data(ref_data_id, &data_ctx) {
 +            // Every time the static is referenced there will be another definition of this global,
 +            // so duplicate definitions are expected and allowed.
 +            Err(ModuleError::DuplicateDefinition(_)) => {}
 +            res => res.unwrap(),
 +        }
-                 //println!("static {:?}", def_id);
++
++        return ref_data_id;
 +    }
++
++    let linkage = if definition {
++        crate::linkage::get_static_linkage(tcx, def_id)
++    } else if attrs.linkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
++        || attrs.linkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
++    {
++        Linkage::Preemptible
++    } else {
++        Linkage::Import
++    };
++
++    let data_id = match module.declare_data(
++        &*symbol_name,
++        linkage,
++        is_mutable,
++        attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
++    ) {
++        Ok(data_id) => data_id,
++        Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
++            "attempt to declare `{symbol_name}` as static, but it was already declared as function"
++        )),
++        Err(err) => Err::<_, _>(err).unwrap(),
++    };
++
++    data_id
 +}
 +
 +fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
 +    while let Some(todo_item) = cx.todo.pop() {
 +        let (data_id, alloc, section_name) = match todo_item {
 +            TodoItem::Alloc(alloc_id) => {
 +                let alloc = match tcx.global_alloc(alloc_id) {
 +                    GlobalAlloc::Memory(alloc) => alloc,
 +                    GlobalAlloc::Function(_) | GlobalAlloc::Static(_) | GlobalAlloc::VTable(..) => {
 +                        unreachable!()
 +                    }
 +                };
 +                let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
 +                    module.declare_anonymous_data(alloc.inner().mutability.is_mut(), false).unwrap()
 +                });
 +                (data_id, alloc, None)
 +            }
 +            TodoItem::Static(def_id) => {
-         //("data_id {}", data_id);
 +                let section_name = tcx.codegen_fn_attrs(def_id).link_section;
 +
 +                let alloc = tcx.eval_static_initializer(def_id).unwrap();
 +
 +                let data_id = data_id_for_static(tcx, module, def_id, true);
 +                (data_id, alloc, section_name)
 +            }
 +        };
 +
 +        if cx.done.contains(&data_id) {
 +            continue;
 +        }
 +
 +        let mut data_ctx = DataContext::new();
 +        let alloc = alloc.inner();
 +        data_ctx.set_align(alloc.align.bytes());
 +
 +        if let Some(section_name) = section_name {
 +            let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
 +                let section_name = section_name.as_str();
 +                if let Some(names) = section_name.split_once(',') {
 +                    names
 +                } else {
 +                    tcx.sess.fatal(&format!(
 +                        "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
 +                        section_name
 +                    ));
 +                }
 +            } else {
 +                ("", section_name.as_str())
 +            };
 +            data_ctx.set_segment_section(segment_name, section_name);
 +        }
 +
 +        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
 +        data_ctx.define(bytes.into_boxed_slice());
 +
 +        for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
 +            let addend = {
 +                let endianness = tcx.data_layout.endian;
 +                let offset = offset.bytes() as usize;
 +                let ptr_size = tcx.data_layout.pointer_size;
 +                let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
 +                    offset..offset + ptr_size.bytes() as usize,
 +                );
 +                read_target_uint(endianness, bytes).unwrap()
 +            };
 +
 +            let reloc_target_alloc = tcx.global_alloc(alloc_id);
 +            let data_id = match reloc_target_alloc {
 +                GlobalAlloc::Function(instance) => {
 +                    assert_eq!(addend, 0);
 +                    let func_id =
 +                        crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
 +                    let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
 +                    data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
 +                    continue;
 +                }
 +                GlobalAlloc::Memory(target_alloc) => {
 +                    data_id_for_alloc_id(cx, module, alloc_id, target_alloc.inner().mutability)
 +                }
 +                GlobalAlloc::VTable(ty, trait_ref) => {
 +                    let alloc_id = tcx.vtable_allocation((ty, trait_ref));
 +                    data_id_for_alloc_id(cx, module, alloc_id, Mutability::Not)
 +                }
 +                GlobalAlloc::Static(def_id) => {
 +                    if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
 +                    {
 +                        tcx.sess.fatal(&format!(
 +                            "Allocation {:?} contains reference to TLS value {:?}",
 +                            alloc_id, def_id
 +                        ));
 +                    }
 +
 +                    // Don't push a `TodoItem::Static` here, as it will cause statics used by
 +                    // multiple crates to be duplicated between them. It isn't necessary anyway,
 +                    // as it will get pushed by `codegen_static` when necessary.
 +                    data_id_for_static(tcx, module, def_id, false)
 +                }
 +            };
 +
 +            let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
 +            data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
 +        }
 +
 +        module.define_data(data_id, &data_ctx).unwrap();
 +        cx.done.insert(data_id);
 +    }
 +
 +    assert!(cx.todo.is_empty(), "{:?}", cx.todo);
 +}
 +
 +/// Used only for intrinsic implementations that need a compile-time constant
 +pub(crate) fn mir_operand_get_const_val<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> Option<ConstValue<'tcx>> {
 +    match operand {
 +        Operand::Constant(const_) => Some(eval_mir_constant(fx, const_).unwrap().0),
 +        // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
 +        // inside a temporary before being passed to the intrinsic requiring the const argument.
 +        // This code tries to find a single constant defining definition of the referenced local.
 +        Operand::Copy(place) | Operand::Move(place) => {
 +            if !place.projection.is_empty() {
 +                return None;
 +            }
 +            let mut computed_const_val = None;
 +            for bb_data in fx.mir.basic_blocks.iter() {
 +                for stmt in &bb_data.statements {
 +                    match &stmt.kind {
 +                        StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
 +                            match &local_and_rvalue.1 {
 +                                Rvalue::Cast(
 +                                    CastKind::IntToInt
 +                                    | CastKind::FloatToFloat
 +                                    | CastKind::FloatToInt
 +                                    | CastKind::IntToFloat
 +                                    | CastKind::FnPtrToPtr
 +                                    | CastKind::PtrToPtr,
 +                                    operand,
 +                                    ty,
 +                                ) => {
 +                                    if computed_const_val.is_some() {
 +                                        return None; // local assigned twice
 +                                    }
 +                                    if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
 +                                        return None;
 +                                    }
 +                                    let const_val = mir_operand_get_const_val(fx, operand)?;
 +                                    if fx.layout_of(*ty).size
 +                                        != const_val.try_to_scalar_int()?.size()
 +                                    {
 +                                        return None;
 +                                    }
 +                                    computed_const_val = Some(const_val);
 +                                }
 +                                Rvalue::Use(operand) => {
 +                                    computed_const_val = mir_operand_get_const_val(fx, operand)
 +                                }
 +                                _ => return None,
 +                            }
 +                        }
 +                        StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
 +                            if &**stmt_place == place =>
 +                        {
 +                            return None;
 +                        }
 +                        StatementKind::Intrinsic(ref intrinsic) => match **intrinsic {
 +                            NonDivergingIntrinsic::CopyNonOverlapping(..) => return None,
 +                            NonDivergingIntrinsic::Assume(..) => {}
 +                        },
 +                        // conservative handling
 +                        StatementKind::Assign(_)
 +                        | StatementKind::FakeRead(_)
 +                        | StatementKind::SetDiscriminant { .. }
 +                        | StatementKind::Deinit(_)
 +                        | StatementKind::StorageLive(_)
 +                        | StatementKind::StorageDead(_)
 +                        | StatementKind::Retag(_, _)
 +                        | StatementKind::AscribeUserType(_, _)
 +                        | StatementKind::Coverage(_)
 +                        | StatementKind::Nop => {}
 +                    }
 +                }
 +                match &bb_data.terminator().kind {
 +                    TerminatorKind::Goto { .. }
 +                    | TerminatorKind::SwitchInt { .. }
 +                    | TerminatorKind::Resume
 +                    | TerminatorKind::Abort
 +                    | TerminatorKind::Return
 +                    | TerminatorKind::Unreachable
 +                    | TerminatorKind::Drop { .. }
 +                    | TerminatorKind::Assert { .. } => {}
 +                    TerminatorKind::DropAndReplace { .. }
 +                    | TerminatorKind::Yield { .. }
 +                    | TerminatorKind::GeneratorDrop
 +                    | TerminatorKind::FalseEdge { .. }
 +                    | TerminatorKind::FalseUnwind { .. } => unreachable!(),
 +                    TerminatorKind::InlineAsm { .. } => return None,
 +                    TerminatorKind::Call { destination, target: Some(_), .. }
 +                        if destination == place =>
 +                    {
 +                        return None;
 +                    }
 +                    TerminatorKind::Call { .. } => {}
 +                }
 +            }
 +            computed_const_val
 +        }
 +    }
 +}
index d26392c4913b508a3ab68335d2ea771243c6b42d,0000000000000000000000000000000000000000..493359c743f119d6bcee87920e595c943a009642
mode 100644,000000..100644
--- /dev/null
@@@ -1,136 -1,0 +1,138 @@@
-         let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
 +//! Unwind info generation (`.eh_frame`)
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::ir::Endianness;
 +use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
 +
 +use cranelift_object::ObjectProduct;
 +use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
 +use gimli::RunTimeEndian;
 +
 +use super::object::WriteDebugInfo;
 +
 +pub(crate) struct UnwindContext {
 +    endian: RunTimeEndian,
 +    frame_table: FrameTable,
 +    cie_id: Option<CieId>,
 +}
 +
 +impl UnwindContext {
 +    pub(crate) fn new(isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
 +        let endian = match isa.endianness() {
 +            Endianness::Little => RunTimeEndian::Little,
 +            Endianness::Big => RunTimeEndian::Big,
 +        };
 +        let mut frame_table = FrameTable::default();
 +
 +        let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
 +            if pic_eh_frame {
 +                cie.fde_address_encoding =
 +                    gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
 +            }
 +            Some(frame_table.add_cie(cie))
 +        } else {
 +            None
 +        };
 +
 +        UnwindContext { endian, frame_table, cie_id }
 +    }
 +
 +    pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
++        let unwind_info = if let Some(unwind_info) =
++            context.compiled_code().unwrap().create_unwind_info(isa).unwrap()
++        {
 +            unwind_info
 +        } else {
 +            return;
 +        };
 +
 +        match unwind_info {
 +            UnwindInfo::SystemV(unwind_info) => {
 +                self.frame_table.add_fde(
 +                    self.cie_id.unwrap(),
 +                    unwind_info
 +                        .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
 +                );
 +            }
 +            UnwindInfo::WindowsX64(_) => {
 +                // FIXME implement this
 +            }
 +            unwind_info => unimplemented!("{:?}", unwind_info),
 +        }
 +    }
 +
 +    pub(crate) fn emit(self, product: &mut ObjectProduct) {
 +        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
 +        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
 +
 +        if !eh_frame.0.writer.slice().is_empty() {
 +            let id = eh_frame.id();
 +            let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
 +            let mut section_map = FxHashMap::default();
 +            section_map.insert(id, section_id);
 +
 +            for reloc in &eh_frame.0.relocs {
 +                product.add_debug_reloc(&section_map, &section_id, reloc);
 +            }
 +        }
 +    }
 +
 +    #[cfg(all(feature = "jit", windows))]
 +    pub(crate) unsafe fn register_jit(self, _jit_module: &cranelift_jit::JITModule) {}
 +
 +    #[cfg(all(feature = "jit", not(windows)))]
 +    pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) {
 +        use std::mem::ManuallyDrop;
 +
 +        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
 +        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
 +
 +        if eh_frame.0.writer.slice().is_empty() {
 +            return;
 +        }
 +
 +        let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
 +
 +        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
 +        eh_frame.extend(&[0, 0, 0, 0]);
 +
 +        // FIXME support unregistering unwind tables once cranelift-jit supports deallocating
 +        // individual functions
 +        let eh_frame = ManuallyDrop::new(eh_frame);
 +
 +        // =======================================================================
 +        // Everything after this line up to the end of the file is loosely based on
 +        // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
 +        #[cfg(target_os = "macos")]
 +        {
 +            // On macOS, `__register_frame` takes a pointer to a single FDE
 +            let start = eh_frame.as_ptr();
 +            let end = start.add(eh_frame.len());
 +            let mut current = start;
 +
 +            // Walk all of the entries in the frame table and register them
 +            while current < end {
 +                let len = std::ptr::read::<u32>(current as *const u32) as usize;
 +
 +                // Skip over the CIE
 +                if current != start {
 +                    __register_frame(current);
 +                }
 +
 +                // Move to the next table entry (+4 because the length itself is not inclusive)
 +                current = current.add(len + 4);
 +            }
 +        }
 +        #[cfg(not(target_os = "macos"))]
 +        {
 +            // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
 +            __register_frame(eh_frame.as_ptr());
 +        }
 +    }
 +}
 +
 +extern "C" {
 +    // libunwind import
 +    fn __register_frame(fde: *const u8);
 +}
index 97b395bcd05186b199e59026ddec9fda4472dc77,0000000000000000000000000000000000000000..3cbf313adf0df5a69887cdb1f0516245f2b23232
mode 100644,000000..100644
--- /dev/null
@@@ -1,179 -1,0 +1,294 @@@
- //! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
 +//! Handling of enum discriminants
 +//!
-                 let niche_value = ty::ScalarInt::try_from_uint(
-                     u128::from(niche_value).wrapping_add(niche_start),
-                     niche.layout().size,
-                 )
-                 .unwrap();
-                 let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
++//! Adapted from <https://github.com/rust-lang/rust/blob/31c0645b9d2539f47eecb096142474b29dc542f7/compiler/rustc_codegen_ssa/src/mir/place.rs>
++//! (<https://github.com/rust-lang/rust/pull/104535>)
 +
 +use rustc_target::abi::{Int, TagEncoding, Variants};
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn codegen_set_discriminant<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    place: CPlace<'tcx>,
 +    variant_index: VariantIdx,
 +) {
 +    let layout = place.layout();
 +    if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
 +        return;
 +    }
 +    match layout.variants {
 +        Variants::Single { index } => {
 +            assert_eq!(index, variant_index);
 +        }
 +        Variants::Multiple {
 +            tag: _,
 +            tag_field,
 +            tag_encoding: TagEncoding::Direct,
 +            variants: _,
 +        } => {
 +            let ptr = place.place_field(fx, mir::Field::new(tag_field));
 +            let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
 +            let to = if ptr.layout().abi.is_signed() {
 +                ty::ScalarInt::try_from_int(
 +                    ptr.layout().size.sign_extend(to) as i128,
 +                    ptr.layout().size,
 +                )
 +                .unwrap()
 +            } else {
 +                ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
 +            };
 +            let discr = CValue::const_val(fx, ptr.layout(), to);
 +            ptr.write_cvalue(fx, discr);
 +        }
 +        Variants::Multiple {
 +            tag: _,
 +            tag_field,
 +            tag_encoding: TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
 +            variants: _,
 +        } => {
 +            if variant_index != untagged_variant {
 +                let niche = place.place_field(fx, mir::Field::new(tag_field));
++                let niche_type = fx.clif_type(niche.layout().ty).unwrap();
 +                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
-             // Rebase from niche values to discriminants, and check
-             // whether the result is in range for the niche variants.
-             // We first compute the "relative discriminant" (wrt `niche_variants`),
-             // that is, if `n = niche_variants.end() - niche_variants.start()`,
-             // we remap `niche_start..=niche_start + n` (which may wrap around)
-             // to (non-wrap-around) `0..=n`, to be able to check whether the
-             // discriminant corresponds to a niche variant with one comparison.
-             // We also can't go directly to the (variant index) discriminant
-             // and check that it is in the range `niche_variants`, because
-             // that might not fit in the same type, on top of needing an extra
-             // comparison (see also the comment on `let niche_discr`).
-             let relative_discr = if niche_start == 0 {
-                 tag
++                let niche_value = (niche_value as u128).wrapping_add(niche_start);
++                let niche_value = match niche_type {
++                    types::I128 => {
++                        let lsb = fx.bcx.ins().iconst(types::I64, niche_value as u64 as i64);
++                        let msb =
++                            fx.bcx.ins().iconst(types::I64, (niche_value >> 64) as u64 as i64);
++                        fx.bcx.ins().iconcat(lsb, msb)
++                    }
++                    ty => fx.bcx.ins().iconst(ty, niche_value as i64),
++                };
++                let niche_llval = CValue::by_val(niche_value, niche.layout());
 +                niche.write_cvalue(fx, niche_llval);
 +            }
 +        }
 +    }
 +}
 +
 +pub(crate) fn codegen_get_discriminant<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    dest: CPlace<'tcx>,
 +    value: CValue<'tcx>,
 +    dest_layout: TyAndLayout<'tcx>,
 +) {
 +    let layout = value.layout();
 +
 +    if layout.abi.is_uninhabited() {
 +        return;
 +    }
 +
 +    let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
 +        Variants::Single { index } => {
 +            let discr_val = layout
 +                .ty
 +                .discriminant_for_variant(fx.tcx, *index)
 +                .map_or(u128::from(index.as_u32()), |discr| discr.val);
 +            let discr_val = if dest_layout.abi.is_signed() {
 +                ty::ScalarInt::try_from_int(
 +                    dest_layout.size.sign_extend(discr_val) as i128,
 +                    dest_layout.size,
 +                )
 +                .unwrap()
 +            } else {
 +                ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
 +            };
 +            let res = CValue::const_val(fx, dest_layout, discr_val);
 +            dest.write_cvalue(fx, res);
 +            return;
 +        }
 +        Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
 +            (tag, *tag_field, tag_encoding)
 +        }
 +    };
 +
++    let cast_to_size = dest_layout.layout.size();
 +    let cast_to = fx.clif_type(dest_layout.ty).unwrap();
 +
 +    // Read the tag/niche-encoded discriminant from memory.
 +    let tag = value.value_field(fx, mir::Field::new(tag_field));
 +    let tag = tag.load_scalar(fx);
 +
 +    // Decode the discriminant (specifically if it's niche-encoded).
 +    match *tag_encoding {
 +        TagEncoding::Direct => {
 +            let signed = match tag_scalar.primitive() {
 +                Int(_, signed) => signed,
 +                _ => false,
 +            };
 +            let val = clif_intcast(fx, tag, cast_to, signed);
 +            let res = CValue::by_val(val, dest_layout);
 +            dest.write_cvalue(fx, res);
 +        }
 +        TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
-                 fx.bcx.ins().isub(tag, niche_start)
-             };
-             let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
-             let is_niche = {
-                 codegen_icmp_imm(
++            let tag_size = tag_scalar.size(fx);
++            let max_unsigned = tag_size.unsigned_int_max();
++            let max_signed = tag_size.signed_int_max() as u128;
++            let min_signed = max_signed + 1;
++            let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
++            let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
++            let range = tag_scalar.valid_range(fx);
++
++            let sle = |lhs: u128, rhs: u128| -> bool {
++                // Signed and unsigned comparisons give the same results,
++                // except that in signed comparisons an integer with the
++                // sign bit set is less than one with the sign bit clear.
++                // Toggle the sign bit to do a signed comparison.
++                (lhs ^ min_signed) <= (rhs ^ min_signed)
++            };
++
++            // We have a subrange `niche_start..=niche_end` inside `range`.
++            // If the value of the tag is inside this subrange, it's a
++            // "niche value", an increment of the discriminant. Otherwise it
++            // indicates the untagged variant.
++            // A general algorithm to extract the discriminant from the tag
++            // is:
++            // relative_tag = tag - niche_start
++            // is_niche = relative_tag <= (ule) relative_max
++            // discr = if is_niche {
++            //     cast(relative_tag) + niche_variants.start()
++            // } else {
++            //     untagged_variant
++            // }
++            // However, we will likely be able to emit simpler code.
++
++            // Find the least and greatest values in `range`, considered
++            // both as signed and unsigned.
++            let (low_unsigned, high_unsigned) =
++                if range.start <= range.end { (range.start, range.end) } else { (0, max_unsigned) };
++            let (low_signed, high_signed) = if sle(range.start, range.end) {
++                (range.start, range.end)
 +            } else {
++                (min_signed, max_signed)
++            };
++
++            let niches_ule = niche_start <= niche_end;
++            let niches_sle = sle(niche_start, niche_end);
++            let cast_smaller = cast_to_size <= tag_size;
++
++            // In the algorithm above, we can change
++            // cast(relative_tag) + niche_variants.start()
++            // into
++            // cast(tag + (niche_variants.start() - niche_start))
++            // if either the casted type is no larger than the original
++            // type, or if the niche values are contiguous (in either the
++            // signed or unsigned sense).
++            let can_incr = cast_smaller || niches_ule || niches_sle;
++
++            let data_for_boundary_niche = || -> Option<(IntCC, u128)> {
++                if !can_incr {
++                    None
++                } else if niche_start == low_unsigned {
++                    Some((IntCC::UnsignedLessThanOrEqual, niche_end))
++                } else if niche_end == high_unsigned {
++                    Some((IntCC::UnsignedGreaterThanOrEqual, niche_start))
++                } else if niche_start == low_signed {
++                    Some((IntCC::SignedLessThanOrEqual, niche_end))
++                } else if niche_end == high_signed {
++                    Some((IntCC::SignedGreaterThanOrEqual, niche_start))
++                } else {
++                    None
++                }
++            };
++
++            let (is_niche, tagged_discr, delta) = if relative_max == 0 {
++                // Best case scenario: only one tagged variant. This will
++                // likely become just a comparison and a jump.
++                // The algorithm is:
++                // is_niche = tag == niche_start
++                // discr = if is_niche {
++                //     niche_start
++                // } else {
++                //     untagged_variant
++                // }
++                let is_niche = codegen_icmp_imm(fx, IntCC::Equal, tag, niche_start as i128);
++                let tagged_discr =
++                    fx.bcx.ins().iconst(cast_to, niche_variants.start().as_u32() as i64);
++                (is_niche, tagged_discr, 0)
++            } else if let Some((predicate, constant)) = data_for_boundary_niche() {
++                // The niche values are either the lowest or the highest in
++                // `range`. We can avoid the first subtraction in the
++                // algorithm.
++                // The algorithm is now this:
++                // is_niche = tag <= niche_end
++                // discr = if is_niche {
++                //     cast(tag + (niche_variants.start() - niche_start))
++                // } else {
++                //     untagged_variant
++                // }
++                // (the first line may instead be tag >= niche_start,
++                // and may be a signed or unsigned comparison)
++                // The arithmetic must be done before the cast, so we can
++                // have the correct wrapping behavior. See issue #104519 for
++                // the consequences of getting this wrong.
++                let is_niche = codegen_icmp_imm(fx, predicate, tag, constant as i128);
++                let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
++                let incr_tag = if delta == 0 {
++                    tag
++                } else {
++                    let delta = match fx.bcx.func.dfg.value_type(tag) {
++                        types::I128 => {
++                            let lsb = fx.bcx.ins().iconst(types::I64, delta as u64 as i64);
++                            let msb = fx.bcx.ins().iconst(types::I64, (delta >> 64) as u64 as i64);
++                            fx.bcx.ins().iconcat(lsb, msb)
++                        }
++                        ty => fx.bcx.ins().iconst(ty, delta as i64),
++                    };
++                    fx.bcx.ins().iadd(tag, delta)
++                };
++
++                let cast_tag = clif_intcast(fx, incr_tag, cast_to, !niches_ule);
++
++                (is_niche, cast_tag, 0)
++            } else {
++                // The special cases don't apply, so we'll have to go with
++                // the general algorithm.
 +                let niche_start = match fx.bcx.func.dfg.value_type(tag) {
 +                    types::I128 => {
 +                        let lsb = fx.bcx.ins().iconst(types::I64, niche_start as u64 as i64);
 +                        let msb =
 +                            fx.bcx.ins().iconst(types::I64, (niche_start >> 64) as u64 as i64);
 +                        fx.bcx.ins().iconcat(lsb, msb)
 +                    }
 +                    ty => fx.bcx.ins().iconst(ty, niche_start as i64),
 +                };
-                 )
++                let relative_discr = fx.bcx.ins().isub(tag, niche_start);
++                let cast_tag = clif_intcast(fx, relative_discr, cast_to, false);
++                let is_niche = crate::common::codegen_icmp_imm(
 +                    fx,
 +                    IntCC::UnsignedLessThanOrEqual,
 +                    relative_discr,
 +                    i128::from(relative_max),
-             // NOTE(eddyb) this addition needs to be performed on the final
-             // type, in case the niche itself can't represent all variant
-             // indices (e.g. `u8` niche with more than `256` variants,
-             // but enough uninhabited variants so that the remaining variants
-             // fit in the niche).
-             // In other words, `niche_variants.end - niche_variants.start`
-             // is representable in the niche, but `niche_variants.end`
-             // might not be, in extreme cases.
-             let niche_discr = {
-                 let relative_discr = if relative_max == 0 {
-                     // HACK(eddyb) since we have only one niche, we know which
-                     // one it is, and we can avoid having a dynamic value here.
-                     fx.bcx.ins().iconst(cast_to, 0)
-                 } else {
-                     clif_intcast(fx, relative_discr, cast_to, false)
++                );
++                (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
 +            };
 +
-                 fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
++            let tagged_discr = if delta == 0 {
++                tagged_discr
++            } else {
++                let delta = match cast_to {
++                    types::I128 => {
++                        let lsb = fx.bcx.ins().iconst(types::I64, delta as u64 as i64);
++                        let msb = fx.bcx.ins().iconst(types::I64, (delta >> 64) as u64 as i64);
++                        fx.bcx.ins().iconcat(lsb, msb)
++                    }
++                    ty => fx.bcx.ins().iconst(ty, delta as i64),
 +                };
-             let untagged_variant =
-                 fx.bcx.ins().iconst(cast_to, i64::from(untagged_variant.as_u32()));
-             let discr = fx.bcx.ins().select(is_niche, niche_discr, untagged_variant);
++                fx.bcx.ins().iadd(tagged_discr, delta)
 +            };
 +
++            let untagged_variant = if cast_to == types::I128 {
++                let zero = fx.bcx.ins().iconst(types::I64, 0);
++                let untagged_variant =
++                    fx.bcx.ins().iconst(types::I64, i64::from(untagged_variant.as_u32()));
++                fx.bcx.ins().iconcat(untagged_variant, zero)
++            } else {
++                fx.bcx.ins().iconst(cast_to, i64::from(untagged_variant.as_u32()))
++            };
++            let discr = fx.bcx.ins().select(is_niche, tagged_discr, untagged_variant);
 +            let res = CValue::by_val(discr, dest_layout);
 +            dest.write_cvalue(fx, res);
 +        }
 +    }
 +}
index 6a430b5215e36d11b483213b5bb7e74c75425727,0000000000000000000000000000000000000000..be1b8c9ead3bf2e133d326818df523e19f201fda
mode 100644,000000..100644
--- /dev/null
@@@ -1,391 -1,0 +1,395 @@@
-     jit_module.finalize_definitions();
 +//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
 +//! files.
 +
 +use std::cell::RefCell;
 +use std::ffi::CString;
 +use std::os::raw::{c_char, c_int};
 +use std::sync::{mpsc, Mutex};
 +
 +use rustc_codegen_ssa::CrateInfo;
 +use rustc_middle::mir::mono::MonoItem;
 +use rustc_session::Session;
 +use rustc_span::Symbol;
 +
 +use cranelift_jit::{JITBuilder, JITModule};
 +
 +// FIXME use std::sync::OnceLock once it stabilizes
 +use once_cell::sync::OnceCell;
 +
 +use crate::{prelude::*, BackendConfig};
 +use crate::{CodegenCx, CodegenMode};
 +
 +struct JitState {
 +    backend_config: BackendConfig,
 +    jit_module: JITModule,
 +}
 +
 +thread_local! {
 +    static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
 +}
 +
 +/// The Sender owned by the rustc thread
 +static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
 +
 +/// A message that is sent from the jitted runtime to the rustc thread.
 +/// Senders are responsible for upholding `Send` semantics.
 +enum UnsafeMessage {
 +    /// Request that the specified `Instance` be lazily jitted.
 +    ///
 +    /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
 +    /// this message is sent.
 +    JitFn {
 +        instance_ptr: *const Instance<'static>,
 +        trampoline_ptr: *const u8,
 +        tx: mpsc::Sender<*const u8>,
 +    },
 +}
 +unsafe impl Send for UnsafeMessage {}
 +
 +impl UnsafeMessage {
 +    /// Send the message.
 +    fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
 +        thread_local! {
 +            /// The Sender owned by the local thread
 +            static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
 +                GLOBAL_MESSAGE_SENDER
 +                    .get().unwrap()
 +                    .lock().unwrap()
 +                    .clone();
 +        }
 +        LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
 +    }
 +}
 +
 +fn create_jit_module(
 +    tcx: TyCtxt<'_>,
 +    backend_config: &BackendConfig,
 +    hotswap: bool,
 +) -> (JITModule, CodegenCx) {
 +    let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
 +
 +    let isa = crate::build_isa(tcx.sess, backend_config);
 +    let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
 +    jit_builder.hotswap(hotswap);
 +    crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
 +    jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
 +    jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
 +    let mut jit_module = JITModule::new(jit_builder);
 +
 +    let mut cx = crate::CodegenCx::new(
 +        tcx,
 +        backend_config.clone(),
 +        jit_module.isa(),
 +        false,
 +        Symbol::intern("dummy_cgu_name"),
 +    );
 +
 +    crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
 +    crate::main_shim::maybe_create_entry_wrapper(
 +        tcx,
 +        &mut jit_module,
 +        &mut cx.unwind_context,
 +        true,
 +        true,
 +    );
 +
 +    (jit_module, cx)
 +}
 +
 +pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
 +    if !tcx.sess.opts.output_types.should_codegen() {
 +        tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
 +    }
 +
 +    if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
 +        tcx.sess.fatal("can't jit non-executable crate");
 +    }
 +
 +    let (mut jit_module, mut cx) = create_jit_module(
 +        tcx,
 +        &backend_config,
 +        matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
 +    );
 +    let mut cached_context = Context::new();
 +
 +    let (_, cgus) = tcx.collect_and_partition_mono_items(());
 +    let mono_items = cgus
 +        .iter()
 +        .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
 +        .flatten()
 +        .collect::<FxHashMap<_, (_, _)>>()
 +        .into_iter()
 +        .collect::<Vec<(_, (_, _))>>();
 +
 +    super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
 +        super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
 +        for (mono_item, _) in mono_items {
 +            match mono_item {
 +                MonoItem::Fn(inst) => match backend_config.codegen_mode {
 +                    CodegenMode::Aot => unreachable!(),
 +                    CodegenMode::Jit => {
 +                        tcx.sess.time("codegen fn", || {
 +                            crate::base::codegen_and_compile_fn(
 +                                tcx,
 +                                &mut cx,
 +                                &mut cached_context,
 +                                &mut jit_module,
 +                                inst,
 +                            )
 +                        });
 +                    }
 +                    CodegenMode::JitLazy => {
 +                        codegen_shim(tcx, &mut cx, &mut cached_context, &mut jit_module, inst)
 +                    }
 +                },
 +                MonoItem::Static(def_id) => {
 +                    crate::constant::codegen_static(tcx, &mut jit_module, def_id);
 +                }
 +                MonoItem::GlobalAsm(item_id) => {
 +                    let item = tcx.hir().item(item_id);
 +                    tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
 +                }
 +            }
 +        }
 +    });
 +
 +    if !cx.global_asm.is_empty() {
 +        tcx.sess.fatal("Inline asm is not supported in JIT mode");
 +    }
 +
 +    tcx.sess.abort_if_errors();
 +
-             let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
++    jit_module.finalize_definitions().unwrap();
 +    unsafe { cx.unwind_context.register_jit(&jit_module) };
 +
 +    println!(
 +        "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
 +    );
 +
 +    let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
 +        .chain(backend_config.jit_args.iter().map(|arg| &**arg))
 +        .map(|arg| CString::new(arg).unwrap())
 +        .collect::<Vec<_>>();
 +
 +    let start_sig = Signature {
 +        params: vec![
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +        ],
 +        returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
 +        call_conv: jit_module.target_config().default_call_conv,
 +    };
 +    let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
 +    let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
 +
 +    LAZY_JIT_STATE.with(|lazy_jit_state| {
 +        let mut lazy_jit_state = lazy_jit_state.borrow_mut();
 +        assert!(lazy_jit_state.is_none());
 +        *lazy_jit_state = Some(JitState { backend_config, jit_module });
 +    });
 +
 +    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
 +        unsafe { ::std::mem::transmute(finalized_start) };
 +
 +    let (tx, rx) = mpsc::channel();
 +    GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
 +
 +    // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
 +    // (eg to lazily JIT further functions as required)
 +    std::thread::spawn(move || {
 +        let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
 +
 +        // Push a null pointer as a terminating argument. This is required by POSIX and
 +        // useful as some dynamic linkers use it as a marker to jump over.
 +        argv.push(std::ptr::null());
 +
 +        let ret = f(args.len() as c_int, argv.as_ptr());
 +        std::process::exit(ret);
 +    });
 +
 +    // Handle messages
 +    loop {
 +        match rx.recv().unwrap() {
 +            // lazy JIT compilation request - compile requested instance and return pointer to result
 +            UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
 +                tx.send(jit_fn(instance_ptr, trampoline_ptr))
 +                    .expect("jitted runtime hung up before response to lazy JIT request was sent");
 +            }
 +        }
 +    }
 +}
 +
 +extern "C" fn clif_jit_fn(
 +    instance_ptr: *const Instance<'static>,
 +    trampoline_ptr: *const u8,
 +) -> *const u8 {
 +    // send the JIT request to the rustc thread, with a channel for the response
 +    let (tx, rx) = mpsc::channel();
 +    UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
 +        .send()
 +        .expect("rustc thread hung up before lazy JIT request was sent");
 +
 +    // block on JIT compilation result
 +    rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
 +}
 +
 +fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
 +    rustc_middle::ty::tls::with(|tcx| {
 +        // lift is used to ensure the correct lifetime for instance.
 +        let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
 +
 +        LAZY_JIT_STATE.with(|lazy_jit_state| {
 +            let mut lazy_jit_state = lazy_jit_state.borrow_mut();
 +            let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
 +            let jit_module = &mut lazy_jit_state.jit_module;
 +            let backend_config = lazy_jit_state.backend_config.clone();
 +
 +            let name = tcx.symbol_name(instance).name;
-             jit_module.finalize_definitions();
++            let sig = crate::abi::get_function_sig(
++                tcx,
++                jit_module.target_config().default_call_conv,
++                instance,
++            );
 +            let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
 +
 +            let current_ptr = jit_module.read_got_entry(func_id);
 +
 +            // If the function's GOT entry has already been updated to point at something other
 +            // than the shim trampoline, don't re-jit but just return the new pointer instead.
 +            // This does not need synchronization as this code is executed only by a sole rustc
 +            // thread.
 +            if current_ptr != trampoline_ptr {
 +                return current_ptr;
 +            }
 +
 +            jit_module.prepare_for_function_redefine(func_id).unwrap();
 +
 +            let mut cx = crate::CodegenCx::new(
 +                tcx,
 +                backend_config,
 +                jit_module.isa(),
 +                false,
 +                Symbol::intern("dummy_cgu_name"),
 +            );
 +            tcx.sess.time("codegen fn", || {
 +                crate::base::codegen_and_compile_fn(
 +                    tcx,
 +                    &mut cx,
 +                    &mut Context::new(),
 +                    jit_module,
 +                    instance,
 +                )
 +            });
 +
 +            assert!(cx.global_asm.is_empty());
-     let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
++            jit_module.finalize_definitions().unwrap();
 +            unsafe { cx.unwind_context.register_jit(&jit_module) };
 +            jit_module.get_finalized_function(func_id)
 +        })
 +    })
 +}
 +
 +fn dep_symbol_lookup_fn(
 +    sess: &Session,
 +    crate_info: CrateInfo,
 +) -> Box<dyn Fn(&str) -> Option<*const u8>> {
 +    use rustc_middle::middle::dependency_format::Linkage;
 +
 +    let mut dylib_paths = Vec::new();
 +
 +    let data = &crate_info
 +        .dependency_formats
 +        .iter()
 +        .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
 +        .unwrap()
 +        .1;
 +    for &cnum in &crate_info.used_crates {
 +        let src = &crate_info.used_crate_source[&cnum];
 +        match data[cnum.as_usize() - 1] {
 +            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
 +            Linkage::Static => {
 +                let name = crate_info.crate_name[&cnum];
 +                let mut err = sess.struct_err(&format!("Can't load static lib {}", name));
 +                err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
 +                err.emit();
 +            }
 +            Linkage::Dynamic => {
 +                dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
 +            }
 +        }
 +    }
 +
 +    let imported_dylibs = Box::leak(
 +        dylib_paths
 +            .into_iter()
 +            .map(|path| unsafe { libloading::Library::new(&path).unwrap() })
 +            .collect::<Box<[_]>>(),
 +    );
 +
 +    sess.abort_if_errors();
 +
 +    Box::new(move |sym_name| {
 +        for dylib in &*imported_dylibs {
 +            if let Ok(sym) = unsafe { dylib.get::<*const u8>(sym_name.as_bytes()) } {
 +                return Some(*sym);
 +            }
 +        }
 +        None
 +    })
 +}
 +
 +fn codegen_shim<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    cx: &mut CodegenCx,
 +    cached_context: &mut Context,
 +    module: &mut JITModule,
 +    inst: Instance<'tcx>,
 +) {
 +    let pointer_type = module.target_config().pointer_type();
 +
 +    let name = tcx.symbol_name(inst).name;
++    let sig = crate::abi::get_function_sig(tcx, module.target_config().default_call_conv, inst);
 +    let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
 +
 +    let instance_ptr = Box::into_raw(Box::new(inst));
 +
 +    let jit_fn = module
 +        .declare_function(
 +            "__clif_jit_fn",
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: module.target_config().default_call_conv,
 +                params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
 +                returns: vec![AbiParam::new(pointer_type)],
 +            },
 +        )
 +        .unwrap();
 +
 +    let context = cached_context;
 +    context.clear();
 +    let trampoline = &mut context.func;
 +    trampoline.signature = sig.clone();
 +
 +    let mut builder_ctx = FunctionBuilderContext::new();
 +    let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
 +
 +    let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
 +    let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
 +    let sig_ref = trampoline_builder.func.import_signature(sig);
 +
 +    let entry_block = trampoline_builder.create_block();
 +    trampoline_builder.append_block_params_for_function_params(entry_block);
 +    let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
 +
 +    trampoline_builder.switch_to_block(entry_block);
 +    let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
 +    let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
 +    let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
 +    let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
 +    let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
 +    let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
 +    trampoline_builder.ins().return_(&ret_vals);
 +
 +    module.define_function(func_id, context).unwrap();
 +    cx.unwind_context.add_function(func_id, context, module.isa());
 +}
index 8f5714ecb417704e446edda114f65b4966facb66,0000000000000000000000000000000000000000..6e925cea277078b68ac5d03de3cc18fae0e91e71
mode 100644,000000..100644
--- /dev/null
@@@ -1,53 -1,0 +1,54 @@@
-                     let sig = get_function_sig(tcx, module.isa().triple(), instance);
 +//! Drivers are responsible for calling [`codegen_fn`] or [`codegen_static`] for each mono item and
 +//! performing any further actions like JIT executing or writing object files.
 +//!
 +//! [`codegen_fn`]: crate::base::codegen_fn
 +//! [`codegen_static`]: crate::constant::codegen_static
 +
 +use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
 +
 +use crate::prelude::*;
 +
 +pub(crate) mod aot;
 +#[cfg(feature = "jit")]
 +pub(crate) mod jit;
 +
 +fn predefine_mono_items<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    module: &mut dyn Module,
 +    mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
 +) {
 +    tcx.sess.time("predefine functions", || {
 +        let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
 +        for &(mono_item, (linkage, visibility)) in mono_items {
 +            match mono_item {
 +                MonoItem::Fn(instance) => {
 +                    let name = tcx.symbol_name(instance).name;
 +                    let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
++                    let sig =
++                        get_function_sig(tcx, module.target_config().default_call_conv, instance);
 +                    let linkage = crate::linkage::get_clif_linkage(
 +                        mono_item,
 +                        linkage,
 +                        visibility,
 +                        is_compiler_builtins,
 +                    );
 +                    module.declare_function(name, linkage, &sig).unwrap();
 +                }
 +                MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
 +            }
 +        }
 +    });
 +}
 +
 +fn time<R>(tcx: TyCtxt<'_>, display: bool, name: &'static str, f: impl FnOnce() -> R) -> R {
 +    if display {
 +        println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
 +        let before = std::time::Instant::now();
 +        let res = tcx.sess.time(name, f);
 +        let after = std::time::Instant::now();
 +        println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
 +        res
 +    } else {
 +        tcx.sess.time(name, f)
 +    }
 +}
index 783d426c30bcc0d14cf94bb658b854e1e4b337fb,0000000000000000000000000000000000000000..f722e52284fe8205f269103a620fec33015ef9a4
mode 100644,000000..100644
--- /dev/null
@@@ -1,196 -1,0 +1,54 @@@
-     _substs: SubstsRef<'tcx>,
 +//! Emulate LLVM intrinsics
 +
 +use crate::intrinsics::*;
 +use crate::prelude::*;
 +
 +use rustc_middle::ty::subst::SubstsRef;
 +
 +pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    intrinsic: &str,
-     match intrinsic {
-         "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
-             // Spin loop hint
-         }
++    substs: SubstsRef<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +    target: Option<BasicBlock>,
 +) {
-         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
-         "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
++    if intrinsic.starts_with("llvm.aarch64") {
++        return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
++            fx, intrinsic, substs, args, ret, target,
++        );
++    }
++    if intrinsic.starts_with("llvm.x86") {
++        return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, substs, args, ret, target);
++    }
 +
-             let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
-             let lane_ty = fx.clif_type(lane_ty).unwrap();
-             assert!(lane_count <= 32);
-             let mut res = fx.bcx.ins().iconst(types::I32, 0);
-             for lane in (0..lane_count).rev() {
-                 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
-                 // cast float to int
-                 let a_lane = match lane_ty {
-                     types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
-                     types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
-                     _ => a_lane,
-                 };
-                 // extract sign bit of an int
-                 let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
-                 // shift sign bit into result
-                 let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
-                 res = fx.bcx.ins().ishl_imm(res, 1);
-                 res = fx.bcx.ins().bor(res, a_lane_sign);
-             }
-             let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
-             ret.write_cvalue(fx, res);
-         }
-         "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
-             let (x, y, kind) = match args {
-                 [x, y, kind] => (x, y, kind),
-                 _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-             };
-             let x = codegen_operand(fx, x);
-             let y = codegen_operand(fx, y);
-             let kind = crate::constant::mir_operand_get_const_val(fx, kind)
-                 .expect("llvm.x86.sse2.cmp.* kind not const");
-             let flt_cc = match kind
-                 .try_to_bits(Size::from_bytes(1))
-                 .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
-             {
-                 0 => FloatCC::Equal,
-                 1 => FloatCC::LessThan,
-                 2 => FloatCC::LessThanOrEqual,
-                 7 => FloatCC::Ordered,
-                 3 => FloatCC::Unordered,
-                 4 => FloatCC::NotEqual,
-                 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
-                 6 => FloatCC::UnorderedOrGreaterThan,
-                 kind => unreachable!("kind {:?}", kind),
-             };
-             simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
-                 let res_lane = match lane_ty.kind() {
-                     ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
-                     _ => unreachable!("{:?}", lane_ty),
-                 };
-                 bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
++    match intrinsic {
++        _ if intrinsic.starts_with("llvm.ctlz.v") => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
-         "llvm.x86.sse2.psrli.d" => {
-             let (a, imm8) = match args {
-                 [a, imm8] => (a, imm8),
-                 _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-             };
-             let a = codegen_operand(fx, a);
-             let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                 .expect("llvm.x86.sse2.psrli.d imm8 not const");
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++                fx.bcx.ins().clz(lane)
 +            });
 +        }
-             simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                 .try_to_bits(Size::from_bytes(4))
-                 .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-             {
-                 imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                 _ => fx.bcx.ins().iconst(types::I32, 0),
-             });
-         }
-         "llvm.x86.sse2.pslli.d" => {
-             let (a, imm8) = match args {
-                 [a, imm8] => (a, imm8),
-                 _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-             };
-             let a = codegen_operand(fx, a);
-             let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                 .expect("llvm.x86.sse2.psrli.d imm8 not const");
 +
-             simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                 .try_to_bits(Size::from_bytes(4))
-                 .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-             {
-                 imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                 _ => fx.bcx.ins().iconst(types::I32, 0),
++        _ if intrinsic.starts_with("llvm.ctpop.v") => {
++            intrinsic_args!(fx, args => (a); intrinsic);
 +
-         "llvm.x86.sse2.storeu.dq" => {
-             intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
-             let mem_addr = mem_addr.load_scalar(fx);
-             // FIXME correctly handle the unalignment
-             let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
-             dest.write_cvalue(fx, a);
-         }
-         "llvm.x86.addcarry.64" => {
-             intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
-             let c_in = c_in.load_scalar(fx);
-             llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
-         }
-         "llvm.x86.subborrow.64" => {
-             intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
-             let b_in = b_in.load_scalar(fx);
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++                fx.bcx.ins().popcnt(lane)
 +            });
 +        }
-             llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
-         }
 +
- // llvm.x86.avx2.vperm2i128
- // llvm.x86.ssse3.pshuf.b.128
- // llvm.x86.avx2.pshuf.b
- // llvm.x86.avx2.psrli.w
- // llvm.x86.sse2.psrli.w
- fn llvm_add_sub<'tcx>(
-     fx: &mut FunctionCx<'_, '_, 'tcx>,
-     bin_op: BinOp,
-     ret: CPlace<'tcx>,
-     cb_in: Value,
-     a: CValue<'tcx>,
-     b: CValue<'tcx>,
- ) {
-     assert_eq!(
-         a.layout().ty,
-         fx.tcx.types.u64,
-         "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
-     );
-     assert_eq!(
-         b.layout().ty,
-         fx.tcx.types.u64,
-         "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
-     );
-     // c + carry -> c + first intermediate carry or borrow respectively
-     let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
-     let c = int0.value_field(fx, mir::Field::new(0));
-     let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
-     // c + carry -> c + second intermediate carry or borrow respectively
-     let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
-     let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
-     let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
-     let (c, cb1) = int1.load_scalar_pair(fx);
-     // carry0 | carry1 -> carry or borrow respectively
-     let cb_out = fx.bcx.ins().bor(cb0, cb1);
-     let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
-     let val = CValue::by_val_pair(cb_out, c, layout);
-     ret.write_cvalue(fx, val);
- }
 +        _ => {
 +            fx.tcx
 +                .sess
 +                .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
 +            crate::trap::trap_unimplemented(fx, intrinsic);
 +            return;
 +        }
 +    }
 +
 +    let dest = target.expect("all llvm intrinsics used by stdlib should return");
 +    let ret_block = fx.get_block(dest);
 +    fx.bcx.ins().jump(ret_block, &[]);
 +}
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..b431158d2690f02d5f3077820ec7c1f36070f7bf
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,222 @@@
++//! Emulate AArch64 LLVM intrinsics
++
++use crate::intrinsics::*;
++use crate::prelude::*;
++
++use rustc_middle::ty::subst::SubstsRef;
++
++pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
++    fx: &mut FunctionCx<'_, '_, 'tcx>,
++    intrinsic: &str,
++    _substs: SubstsRef<'tcx>,
++    args: &[mir::Operand<'tcx>],
++    ret: CPlace<'tcx>,
++    target: Option<BasicBlock>,
++) {
++    // llvm.aarch64.neon.sqshl.v*i*
++
++    match intrinsic {
++        "llvm.aarch64.isb" => {
++            fx.bcx.ins().fence();
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.abs.v") => {
++            intrinsic_args!(fx, args => (a); intrinsic);
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++                fx.bcx.ins().iabs(lane)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.cls.v") => {
++            intrinsic_args!(fx, args => (a); intrinsic);
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++                fx.bcx.ins().cls(lane)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.rbit.v") => {
++            intrinsic_args!(fx, args => (a); intrinsic);
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++                fx.bcx.ins().bitrev(lane)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sqadd.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
++                crate::num::codegen_saturating_int_binop(fx, BinOp::Add, x_lane, y_lane)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sqsub.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
++                crate::num::codegen_saturating_int_binop(fx, BinOp::Sub, x_lane, y_lane)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.smax.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane(
++                fx,
++                x,
++                y,
++                ret,
++                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
++                    let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane);
++                    fx.bcx.ins().select(gt, x_lane, y_lane)
++                },
++            );
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.umax.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane(
++                fx,
++                x,
++                y,
++                ret,
++                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
++                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane);
++                    fx.bcx.ins().select(gt, x_lane, y_lane)
++                },
++            );
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.smaxv.i") => {
++            intrinsic_args!(fx, args => (v); intrinsic);
++
++            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
++                let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b);
++                fx.bcx.ins().select(gt, a, b)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.umaxv.i") => {
++            intrinsic_args!(fx, args => (v); intrinsic);
++
++            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
++                let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b);
++                fx.bcx.ins().select(gt, a, b)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.smin.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane(
++                fx,
++                x,
++                y,
++                ret,
++                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
++                    let gt = fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane);
++                    fx.bcx.ins().select(gt, x_lane, y_lane)
++                },
++            );
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.umin.v") => {
++            intrinsic_args!(fx, args => (x, y); intrinsic);
++
++            simd_pair_for_each_lane(
++                fx,
++                x,
++                y,
++                ret,
++                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
++                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane);
++                    fx.bcx.ins().select(gt, x_lane, y_lane)
++                },
++            );
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sminv.i") => {
++            intrinsic_args!(fx, args => (v); intrinsic);
++
++            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
++                let gt = fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b);
++                fx.bcx.ins().select(gt, a, b)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.uminv.i") => {
++            intrinsic_args!(fx, args => (v); intrinsic);
++
++            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
++                let gt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b);
++                fx.bcx.ins().select(gt, a, b)
++            });
++        }
++
++        /*
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sshl.v")
++            || intrinsic.starts_with("llvm.aarch64.neon.sqshl.v")
++            // FIXME split this one out once saturating is implemented
++            || intrinsic.starts_with("llvm.aarch64.neon.sqshlu.v") =>
++        {
++            intrinsic_args!(fx, args => (a, b); intrinsic);
++
++            simd_pair_for_each_lane(fx, a, b, ret, &|fx, _lane_ty, _res_lane_ty, a, b| {
++                // FIXME saturate?
++                fx.bcx.ins().ishl(a, b)
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrn.v") => {
++            let (a, imm32) = match args {
++                [a, imm32] => (a, imm32),
++                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
++            };
++            let a = codegen_operand(fx, a);
++            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
++                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
++                .try_to_bits(Size::from_bytes(4))
++                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
++            {
++                imm32 if imm32 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm32 as u8)),
++                _ => fx.bcx.ins().iconst(types::I32, 0),
++            });
++        }
++
++        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrun.v") => {
++            let (a, imm32) = match args {
++                [a, imm32] => (a, imm32),
++                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
++            };
++            let a = codegen_operand(fx, a);
++            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
++                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
++                .try_to_bits(Size::from_bytes(4))
++                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
++            {
++                imm32 if imm32 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm32 as u8)),
++                _ => fx.bcx.ins().iconst(types::I32, 0),
++            });
++        }
++        */
++        _ => {
++            fx.tcx.sess.warn(&format!(
++                "unsupported AArch64 llvm intrinsic {}; replacing with trap",
++                intrinsic
++            ));
++            crate::trap::trap_unimplemented(fx, intrinsic);
++            return;
++        }
++    }
++
++    let dest = target.expect("all llvm intrinsics used by stdlib should return");
++    let ret_block = fx.get_block(dest);
++    fx.bcx.ins().jump(ret_block, &[]);
++}
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..7bc161fbe55236a1b03ad08db5a80e274f94d7f8
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,197 @@@
++//! Emulate x86 LLVM intrinsics
++
++use crate::intrinsics::*;
++use crate::prelude::*;
++
++use rustc_middle::ty::subst::SubstsRef;
++
++pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
++    fx: &mut FunctionCx<'_, '_, 'tcx>,
++    intrinsic: &str,
++    _substs: SubstsRef<'tcx>,
++    args: &[mir::Operand<'tcx>],
++    ret: CPlace<'tcx>,
++    target: Option<BasicBlock>,
++) {
++    match intrinsic {
++        "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
++            // Spin loop hint
++        }
++
++        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
++        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
++            intrinsic_args!(fx, args => (a); intrinsic);
++
++            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
++            let lane_ty = fx.clif_type(lane_ty).unwrap();
++            assert!(lane_count <= 32);
++
++            let mut res = fx.bcx.ins().iconst(types::I32, 0);
++
++            for lane in (0..lane_count).rev() {
++                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
++
++                // cast float to int
++                let a_lane = match lane_ty {
++                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
++                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
++                    _ => a_lane,
++                };
++
++                // extract sign bit of an int
++                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
++
++                // shift sign bit into result
++                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
++                res = fx.bcx.ins().ishl_imm(res, 1);
++                res = fx.bcx.ins().bor(res, a_lane_sign);
++            }
++
++            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
++            ret.write_cvalue(fx, res);
++        }
++        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
++            let (x, y, kind) = match args {
++                [x, y, kind] => (x, y, kind),
++                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
++            };
++            let x = codegen_operand(fx, x);
++            let y = codegen_operand(fx, y);
++            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
++                .expect("llvm.x86.sse2.cmp.* kind not const");
++
++            let flt_cc = match kind
++                .try_to_bits(Size::from_bytes(1))
++                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
++            {
++                0 => FloatCC::Equal,
++                1 => FloatCC::LessThan,
++                2 => FloatCC::LessThanOrEqual,
++                7 => FloatCC::Ordered,
++                3 => FloatCC::Unordered,
++                4 => FloatCC::NotEqual,
++                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
++                6 => FloatCC::UnorderedOrGreaterThan,
++                kind => unreachable!("kind {:?}", kind),
++            };
++
++            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
++                let res_lane = match lane_ty.kind() {
++                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
++                    _ => unreachable!("{:?}", lane_ty),
++                };
++                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
++            });
++        }
++        "llvm.x86.sse2.psrli.d" => {
++            let (a, imm8) = match args {
++                [a, imm8] => (a, imm8),
++                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
++            };
++            let a = codegen_operand(fx, a);
++            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
++                .expect("llvm.x86.sse2.psrli.d imm8 not const");
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
++                .try_to_bits(Size::from_bytes(4))
++                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
++            {
++                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
++                _ => fx.bcx.ins().iconst(types::I32, 0),
++            });
++        }
++        "llvm.x86.sse2.pslli.d" => {
++            let (a, imm8) = match args {
++                [a, imm8] => (a, imm8),
++                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
++            };
++            let a = codegen_operand(fx, a);
++            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
++                .expect("llvm.x86.sse2.psrli.d imm8 not const");
++
++            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
++                .try_to_bits(Size::from_bytes(4))
++                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
++            {
++                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
++                _ => fx.bcx.ins().iconst(types::I32, 0),
++            });
++        }
++        "llvm.x86.sse2.storeu.dq" => {
++            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
++            let mem_addr = mem_addr.load_scalar(fx);
++
++            // FIXME correctly handle the unalignment
++            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
++            dest.write_cvalue(fx, a);
++        }
++        "llvm.x86.addcarry.64" => {
++            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
++            let c_in = c_in.load_scalar(fx);
++
++            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
++        }
++        "llvm.x86.subborrow.64" => {
++            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
++            let b_in = b_in.load_scalar(fx);
++
++            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
++        }
++        _ => {
++            fx.tcx.sess.warn(&format!(
++                "unsupported x86 llvm intrinsic {}; replacing with trap",
++                intrinsic
++            ));
++            crate::trap::trap_unimplemented(fx, intrinsic);
++            return;
++        }
++    }
++
++    let dest = target.expect("all llvm intrinsics used by stdlib should return");
++    let ret_block = fx.get_block(dest);
++    fx.bcx.ins().jump(ret_block, &[]);
++}
++
++// llvm.x86.avx2.vperm2i128
++// llvm.x86.ssse3.pshuf.b.128
++// llvm.x86.avx2.pshuf.b
++// llvm.x86.avx2.psrli.w
++// llvm.x86.sse2.psrli.w
++
++fn llvm_add_sub<'tcx>(
++    fx: &mut FunctionCx<'_, '_, 'tcx>,
++    bin_op: BinOp,
++    ret: CPlace<'tcx>,
++    cb_in: Value,
++    a: CValue<'tcx>,
++    b: CValue<'tcx>,
++) {
++    assert_eq!(
++        a.layout().ty,
++        fx.tcx.types.u64,
++        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
++    );
++    assert_eq!(
++        b.layout().ty,
++        fx.tcx.types.u64,
++        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
++    );
++
++    // c + carry -> c + first intermediate carry or borrow respectively
++    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
++    let c = int0.value_field(fx, mir::Field::new(0));
++    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
++
++    // c + carry -> c + second intermediate carry or borrow respectively
++    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
++    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
++    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
++    let (c, cb1) = int1.load_scalar_pair(fx);
++
++    // carry0 | carry1 -> carry or borrow respectively
++    let cb_out = fx.bcx.ins().bor(cb0, cb1);
++
++    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
++    let val = CValue::by_val_pair(cb_out, c, layout);
++    ret.write_cvalue(fx, val);
++}
index 0302b843aa226345328a278674026baa33fda632,0000000000000000000000000000000000000000..7a380acf798572a7606130959a7517cfecb2462b
mode 100644,000000..100644
--- /dev/null
@@@ -1,1316 -1,0 +1,1244 @@@
-     let val = fx.bcx.ins().bint(int_ty, val);
-     let mut res = fx.bcx.ins().ineg(val);
 +//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 +//! and LLVM intrinsics that have symbol names starting with `llvm.`.
 +
 +macro_rules! intrinsic_args {
 +    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
 +        #[allow(unused_parens)]
 +        let ($($arg),*) = if let [$($arg),*] = $args {
 +            ($(codegen_operand($fx, $arg)),*)
 +        } else {
 +            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
 +        };
 +    }
 +}
 +
 +mod cpuid;
 +mod llvm;
++mod llvm_aarch64;
++mod llvm_x86;
 +mod simd;
 +
 +pub(crate) use cpuid::codegen_cpuid_call;
 +pub(crate) use llvm::codegen_llvm_intrinsic_call;
 +
 +use rustc_middle::ty::print::with_no_trimmed_paths;
 +use rustc_middle::ty::subst::SubstsRef;
 +use rustc_span::symbol::{kw, sym, Symbol};
 +
 +use crate::prelude::*;
 +use cranelift_codegen::ir::AtomicRmwOp;
 +
 +fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
 +    bug!("wrong number of args for intrinsic {}", intrinsic);
 +}
 +
 +fn report_atomic_type_validation_error<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    intrinsic: Symbol,
 +    span: Span,
 +    ty: Ty<'tcx>,
 +) {
 +    fx.tcx.sess.span_err(
 +        span,
 +        &format!(
 +            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
 +            intrinsic, ty
 +        ),
 +    );
 +    // Prevent verifier error
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
 +
 +pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
 +    let (element, count) = match layout.abi {
 +        Abi::Vector { element, count } => (element, count),
 +        _ => unreachable!(),
 +    };
 +
 +    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
 +        // Cranelift currently only implements icmp for 128-bit vectors.
 +        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
 +        _ => None,
 +    }
 +}
 +
 +fn simd_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
 +) {
 +    let layout = val.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
 +        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
 +        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 +
 +        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_pair_for_each_lane_typed<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    x: CValue<'tcx>,
 +    y: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
 +) {
 +    assert_eq!(x.layout(), y.layout());
 +    let layout = x.layout();
 +
 +    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
 +        let x_lane = x.value_lane(fx, lane_idx);
 +        let y_lane = y.value_lane(fx, lane_idx);
 +
 +        let res_lane = f(fx, x_lane, y_lane);
 +
 +        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_pair_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    x: CValue<'tcx>,
 +    y: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
 +) {
 +    assert_eq!(x.layout(), y.layout());
 +    let layout = x.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
 +        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
 +        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
 +        let res_lane = CValue::by_val(res_lane, ret_lane_layout);
 +
 +        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_reduce<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    acc: Option<Value>,
 +    ret: CPlace<'tcx>,
 +    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    assert_eq!(lane_layout, ret.layout());
 +
 +    let (mut res_val, start_lane) =
 +        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
 +    for lane_idx in start_lane..lane_count {
 +        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +        res_val = f(fx, lane_layout.ty, res_val, lane);
 +    }
 +    let res = CValue::by_val(res_val, lane_layout);
 +    ret.write_cvalue(fx, res);
 +}
 +
 +// FIXME move all uses to `simd_reduce`
 +fn simd_reduce_bool<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    assert!(ret.layout().ty.is_bool());
 +
 +    let res_val = val.value_lane(fx, 0).load_scalar(fx);
 +    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
 +    for lane_idx in 1..lane_count {
 +        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
 +        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
 +        res_val = f(fx, res_val, lane);
 +    }
 +    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
 +        fx.bcx.ins().ireduce(types::I8, res_val)
 +    } else {
 +        res_val
 +    };
 +    let res = CValue::by_val(res_val, ret.layout());
 +    ret.write_cvalue(fx, res);
 +}
 +
 +fn bool_to_zero_or_max_uint<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    ty: Ty<'tcx>,
 +    val: Value,
 +) -> Value {
 +    let ty = fx.clif_type(ty).unwrap();
 +
 +    let int_ty = match ty {
 +        types::F32 => types::I32,
 +        types::F64 => types::I64,
 +        ty => ty,
 +    };
 +
-             // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
-             fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
-                 match bcx.func.dfg.value_type(v) {
-                     types::I8 => v,
-                     // https://code.woboq.org/gcc/include/bits/byteswap.h.html
-                     types::I16 => {
-                         let tmp1 = bcx.ins().ishl_imm(v, 8);
-                         let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
-                         let tmp2 = bcx.ins().ushr_imm(v, 8);
-                         let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
-                         bcx.ins().bor(n1, n2)
-                     }
-                     types::I32 => {
-                         let tmp1 = bcx.ins().ishl_imm(v, 24);
-                         let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
-                         let tmp2 = bcx.ins().ishl_imm(v, 8);
-                         let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
-                         let tmp3 = bcx.ins().ushr_imm(v, 8);
-                         let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
-                         let tmp4 = bcx.ins().ushr_imm(v, 24);
-                         let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
-                         let or_tmp1 = bcx.ins().bor(n1, n2);
-                         let or_tmp2 = bcx.ins().bor(n3, n4);
-                         bcx.ins().bor(or_tmp1, or_tmp2)
-                     }
-                     types::I64 => {
-                         let tmp1 = bcx.ins().ishl_imm(v, 56);
-                         let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
-                         let tmp2 = bcx.ins().ishl_imm(v, 40);
-                         let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
-                         let tmp3 = bcx.ins().ishl_imm(v, 24);
-                         let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
-                         let tmp4 = bcx.ins().ishl_imm(v, 8);
-                         let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
-                         let tmp5 = bcx.ins().ushr_imm(v, 8);
-                         let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
-                         let tmp6 = bcx.ins().ushr_imm(v, 24);
-                         let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
-                         let tmp7 = bcx.ins().ushr_imm(v, 40);
-                         let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
-                         let tmp8 = bcx.ins().ushr_imm(v, 56);
-                         let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
-                         let or_tmp1 = bcx.ins().bor(n1, n2);
-                         let or_tmp2 = bcx.ins().bor(n3, n4);
-                         let or_tmp3 = bcx.ins().bor(n5, n6);
-                         let or_tmp4 = bcx.ins().bor(n7, n8);
-                         let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
-                         let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
-                         bcx.ins().bor(or_tmp5, or_tmp6)
-                     }
-                     types::I128 => {
-                         let (lo, hi) = bcx.ins().isplit(v);
-                         let lo = swap(bcx, lo);
-                         let hi = swap(bcx, hi);
-                         bcx.ins().iconcat(hi, lo)
-                     }
-                     ty => unreachable!("bswap {}", ty),
-                 }
-             }
++    let mut res = fx.bcx.ins().bmask(int_ty, val);
 +
 +    if ty.is_float() {
 +        res = fx.bcx.ins().bitcast(ty, res);
 +    }
 +
 +    res
 +}
 +
 +pub(crate) fn codegen_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    destination: CPlace<'tcx>,
 +    target: Option<BasicBlock>,
 +    source_info: mir::SourceInfo,
 +) {
 +    let intrinsic = fx.tcx.item_name(instance.def_id());
 +    let substs = instance.substs;
 +
 +    let target = if let Some(target) = target {
 +        target
 +    } else {
 +        // Insert non-returning intrinsics here
 +        match intrinsic {
 +            sym::abort => {
 +                fx.bcx.ins().trap(TrapCode::User(0));
 +            }
 +            sym::transmute => {
 +                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
 +            }
 +            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
 +        }
 +        return;
 +    };
 +
 +    if intrinsic.as_str().starts_with("simd_") {
 +        self::simd::codegen_simd_intrinsic_call(
 +            fx,
 +            intrinsic,
 +            substs,
 +            args,
 +            destination,
 +            source_info.span,
 +        );
 +        let ret_block = fx.get_block(target);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
 +        let ret_block = fx.get_block(target);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        codegen_regular_intrinsic_call(
 +            fx,
 +            instance,
 +            intrinsic,
 +            substs,
 +            args,
 +            destination,
 +            Some(target),
 +            source_info,
 +        );
 +    }
 +}
 +
 +fn codegen_float_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    intrinsic: Symbol,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +) -> bool {
 +    let (name, arg_count, ty) = match intrinsic {
 +        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
 +        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
 +        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
 +        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
 +        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
 +        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
 +        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
 +        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
 +        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
 +        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
 +        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
 +        sym::logf64 => ("log", 1, fx.tcx.types.f64),
 +        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
 +        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
 +        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
 +        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
 +        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
 +        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
 +        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
 +        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
 +        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
 +        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
 +        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
 +        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
 +        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
 +        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
 +        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
 +        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
 +        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
 +        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
 +        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
 +        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
 +        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
 +        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
 +        _ => return false,
 +    };
 +
 +    if args.len() != arg_count {
 +        bug!("wrong number of args for intrinsic {:?}", intrinsic);
 +    }
 +
 +    let (a, b, c);
 +    let args = match args {
 +        [x] => {
 +            a = [codegen_operand(fx, x)];
 +            &a as &[_]
 +        }
 +        [x, y] => {
 +            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
 +            &b
 +        }
 +        [x, y, z] => {
 +            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
 +            &c
 +        }
 +        _ => unreachable!(),
 +    };
 +
 +    let layout = fx.layout_of(ty);
 +    let res = match intrinsic {
 +        sym::fmaf32 | sym::fmaf64 => {
 +            let a = args[0].load_scalar(fx);
 +            let b = args[1].load_scalar(fx);
 +            let c = args[2].load_scalar(fx);
 +            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
 +        }
 +        sym::copysignf32 | sym::copysignf64 => {
 +            let a = args[0].load_scalar(fx);
 +            let b = args[1].load_scalar(fx);
 +            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
 +        }
 +        sym::fabsf32
 +        | sym::fabsf64
 +        | sym::floorf32
 +        | sym::floorf64
 +        | sym::ceilf32
 +        | sym::ceilf64
 +        | sym::truncf32
 +        | sym::truncf64 => {
 +            let a = args[0].load_scalar(fx);
 +
 +            let val = match intrinsic {
 +                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
 +                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
 +                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
 +                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
 +                _ => unreachable!(),
 +            };
 +
 +            CValue::by_val(val, layout)
 +        }
 +        // These intrinsics aren't supported natively by Cranelift.
 +        // Lower them to a libcall.
 +        _ => fx.easy_call(name, &args, ty),
 +    };
 +
 +    ret.write_cvalue(fx, res);
 +
 +    true
 +}
 +
 +fn codegen_regular_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    instance: Instance<'tcx>,
 +    intrinsic: Symbol,
 +    substs: SubstsRef<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +    destination: Option<BasicBlock>,
 +    source_info: mir::SourceInfo,
 +) {
 +    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +
 +    match intrinsic {
 +        sym::likely | sym::unlikely => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            ret.write_cvalue(fx, a);
 +        }
 +        sym::breakpoint => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            fx.bcx.ins().debugtrap();
 +        }
 +        sym::copy | sym::copy_nonoverlapping => {
 +            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
 +            let src = src.load_scalar(fx);
 +            let dst = dst.load_scalar(fx);
 +            let count = count.load_scalar(fx);
 +
 +            let elem_ty = substs.type_at(0);
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount =
 +                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 +
 +            if intrinsic == sym::copy_nonoverlapping {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
 +            }
 +        }
 +        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
 +            // NOTE: the volatile variants have src and dst swapped
 +            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
 +            let dst = dst.load_scalar(fx);
 +            let src = src.load_scalar(fx);
 +            let count = count.load_scalar(fx);
 +
 +            let elem_ty = substs.type_at(0);
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount =
 +                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 +
 +            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
 +            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
 +            }
 +        }
 +        sym::size_of_val => {
 +            intrinsic_args!(fx, args => (ptr); intrinsic);
 +
 +            let layout = fx.layout_of(substs.type_at(0));
 +            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
 +            // branch
 +            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                size
 +            } else {
 +                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
 +        }
 +        sym::min_align_of_val => {
 +            intrinsic_args!(fx, args => (ptr); intrinsic);
 +
 +            let layout = fx.layout_of(substs.type_at(0));
 +            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
 +            // branch
 +            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                align
 +            } else {
 +                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
 +        }
 +
 +        sym::vtable_size => {
 +            intrinsic_args!(fx, args => (vtable); intrinsic);
 +            let vtable = vtable.load_scalar(fx);
 +
 +            let size = crate::vtable::size_of_obj(fx, vtable);
 +            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
 +        }
 +
 +        sym::vtable_align => {
 +            intrinsic_args!(fx, args => (vtable); intrinsic);
 +            let vtable = vtable.load_scalar(fx);
 +
 +            let align = crate::vtable::min_align_of_obj(fx, vtable);
 +            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
 +        }
 +
 +        sym::unchecked_add
 +        | sym::unchecked_sub
 +        | sym::unchecked_mul
 +        | sym::unchecked_div
 +        | sym::exact_div
 +        | sym::unchecked_rem
 +        | sym::unchecked_shl
 +        | sym::unchecked_shr => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            // FIXME trap on overflow
 +            let bin_op = match intrinsic {
 +                sym::unchecked_add => BinOp::Add,
 +                sym::unchecked_sub => BinOp::Sub,
 +                sym::unchecked_mul => BinOp::Mul,
 +                sym::unchecked_div | sym::exact_div => BinOp::Div,
 +                sym::unchecked_rem => BinOp::Rem,
 +                sym::unchecked_shl => BinOp::Shl,
 +                sym::unchecked_shr => BinOp::Shr,
 +                _ => unreachable!(),
 +            };
 +            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            assert_eq!(x.layout().ty, y.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::add_with_overflow => BinOp::Add,
 +                sym::sub_with_overflow => BinOp::Sub,
 +                sym::mul_with_overflow => BinOp::Mul,
 +                _ => unreachable!(),
 +            };
 +
 +            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::saturating_add | sym::saturating_sub => {
 +            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
 +
 +            assert_eq!(lhs.layout().ty, rhs.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::saturating_add => BinOp::Add,
 +                sym::saturating_sub => BinOp::Sub,
 +                _ => unreachable!(),
 +            };
 +
 +            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::rotate_left => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +            let y = y.load_scalar(fx);
 +
 +            let layout = x.layout();
 +            let x = x.load_scalar(fx);
 +            let res = fx.bcx.ins().rotl(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        }
 +        sym::rotate_right => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +            let y = y.load_scalar(fx);
 +
 +            let layout = x.layout();
 +            let x = x.load_scalar(fx);
 +            let res = fx.bcx.ins().rotr(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        }
 +
 +        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
 +        // doesn't have UB, both are codegen'ed the same way
 +        sym::offset | sym::arith_offset => {
 +            intrinsic_args!(fx, args => (base, offset); intrinsic);
 +            let offset = offset.load_scalar(fx);
 +
 +            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let ptr_diff = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
 +            } else {
 +                offset
 +            };
 +            let base_val = base.load_scalar(fx);
 +            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
 +        }
 +
 +        sym::ptr_mask => {
 +            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +            let mask = mask.load_scalar(fx);
 +            fx.bcx.ins().band(ptr, mask);
 +        }
 +
 +        sym::transmute => {
 +            intrinsic_args!(fx, args => (from); intrinsic);
 +
 +            ret.write_cvalue_transmute(fx, from);
 +        }
 +        sym::write_bytes | sym::volatile_set_memory => {
 +            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
 +            let val = val.load_scalar(fx);
 +            let count = count.load_scalar(fx);
 +
 +            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let count = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(count, pointee_size as i64)
 +            } else {
 +                count
 +            };
 +            let dst_ptr = dst.load_scalar(fx);
 +            // FIXME make the memset actually volatile when switching to emit_small_memset
 +            // FIXME use emit_small_memset
 +            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
 +        }
 +        sym::ctlz | sym::ctlz_nonzero => {
 +            intrinsic_args!(fx, args => (arg); intrinsic);
 +            let val = arg.load_scalar(fx);
 +
 +            // FIXME trap on `ctlz_nonzero` with zero arg.
 +            let res = fx.bcx.ins().clz(val);
 +            let res = CValue::by_val(res, arg.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::cttz | sym::cttz_nonzero => {
 +            intrinsic_args!(fx, args => (arg); intrinsic);
 +            let val = arg.load_scalar(fx);
 +
 +            // FIXME trap on `cttz_nonzero` with zero arg.
 +            let res = fx.bcx.ins().ctz(val);
 +            let res = CValue::by_val(res, arg.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::ctpop => {
 +            intrinsic_args!(fx, args => (arg); intrinsic);
 +            let val = arg.load_scalar(fx);
 +
 +            let res = fx.bcx.ins().popcnt(val);
 +            let res = CValue::by_val(res, arg.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::bitreverse => {
 +            intrinsic_args!(fx, args => (arg); intrinsic);
 +            let val = arg.load_scalar(fx);
 +
 +            let res = fx.bcx.ins().bitrev(val);
 +            let res = CValue::by_val(res, arg.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::bswap => {
-             let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
 +            intrinsic_args!(fx, args => (arg); intrinsic);
 +            let val = arg.load_scalar(fx);
 +
-             let ret_val =
-                 CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
++            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
++                val
++            } else {
++                fx.bcx.ins().bswap(val)
++            };
++            let res = CValue::by_val(res, arg.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            let layout = fx.layout_of(substs.type_at(0));
 +            if layout.abi.is_uninhabited() {
 +                with_no_trimmed_paths!({
 +                    crate::base::codegen_panic(
 +                        fx,
 +                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
 +                        source_info,
 +                    )
 +                });
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
 +                with_no_trimmed_paths!({
 +                    crate::base::codegen_panic(
 +                        fx,
 +                        &format!(
 +                            "attempted to zero-initialize type `{}`, which is invalid",
 +                            layout.ty
 +                        ),
 +                        source_info,
 +                    );
 +                });
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
 +                with_no_trimmed_paths!({
 +                    crate::base::codegen_panic(
 +                        fx,
 +                        &format!(
 +                            "attempted to leave type `{}` uninitialized, which is invalid",
 +                            layout.ty
 +                        ),
 +                        source_info,
 +                    )
 +                });
 +                return;
 +            }
 +        }
 +
 +        sym::volatile_load | sym::unaligned_volatile_load => {
 +            intrinsic_args!(fx, args => (ptr); intrinsic);
 +
 +            // Cranelift treats loads as volatile by default
 +            // FIXME correctly handle unaligned_volatile_load
 +            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
 +            ret.write_cvalue(fx, val);
 +        }
 +        sym::volatile_store | sym::unaligned_volatile_store => {
 +            intrinsic_args!(fx, args => (ptr, val); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            // Cranelift treats stores as volatile by default
 +            // FIXME correctly handle unaligned_volatile_store
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
 +            dest.write_cvalue(fx, val);
 +        }
 +
 +        sym::pref_align_of
 +        | sym::needs_drop
 +        | sym::type_id
 +        | sym::type_name
 +        | sym::variant_count => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            let const_val =
 +                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
 +            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
 +            ret.write_cvalue(fx, val);
 +        }
 +
 +        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
 +            intrinsic_args!(fx, args => (ptr, base); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +            let base = base.load_scalar(fx);
 +            let ty = substs.type_at(0);
 +
 +            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
 +            let diff_bytes = fx.bcx.ins().isub(ptr, base);
 +            // FIXME this can be an exact division.
 +            let val = if intrinsic == sym::ptr_offset_from_unsigned {
 +                let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
 +                // but unsigned is slightly easier to codegen, so might as well.
 +                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
 +            } else {
 +                let isize_layout = fx.layout_of(fx.tcx.types.isize);
 +                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
 +            };
 +            ret.write_cvalue(fx, val);
 +        }
 +
 +        sym::ptr_guaranteed_cmp => {
 +            intrinsic_args!(fx, args => (a, b); intrinsic);
 +
 +            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
 +            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
 +        }
 +
 +        sym::caller_location => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            let caller_location = fx.get_caller_location(source_info);
 +            ret.write_cvalue(fx, caller_location);
 +        }
 +
 +        _ if intrinsic.as_str().starts_with("atomic_fence") => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            fx.bcx.ins().fence();
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
 +            intrinsic_args!(fx, args => (); intrinsic);
 +
 +            // FIXME use a compiler fence once Cranelift supports it
 +            fx.bcx.ins().fence();
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_load") => {
 +            intrinsic_args!(fx, args => (ptr); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let ty = substs.type_at(0);
 +            match ty.kind() {
 +                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                    // FIXME implement 128bit atomics
 +                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
 +                        // special case for compiler-builtins to avoid having to patch it
 +                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
 +                        return;
 +                    } else {
 +                        fx.tcx
 +                            .sess
 +                            .span_fatal(source_info.span, "128bit atomics not yet supported");
 +                    }
 +                }
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
 +                    return;
 +                }
 +            }
 +            let clif_ty = fx.clif_type(ty).unwrap();
 +
 +            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
 +
 +            let val = CValue::by_val(val, fx.layout_of(ty));
 +            ret.write_cvalue(fx, val);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_store") => {
 +            intrinsic_args!(fx, args => (ptr, val); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let ty = substs.type_at(0);
 +            match ty.kind() {
 +                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                    // FIXME implement 128bit atomics
 +                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
 +                        // special case for compiler-builtins to avoid having to patch it
 +                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
 +                        return;
 +                    } else {
 +                        fx.tcx
 +                            .sess
 +                            .span_fatal(source_info.span, "128bit atomics not yet supported");
 +                    }
 +                }
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
 +                    return;
 +                }
 +            }
 +
 +            let val = val.load_scalar(fx);
 +
 +            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
 +            intrinsic_args!(fx, args => (ptr, new); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = new.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
 +            // both atomic_cxchg_* and atomic_cxchgweak_*
 +            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = new.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +
 +            let test_old = test_old.load_scalar(fx);
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
 +            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 +
-                 let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
-                 fx.bcx.ins().bint(types::I8, eq)
++            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
 +            ret.write_cvalue(fx, ret_val)
 +        }
 +
 +        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
 +            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = amount.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old =
 +                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
 +            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = amount.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old =
 +                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_and") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_or") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_xor") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_nand") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_max") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_umax") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_min") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +        _ if intrinsic.as_str().starts_with("atomic_umin") => {
 +            intrinsic_args!(fx, args => (ptr, src); intrinsic);
 +            let ptr = ptr.load_scalar(fx);
 +
 +            let layout = src.layout();
 +            match layout.ty.kind() {
 +                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +                _ => {
 +                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
 +                    return;
 +                }
 +            }
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        }
 +
 +        sym::minnumf32 => {
 +            intrinsic_args!(fx, args => (a, b); intrinsic);
 +            let a = a.load_scalar(fx);
 +            let b = b.load_scalar(fx);
 +
 +            let val = crate::num::codegen_float_min(fx, a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        }
 +        sym::minnumf64 => {
 +            intrinsic_args!(fx, args => (a, b); intrinsic);
 +            let a = a.load_scalar(fx);
 +            let b = b.load_scalar(fx);
 +
 +            let val = crate::num::codegen_float_min(fx, a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        }
 +        sym::maxnumf32 => {
 +            intrinsic_args!(fx, args => (a, b); intrinsic);
 +            let a = a.load_scalar(fx);
 +            let b = b.load_scalar(fx);
 +
 +            let val = crate::num::codegen_float_max(fx, a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        }
 +        sym::maxnumf64 => {
 +            intrinsic_args!(fx, args => (a, b); intrinsic);
 +            let a = a.load_scalar(fx);
 +            let b = b.load_scalar(fx);
 +
 +            let val = crate::num::codegen_float_max(fx, a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        }
 +
 +        kw::Try => {
 +            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
 +            let f = f.load_scalar(fx);
 +            let data = data.load_scalar(fx);
 +            let _catch_fn = catch_fn.load_scalar(fx);
 +
 +            // FIXME once unwinding is supported, change this to actually catch panics
 +            let f_sig = fx.bcx.func.import_signature(Signature {
 +                call_conv: fx.target_config.default_call_conv,
 +                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
 +                returns: vec![],
 +            });
 +
 +            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 +
 +            let layout = ret.layout();
 +            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
 +            ret.write_cvalue(fx, ret_val);
 +        }
 +
 +        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            let res = crate::num::codegen_float_binop(
 +                fx,
 +                match intrinsic {
 +                    sym::fadd_fast => BinOp::Add,
 +                    sym::fsub_fast => BinOp::Sub,
 +                    sym::fmul_fast => BinOp::Mul,
 +                    sym::fdiv_fast => BinOp::Div,
 +                    sym::frem_fast => BinOp::Rem,
 +                    _ => unreachable!(),
 +                },
 +                x,
 +                y,
 +            );
 +            ret.write_cvalue(fx, res);
 +        }
 +        sym::float_to_int_unchecked => {
 +            intrinsic_args!(fx, args => (f); intrinsic);
 +            let f = f.load_scalar(fx);
 +
 +            let res = crate::cast::clif_int_or_float_cast(
 +                fx,
 +                f,
 +                false,
 +                fx.clif_type(ret.layout().ty).unwrap(),
 +                type_sign(ret.layout().ty),
 +            );
 +            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
 +        }
 +
 +        sym::raw_eq => {
 +            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
 +            let lhs_ref = lhs_ref.load_scalar(fx);
 +            let rhs_ref = rhs_ref.load_scalar(fx);
 +
 +            let size = fx.layout_of(substs.type_at(0)).layout.size();
 +            // FIXME add and use emit_small_memcmp
 +            let is_eq_value = if size == Size::ZERO {
 +                // No bytes means they're trivially equal
 +                fx.bcx.ins().iconst(types::I8, 1)
 +            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
 +                // Can't use `trusted` for these loads; they could be unaligned.
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
 +                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
-                 let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
-                 fx.bcx.ins().bint(types::I8, eq)
++                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
 +            } else {
 +                // Just call `memcmp` (like slices do in core) when the
 +                // size is too large or it's not a power-of-two.
 +                let signed_bytes = i64::try_from(size.bytes()).unwrap();
 +                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
 +                let params = vec![AbiParam::new(fx.pointer_type); 3];
 +                let returns = vec![AbiParam::new(types::I32)];
 +                let args = &[lhs_ref, rhs_ref, bytes_val];
 +                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
++                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
 +        }
 +
 +        sym::const_allocate => {
 +            intrinsic_args!(fx, args => (_size, _align); intrinsic);
 +
 +            // returns a null pointer at runtime.
 +            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
 +        }
 +
 +        sym::const_deallocate => {
 +            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
 +            // nop at runtime.
 +        }
 +
 +        sym::black_box => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            // FIXME implement black_box semantics
 +            ret.write_cvalue(fx, a);
 +        }
 +
 +        // FIXME implement variadics in cranelift
 +        sym::va_copy | sym::va_arg | sym::va_end => {
 +            fx.tcx.sess.span_fatal(
 +                source_info.span,
 +                "Defining variadic functions is not yet supported by Cranelift",
 +            );
 +        }
 +
 +        _ => {
 +            fx.tcx
 +                .sess
 +                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
 +        }
 +    }
 +
 +    let ret_block = fx.get_block(destination.unwrap());
 +    fx.bcx.ins().jump(ret_block, &[]);
 +}
index 51fce8c854bdb5307149680f288fa590ed8a8b53,0000000000000000000000000000000000000000..14f5e9187399fac76f2a64d0147f2f647a904929
mode 100644,000000..100644
--- /dev/null
@@@ -1,783 -1,0 +1,780 @@@
-                 let ty = fx.clif_type(res_lane_ty).unwrap();
-                 let res_lane = fx.bcx.ins().bint(ty, res_lane);
-                 fx.bcx.ins().ineg(res_lane)
 +//! Codegen `extern "platform-intrinsic"` intrinsics.
 +
 +use rustc_middle::ty::subst::SubstsRef;
 +use rustc_span::Symbol;
 +use rustc_target::abi::Endian;
 +
 +use super::*;
 +use crate::prelude::*;
 +
 +fn report_simd_type_validation_error(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    intrinsic: Symbol,
 +    span: Span,
 +    ty: Ty<'_>,
 +) {
 +    fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
 +    // Prevent verifier error
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
 +
 +pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    intrinsic: Symbol,
 +    _substs: SubstsRef<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    ret: CPlace<'tcx>,
 +    span: Span,
 +) {
 +    match intrinsic {
 +        sym::simd_as | sym::simd_cast => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +
 +            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
 +                let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
 +
 +                let from_signed = type_sign(lane_ty);
 +                let to_signed = type_sign(ret_lane_ty);
 +
 +                clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
 +            });
 +        }
 +
 +        sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            if !x.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
 +                return;
 +            }
 +
 +            // FIXME use vector instructions when possible
 +            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
 +                let res_lane = match (lane_ty.kind(), intrinsic) {
 +                    (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_ne) => {
 +                        fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Uint(_), sym::simd_lt) => {
 +                        fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
 +                    }
 +                    (ty::Uint(_), sym::simd_le) => {
 +                        fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Uint(_), sym::simd_gt) => {
 +                        fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
 +                    }
 +                    (ty::Uint(_), sym::simd_ge) => {
 +                        fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
 +                    }
 +
 +                    (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_ne) => {
 +                        fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Int(_), sym::simd_lt) => {
 +                        fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
 +                    }
 +                    (ty::Int(_), sym::simd_le) => {
 +                        fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Int(_), sym::simd_gt) => {
 +                        fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
 +                    }
 +                    (ty::Int(_), sym::simd_ge) => {
 +                        fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
 +                    }
 +
 +                    (ty::Float(_), sym::simd_eq) => {
 +                        fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
 +                    }
 +                    (ty::Float(_), sym::simd_ne) => {
 +                        fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Float(_), sym::simd_lt) => {
 +                        fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
 +                    }
 +                    (ty::Float(_), sym::simd_le) => {
 +                        fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
 +                    }
 +                    (ty::Float(_), sym::simd_gt) => {
 +                        fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
 +                    }
 +                    (ty::Float(_), sym::simd_ge) => {
 +                        fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
 +                    }
 +
 +                    _ => unreachable!(),
 +                };
 +
-             let mut res = fx.bcx.ins().iconst(res_type, 0);
++                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
 +            });
 +        }
 +
 +        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
 +        _ if intrinsic.as_str().starts_with("simd_shuffle") => {
 +            let (x, y, idx) = match args {
 +                [x, y, idx] => (x, y, idx),
 +                _ => {
 +                    bug!("wrong number of args for intrinsic {intrinsic}");
 +                }
 +            };
 +            let x = codegen_operand(fx, x);
 +            let y = codegen_operand(fx, y);
 +
 +            if !x.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
 +                return;
 +            }
 +
 +            // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
 +            // If there is no suffix, use the index array length.
 +            let n: u16 = if intrinsic == sym::simd_shuffle {
 +                // Make sure this is actually an array, since typeck only checks the length-suffixed
 +                // version of this intrinsic.
 +                let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
 +                match idx_ty.kind() {
 +                    ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
 +                        .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
 +                        .unwrap_or_else(|| {
 +                            span_bug!(span, "could not evaluate shuffle index array length")
 +                        })
 +                        .try_into()
 +                        .unwrap(),
 +                    _ => {
 +                        fx.tcx.sess.span_err(
 +                            span,
 +                            &format!(
 +                                "simd_shuffle index must be an array of `u32`, got `{}`",
 +                                idx_ty,
 +                            ),
 +                        );
 +                        // Prevent verifier error
 +                        fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +                        return;
 +                    }
 +                }
 +            } else {
 +                // FIXME remove this case
 +                intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
 +            };
 +
 +            assert_eq!(x.layout(), y.layout());
 +            let layout = x.layout();
 +
 +            let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +
 +            assert_eq!(lane_ty, ret_lane_ty);
 +            assert_eq!(u64::from(n), ret_lane_count);
 +
 +            let total_len = lane_count * 2;
 +
 +            let indexes = {
 +                use rustc_middle::mir::interpret::*;
 +                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
 +                    .expect("simd_shuffle* idx not const");
 +
 +                let idx_bytes = match idx_const {
 +                    ConstValue::ByRef { alloc, offset } => {
 +                        let size = Size::from_bytes(
 +                            4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
 +                        );
 +                        alloc
 +                            .inner()
 +                            .get_bytes_strip_provenance(fx, alloc_range(offset, size))
 +                            .unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", idx_const),
 +                };
 +
 +                (0..ret_lane_count)
 +                    .map(|i| {
 +                        let i = usize::try_from(i).unwrap();
 +                        let idx = rustc_middle::mir::interpret::read_target_uint(
 +                            fx.tcx.data_layout.endian,
 +                            &idx_bytes[4 * i..4 * i + 4],
 +                        )
 +                        .expect("read_target_uint");
 +                        u16::try_from(idx).expect("try_from u32")
 +                    })
 +                    .collect::<Vec<u16>>()
 +            };
 +
 +            for &idx in &indexes {
 +                assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
 +            }
 +
 +            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
 +                let in_lane = if u64::from(in_idx) < lane_count {
 +                    x.value_lane(fx, in_idx.into())
 +                } else {
 +                    y.value_lane(fx, u64::from(in_idx) - lane_count)
 +                };
 +                let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
 +                out_lane.write_cvalue(fx, in_lane);
 +            }
 +        }
 +
 +        sym::simd_insert => {
 +            let (base, idx, val) = match args {
 +                [base, idx, val] => (base, idx, val),
 +                _ => {
 +                    bug!("wrong number of args for intrinsic {intrinsic}");
 +                }
 +            };
 +            let base = codegen_operand(fx, base);
 +            let val = codegen_operand(fx, val);
 +
 +            // FIXME validate
 +            let idx_const = if let Some(idx_const) =
 +                crate::constant::mir_operand_get_const_val(fx, idx)
 +            {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
 +            };
 +
 +            let idx = idx_const
 +                .try_to_bits(Size::from_bytes(4 /* u32*/))
 +                .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(
 +                    fx.mir.span,
 +                    &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
 +                );
 +            }
 +
 +            ret.write_cvalue(fx, base);
 +            let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
 +            ret_lane.write_cvalue(fx, val);
 +        }
 +
 +        sym::simd_extract => {
 +            let (v, idx) = match args {
 +                [v, idx] => (v, idx),
 +                _ => {
 +                    bug!("wrong number of args for intrinsic {intrinsic}");
 +                }
 +            };
 +            let v = codegen_operand(fx, v);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            let idx_const = if let Some(idx_const) =
 +                crate::constant::mir_operand_get_const_val(fx, idx)
 +            {
 +                idx_const
 +            } else {
 +                fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
 +                let trap_block = fx.bcx.create_block();
 +                let dummy_block = fx.bcx.create_block();
 +                let true_ = fx.bcx.ins().iconst(types::I8, 1);
 +                fx.bcx.ins().brnz(true_, trap_block, &[]);
 +                fx.bcx.ins().jump(dummy_block, &[]);
 +                fx.bcx.switch_to_block(trap_block);
 +                crate::trap::trap_unimplemented(
 +                    fx,
 +                    "Index argument for `simd_extract` is not a constant",
 +                );
 +                fx.bcx.switch_to_block(dummy_block);
 +                return;
 +            };
 +
 +            let idx = idx_const
 +                .try_to_bits(Size::from_bytes(4 /* u32*/))
 +                .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
 +            let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
 +            if idx >= lane_count.into() {
 +                fx.tcx.sess.span_fatal(
 +                    fx.mir.span,
 +                    &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
 +                );
 +            }
 +
 +            let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
 +            ret.write_cvalue(fx, ret_lane);
 +        }
 +
 +        sym::simd_neg => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +
 +            simd_for_each_lane(
 +                fx,
 +                a,
 +                ret,
 +                &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
 +                    ty::Int(_) => fx.bcx.ins().ineg(lane),
 +                    ty::Float(_) => fx.bcx.ins().fneg(lane),
 +                    _ => unreachable!(),
 +                },
 +            );
 +        }
 +
 +        sym::simd_add
 +        | sym::simd_sub
 +        | sym::simd_mul
 +        | sym::simd_div
 +        | sym::simd_rem
 +        | sym::simd_shl
 +        | sym::simd_shr
 +        | sym::simd_and
 +        | sym::simd_or
 +        | sym::simd_xor => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            // FIXME use vector instructions when possible
 +            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
 +                match (lane_ty.kind(), intrinsic) {
 +                    (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
 +
 +                    (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
 +
 +                    (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
 +                    (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
 +                    (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
 +                    (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
 +                    (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
 +                        "fmodf",
 +                        vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
 +                        vec![AbiParam::new(types::F32)],
 +                        &[x_lane, y_lane],
 +                    )[0],
 +                    (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
 +                        "fmod",
 +                        vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
 +                        vec![AbiParam::new(types::F64)],
 +                        &[x_lane, y_lane],
 +                    )[0],
 +
 +                    (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
 +                    (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
 +
 +                    (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
 +                    (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
 +
 +                    _ => unreachable!(),
 +                }
 +            });
 +        }
 +
 +        sym::simd_fma => {
 +            intrinsic_args!(fx, args => (a, b, c); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +            assert_eq!(a.layout(), b.layout());
 +            assert_eq!(a.layout(), c.layout());
 +            assert_eq!(a.layout(), ret.layout());
 +
 +            let layout = a.layout();
 +            let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +            let res_lane_layout = fx.layout_of(lane_ty);
 +
 +            for lane in 0..lane_count {
 +                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
 +                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
 +                let c_lane = c.value_lane(fx, lane).load_scalar(fx);
 +
 +                let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
 +                let res_lane = CValue::by_val(res_lane, res_lane_layout);
 +
 +                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
 +            }
 +        }
 +
 +        sym::simd_fmin | sym::simd_fmax => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            if !x.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
 +                return;
 +            }
 +
 +            // FIXME use vector instructions when possible
 +            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
 +                match lane_ty.kind() {
 +                    ty::Float(_) => {}
 +                    _ => unreachable!("{:?}", lane_ty),
 +                }
 +                match intrinsic {
 +                    sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
 +                    sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
 +                    _ => unreachable!(),
 +                }
 +            });
 +        }
 +
 +        sym::simd_round => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +
 +            simd_for_each_lane(
 +                fx,
 +                a,
 +                ret,
 +                &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
 +                    ty::Float(FloatTy::F32) => fx.lib_call(
 +                        "roundf",
 +                        vec![AbiParam::new(types::F32)],
 +                        vec![AbiParam::new(types::F32)],
 +                        &[lane],
 +                    )[0],
 +                    ty::Float(FloatTy::F64) => fx.lib_call(
 +                        "round",
 +                        vec![AbiParam::new(types::F64)],
 +                        vec![AbiParam::new(types::F64)],
 +                        &[lane],
 +                    )[0],
 +                    _ => unreachable!("{:?}", lane_ty),
 +                },
 +            );
 +        }
 +
 +        sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +
 +            simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
 +                match lane_ty.kind() {
 +                    ty::Float(_) => {}
 +                    _ => unreachable!("{:?}", lane_ty),
 +                }
 +                match intrinsic {
 +                    sym::simd_fabs => fx.bcx.ins().fabs(lane),
 +                    sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
 +                    sym::simd_ceil => fx.bcx.ins().ceil(lane),
 +                    sym::simd_floor => fx.bcx.ins().floor(lane),
 +                    sym::simd_trunc => fx.bcx.ins().trunc(lane),
 +                    _ => unreachable!(),
 +                }
 +            });
 +        }
 +
 +        sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
 +            intrinsic_args!(fx, args => (v, acc); intrinsic);
 +            let acc = acc.load_scalar(fx);
 +
 +            // FIXME there must be no acc param for integer vectors
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
 +                if lane_ty.is_floating_point() {
 +                    fx.bcx.ins().fadd(a, b)
 +                } else {
 +                    fx.bcx.ins().iadd(a, b)
 +                }
 +            });
 +        }
 +
 +        sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
 +            intrinsic_args!(fx, args => (v, acc); intrinsic);
 +            let acc = acc.load_scalar(fx);
 +
 +            // FIXME there must be no acc param for integer vectors
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
 +                if lane_ty.is_floating_point() {
 +                    fx.bcx.ins().fmul(a, b)
 +                } else {
 +                    fx.bcx.ins().imul(a, b)
 +                }
 +            });
 +        }
 +
 +        sym::simd_reduce_all => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
 +        }
 +
 +        sym::simd_reduce_any => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
 +        }
 +
 +        sym::simd_reduce_and => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
 +        }
 +
 +        sym::simd_reduce_or => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
 +        }
 +
 +        sym::simd_reduce_xor => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
 +        }
 +
 +        sym::simd_reduce_min => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
 +                let lt = match ty.kind() {
 +                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
 +                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
 +                    ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
 +                    _ => unreachable!(),
 +                };
 +                fx.bcx.ins().select(lt, a, b)
 +            });
 +        }
 +
 +        sym::simd_reduce_max => {
 +            intrinsic_args!(fx, args => (v); intrinsic);
 +
 +            if !v.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
 +                return;
 +            }
 +
 +            simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
 +                let gt = match ty.kind() {
 +                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
 +                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
 +                    ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
 +                    _ => unreachable!(),
 +                };
 +                fx.bcx.ins().select(gt, a, b)
 +            });
 +        }
 +
 +        sym::simd_select => {
 +            intrinsic_args!(fx, args => (m, a, b); intrinsic);
 +
 +            if !m.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
 +                return;
 +            }
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +            assert_eq!(a.layout(), b.layout());
 +
 +            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
 +            let lane_layout = fx.layout_of(lane_ty);
 +
 +            for lane in 0..lane_count {
 +                let m_lane = m.value_lane(fx, lane).load_scalar(fx);
 +                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
 +                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
 +
 +                let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
 +                let res_lane =
 +                    CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
 +
 +                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
 +            }
 +        }
 +
 +        sym::simd_select_bitmask => {
 +            intrinsic_args!(fx, args => (m, a, b); intrinsic);
 +
 +            if !a.layout().ty.is_simd() {
 +                report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
 +                return;
 +            }
 +            assert_eq!(a.layout(), b.layout());
 +
 +            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
 +            let lane_layout = fx.layout_of(lane_ty);
 +
 +            let m = m.load_scalar(fx);
 +
 +            for lane in 0..lane_count {
 +                let m_lane = fx.bcx.ins().ushr_imm(m, u64::from(lane) as i64);
 +                let m_lane = fx.bcx.ins().band_imm(m_lane, 1);
 +                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
 +                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
 +
 +                let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
 +                let res_lane =
 +                    CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
 +
 +                ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
 +            }
 +        }
 +
 +        sym::simd_bitmask => {
 +            intrinsic_args!(fx, args => (a); intrinsic);
 +
 +            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
 +            let lane_clif_ty = fx.clif_type(lane_ty).unwrap();
 +
 +            // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
 +            // vector mask and returns the most significant bit (MSB) of each lane in the form
 +            // of either:
 +            // * an unsigned integer
 +            // * an array of `u8`
 +            // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
 +            //
 +            // The bit order of the result depends on the byte endianness, LSB-first for little
 +            // endian and MSB-first for big endian.
 +            let expected_int_bits = lane_count.max(8);
 +            let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
 +
 +            match lane_ty.kind() {
 +                ty::Int(_) | ty::Uint(_) => {}
 +                _ => {
 +                    fx.tcx.sess.span_fatal(
 +                        span,
 +                        &format!(
 +                            "invalid monomorphization of `simd_bitmask` intrinsic: \
 +                            vector argument `{}`'s element type `{}`, expected integer element \
 +                            type",
 +                            a.layout().ty,
 +                            lane_ty
 +                        ),
 +                    );
 +                }
 +            }
 +
 +            let res_type =
 +                Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
++            let mut res = type_zero_value(&mut fx.bcx, res_type);
 +
 +            let lanes = match fx.tcx.sess.target.endian {
 +                Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,
 +                Endian::Little => Box::new((0..lane_count).rev()) as Box<dyn Iterator<Item = u64>>,
 +            };
 +            for lane in lanes {
 +                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
 +
 +                // extract sign bit of an int
 +                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_clif_ty.bits() - 1));
 +
 +                // shift sign bit into result
 +                let a_lane_sign = clif_intcast(fx, a_lane_sign, res_type, false);
 +                res = fx.bcx.ins().ishl_imm(res, 1);
 +                res = fx.bcx.ins().bor(res, a_lane_sign);
 +            }
 +
 +            match ret.layout().ty.kind() {
 +                ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
 +                ty::Array(elem, len)
 +                    if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
 +                        && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
 +                            == Some(expected_bytes) => {}
 +                _ => {
 +                    fx.tcx.sess.span_fatal(
 +                        span,
 +                        &format!(
 +                            "invalid monomorphization of `simd_bitmask` intrinsic: \
 +                            cannot return `{}`, expected `u{}` or `[u8; {}]`",
 +                            ret.layout().ty,
 +                            expected_int_bits,
 +                            expected_bytes
 +                        ),
 +                    );
 +                }
 +            }
 +
 +            let res = CValue::by_val(res, ret.layout());
 +            ret.write_cvalue(fx, res);
 +        }
 +
 +        sym::simd_saturating_add | sym::simd_saturating_sub => {
 +            intrinsic_args!(fx, args => (x, y); intrinsic);
 +
 +            let bin_op = match intrinsic {
 +                sym::simd_saturating_add => BinOp::Add,
 +                sym::simd_saturating_sub => BinOp::Sub,
 +                _ => unreachable!(),
 +            };
 +
 +            // FIXME use vector instructions when possible
 +            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
 +                crate::num::codegen_saturating_int_binop(fx, bin_op, x_lane, y_lane)
 +            });
 +        }
 +
 +        // simd_arith_offset
 +        // simd_scatter
 +        // simd_gather
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
 +        }
 +    }
 +}
index f7434633ea442b40fa30f4a85370135e8c134017,0000000000000000000000000000000000000000..c10054e7f0d2c971711364aafdfe563558e3fd5d
mode 100644,000000..100644
--- /dev/null
@@@ -1,168 -1,0 +1,168 @@@
-                 CallConv::triple_default(m.isa().triple()),
 +use rustc_hir::LangItem;
 +use rustc_middle::ty::subst::GenericArg;
 +use rustc_middle::ty::AssocKind;
 +use rustc_session::config::{sigpipe, EntryFnType};
 +use rustc_span::symbol::Ident;
 +
 +use crate::prelude::*;
 +
 +/// Create the `main` function which will initialize the rust runtime and call
 +/// users main function.
 +pub(crate) fn maybe_create_entry_wrapper(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +    is_jit: bool,
 +    is_primary_cgu: bool,
 +) {
 +    // Classify the crate's entry point: a regular `fn main` (goes through the
 +    // `#[lang = "start"]` wrapper and carries a sigpipe setting) vs a
 +    // `#[start]`-style entry (called directly with argc/argv).
 +    let (main_def_id, (is_main_fn, sigpipe)) = match tcx.entry_fn(()) {
 +        Some((def_id, entry_ty)) => (
 +            def_id,
 +            match entry_ty {
 +                EntryFnType::Main { sigpipe } => (true, sigpipe),
 +                EntryFnType::Start => (false, sigpipe::DEFAULT),
 +            },
 +        ),
 +        // No entry point at all (e.g. a library crate): nothing to wrap.
 +        None => return,
 +    };
 +
 +    if main_def_id.is_local() {
 +        let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
 +        // AOT: only emit the wrapper in the module that actually contains the
 +        // monomorphized main symbol, so it is generated exactly once.
 +        if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
 +            return;
 +        }
 +    } else if !is_primary_cgu {
 +        // `main` is in another crate: emit the wrapper in the primary CGU only.
 +        return;
 +    }
 +
 +    create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn, sigpipe);
 +
 +    // Defines the exported C entry symbol (target's `entry_name`, typically
 +    // `main(argc, argv)`) which dispatches to the Rust entry point in one of
 +    // three ways — see the `result` branches below.
 +    fn create_entry_fn(
 +        tcx: TyCtxt<'_>,
 +        m: &mut impl Module,
 +        unwind_context: &mut UnwindContext,
 +        rust_main_def_id: DefId,
 +        ignore_lang_start_wrapper: bool,
 +        is_main_fn: bool,
 +        sigpipe: u8,
 +    ) {
 +        let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
 +        // Given that `main()` has no arguments,
 +        // then its return type cannot have
 +        // late-bound regions, since late-bound
 +        // regions must appear in the argument
 +        // listing.
 +        let main_ret_ty = tcx.normalize_erasing_regions(
 +            ty::ParamEnv::reveal_all(),
 +            main_ret_ty.no_bound_vars().unwrap(),
 +        );
 +
 +        // C-ABI signature: two pointer-sized params (argc, argv) returning a
 +        // pointer-sized exit code, using the target's configured entry ABI.
 +        let cmain_sig = Signature {
 +            params: vec![
 +                AbiParam::new(m.target_config().pointer_type()),
 +                AbiParam::new(m.target_config().pointer_type()),
 +            ],
 +            returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
 +            call_conv: crate::conv_to_call_conv(
 +                tcx.sess.target.options.entry_abi,
-         let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
++                m.target_config().default_call_conv,
 +            ),
 +        };
 +
 +        let entry_name = tcx.sess.target.options.entry_name.as_ref();
 +        let cmain_func_id = m.declare_function(entry_name, Linkage::Export, &cmain_sig).unwrap();
 +
 +        let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
 +
 +        let main_name = tcx.symbol_name(instance).name;
-                 let report_sig = get_function_sig(tcx, m.isa().triple(), report);
++        let main_sig = get_function_sig(tcx, m.target_config().default_call_conv, instance);
 +        let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
 +
 +        let mut ctx = Context::new();
 +        ctx.func.signature = cmain_sig;
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
 +            let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
 +            let arg_sigpipe = bcx.ins().iconst(types::I8, sigpipe as i64);
 +
 +            let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
 +
 +            let result = if is_main_fn && ignore_lang_start_wrapper {
 +                // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
 +                // FIXME set program arguments somehow
 +                let call_inst = bcx.ins().call(main_func_ref, &[]);
 +                let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
 +
 +                // Resolve `<main_ret_ty as Termination>::report` and call it on
 +                // main's return value to obtain the process exit code.
 +                let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
 +                let report = tcx
 +                    .associated_items(termination_trait)
 +                    .find_by_name_and_kind(
 +                        tcx,
 +                        Ident::from_str("report"),
 +                        AssocKind::Fn,
 +                        termination_trait,
 +                    )
 +                    .unwrap();
 +                let report = Instance::resolve(
 +                    tcx,
 +                    ParamEnv::reveal_all(),
 +                    report.def_id,
 +                    tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
 +                )
 +                .unwrap()
 +                .unwrap()
 +                .polymorphize(tcx);
 +
 +                let report_name = tcx.symbol_name(report).name;
++                let report_sig = get_function_sig(tcx, m.target_config().default_call_conv, report);
 +                let report_func_id =
 +                    m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
 +                let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
 +
 +                // FIXME do proper abi handling instead of expecting the pass mode to be identical
 +                // for returns and arguments.
 +                let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
 +                let res = bcx.func.dfg.inst_results(report_call_inst)[0];
 +                // Sign-extend the exit code up to the pointer-sized return type.
 +                match m.target_config().pointer_type() {
 +                    types::I32 => res,
 +                    types::I64 => bcx.ins().sextend(types::I64, res),
 +                    _ => unimplemented!("16bit systems are not yet supported"),
 +                }
 +            } else if is_main_fn {
 +                // AOT `fn main`: route through the `#[lang = "start"]` wrapper,
 +                // passing main's address plus argc/argv/sigpipe.
 +                let start_def_id = tcx.require_lang_item(LangItem::Start, None);
 +                let start_instance = Instance::resolve(
 +                    tcx,
 +                    ParamEnv::reveal_all(),
 +                    start_def_id,
 +                    tcx.intern_substs(&[main_ret_ty.into()]),
 +                )
 +                .unwrap()
 +                .unwrap()
 +                .polymorphize(tcx);
 +                let start_func_id = import_function(tcx, m, start_instance);
 +
 +                let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
 +
 +                let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
 +                let call_inst =
 +                    bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv, arg_sigpipe]);
 +                bcx.inst_results(call_inst)[0]
 +            } else {
 +                // using user-defined start fn
 +                let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
 +                bcx.inst_results(call_inst)[0]
 +            };
 +
 +            bcx.ins().return_(&[result]);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        m.define_function(cmain_func_id, &mut ctx).unwrap();
 +        unwind_context.add_function(cmain_func_id, &ctx, m.isa());
 +    }
 +}
index ecbab408ded972889b6cbca8840daee8c2e837f4,0000000000000000000000000000000000000000..afacbec644582195440bcb8e3e9c58d6d99b3c74
mode 100644,000000..100644
--- /dev/null
@@@ -1,463 -1,0 +1,459 @@@
-     let val = fx.bcx.ins().bint(types::I8, val);
 +//! Various operations on integer and floating-point numbers
 +
 +use crate::prelude::*;
 +
 +pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
 +    use BinOp::*;
 +    use IntCC::*;
 +    Some(match bin_op {
 +        Eq => Equal,
 +        Lt => {
 +            if signed {
 +                SignedLessThan
 +            } else {
 +                UnsignedLessThan
 +            }
 +        }
 +        Le => {
 +            if signed {
 +                SignedLessThanOrEqual
 +            } else {
 +                UnsignedLessThanOrEqual
 +            }
 +        }
 +        Ne => NotEqual,
 +        Ge => {
 +            if signed {
 +                SignedGreaterThanOrEqual
 +            } else {
 +                UnsignedGreaterThanOrEqual
 +            }
 +        }
 +        Gt => {
 +            if signed {
 +                SignedGreaterThan
 +            } else {
 +                UnsignedGreaterThan
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +fn codegen_compare_bin_op<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    signed: bool,
 +    lhs: Value,
 +    rhs: Value,
 +) -> CValue<'tcx> {
 +    let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
 +    let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
-     let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
 +    CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
 +}
 +
 +pub(crate) fn codegen_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    match bin_op {
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            match in_lhs.layout().ty.kind() {
 +                ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
 +                    let signed = type_sign(in_lhs.layout().ty);
 +                    let lhs = in_lhs.load_scalar(fx);
 +                    let rhs = in_rhs.load_scalar(fx);
 +
 +                    return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
 +                }
 +                _ => {}
 +            }
 +        }
 +        _ => {}
 +    }
 +
 +    match in_lhs.layout().ty.kind() {
 +        ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
 +        ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
 +    }
 +}
 +
 +pub(crate) fn codegen_bool_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        // Compare binops handles by `codegen_binop`.
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
 +}
 +
 +pub(crate) fn codegen_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let val = match bin_op {
 +        BinOp::Add => b.iadd(lhs, rhs),
 +        BinOp::Sub => b.isub(lhs, rhs),
 +        BinOp::Mul => b.imul(lhs, rhs),
 +        BinOp::Div => {
 +            if signed {
 +                b.sdiv(lhs, rhs)
 +            } else {
 +                b.udiv(lhs, rhs)
 +            }
 +        }
 +        BinOp::Rem => {
 +            if signed {
 +                b.srem(lhs, rhs)
 +            } else {
 +                b.urem(lhs, rhs)
 +            }
 +        }
 +        BinOp::BitXor => b.bxor(lhs, rhs),
 +        BinOp::BitAnd => b.band(lhs, rhs),
 +        BinOp::BitOr => b.bor(lhs, rhs),
 +        BinOp::Shl => b.ishl(lhs, rhs),
 +        BinOp::Shr => {
 +            if signed {
 +                b.sshr(lhs, rhs)
 +            } else {
 +                b.ushr(lhs, rhs)
 +            }
 +        }
 +        // Compare binops handles by `codegen_binop`.
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
 +    };
 +
 +    CValue::by_val(val, in_lhs.layout())
 +}
 +
 +pub(crate) fn codegen_checked_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
 +        assert_eq!(
 +            in_lhs.layout().ty,
 +            in_rhs.layout().ty,
 +            "checked int binop requires lhs and rhs of same type"
 +        );
 +    }
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
 +        return res;
 +    }
 +
 +    let signed = type_sign(in_lhs.layout().ty);
 +
 +    let (res, has_overflow) = match bin_op {
 +        BinOp::Add => {
 +            /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
 +            (val, c_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
 +            let val = fx.bcx.ins().iadd(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, slt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Sub => {
 +            /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
 +            (val, b_out)*/
 +            // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
 +            let val = fx.bcx.ins().isub(lhs, rhs);
 +            let has_overflow = if !signed {
 +                fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
 +            } else {
 +                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
 +                let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
 +                fx.bcx.ins().bxor(rhs_is_negative, sgt)
 +            };
 +            (val, has_overflow)
 +        }
 +        BinOp::Mul => {
 +            let ty = fx.bcx.func.dfg.value_type(lhs);
 +            match ty {
 +                types::I8 | types::I16 | types::I32 if !signed => {
 +                    let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::UnsignedGreaterThan,
 +                        val,
 +                        (1 << ty.bits()) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, has_overflow)
 +                }
 +                types::I8 | types::I16 | types::I32 if signed => {
 +                    let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
 +                    let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_underflow =
 +                        fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
 +                    let has_overflow = fx.bcx.ins().icmp_imm(
 +                        IntCC::SignedGreaterThan,
 +                        val,
 +                        (1 << (ty.bits() - 1)) - 1,
 +                    );
 +                    let val = fx.bcx.ins().ireduce(ty, val);
 +                    (val, fx.bcx.ins().bor(has_underflow, has_overflow))
 +                }
 +                types::I64 => {
 +                    let val = fx.bcx.ins().imul(lhs, rhs);
 +                    let has_overflow = if !signed {
 +                        let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
 +                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
 +                    } else {
 +                        // Based on LLVM's instruction sequence for compiling
 +                        // a.checked_mul(b).is_some() to riscv64gc:
 +                        // mulh    a2, a0, a1
 +                        // mul     a0, a0, a1
 +                        // srai    a0, a0, 63
 +                        // xor     a0, a0, a2
 +                        // snez    a0, a0
 +                        let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
 +                        let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
 +                        let xor = fx.bcx.ins().bxor(val_hi, val_sign);
 +                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
 +                    };
 +                    (val, has_overflow)
 +                }
 +                types::I128 => {
 +                    unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
 +                }
 +                _ => unreachable!("invalid non-integer type {}", ty),
 +            }
 +        }
 +        BinOp::Shl => {
 +            let val = fx.bcx.ins().ishl(lhs, rhs);
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        BinOp::Shr => {
 +            let val =
 +                if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
 +            let ty = fx.bcx.func.dfg.value_type(val);
 +            let max_shift = i64::from(ty.bits()) - 1;
 +            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
 +            (val, has_overflow)
 +        }
 +        _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
 +    };
 +
-             let val = fx.bcx.ins().bint(types::I8, val);
 +    let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
 +    CValue::by_val_pair(res, has_overflow, out_layout)
 +}
 +
 +pub(crate) fn codegen_saturating_int_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    lhs: CValue<'tcx>,
 +    rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    assert_eq!(lhs.layout().ty, rhs.layout().ty);
 +
 +    let signed = type_sign(lhs.layout().ty);
 +    let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
 +    let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 +
 +    let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
 +    let (val, has_overflow) = checked_res.load_scalar_pair(fx);
 +
 +    let val = match (bin_op, signed) {
 +        (BinOp::Add, false) => fx.bcx.ins().select(has_overflow, max, val),
 +        (BinOp::Sub, false) => fx.bcx.ins().select(has_overflow, min, val),
 +        (BinOp::Add, true) => {
 +            let rhs = rhs.load_scalar(fx);
 +            let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +            let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
 +            fx.bcx.ins().select(has_overflow, sat_val, val)
 +        }
 +        (BinOp::Sub, true) => {
 +            let rhs = rhs.load_scalar(fx);
 +            let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +            let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
 +            fx.bcx.ins().select(has_overflow, sat_val, val)
 +        }
 +        _ => unreachable!(),
 +    };
 +
 +    CValue::by_val(val, lhs.layout())
 +}
 +
 +pub(crate) fn codegen_float_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
 +
 +    let lhs = in_lhs.load_scalar(fx);
 +    let rhs = in_rhs.load_scalar(fx);
 +
 +    let b = fx.bcx.ins();
 +    let res = match bin_op {
 +        BinOp::Add => b.fadd(lhs, rhs),
 +        BinOp::Sub => b.fsub(lhs, rhs),
 +        BinOp::Mul => b.fmul(lhs, rhs),
 +        BinOp::Div => b.fdiv(lhs, rhs),
 +        BinOp::Rem => {
 +            let name = match in_lhs.layout().ty.kind() {
 +                ty::Float(FloatTy::F32) => "fmodf",
 +                ty::Float(FloatTy::F64) => "fmod",
 +                _ => bug!(),
 +            };
 +            return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
 +        }
 +        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +            let fltcc = match bin_op {
 +                BinOp::Eq => FloatCC::Equal,
 +                BinOp::Lt => FloatCC::LessThan,
 +                BinOp::Le => FloatCC::LessThanOrEqual,
 +                BinOp::Ne => FloatCC::NotEqual,
 +                BinOp::Ge => FloatCC::GreaterThanOrEqual,
 +                BinOp::Gt => FloatCC::GreaterThan,
 +                _ => unreachable!(),
 +            };
 +            let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
-         CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
 +            return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
 +        }
 +        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +    };
 +
 +    CValue::by_val(res, in_lhs.layout())
 +}
 +
 +pub(crate) fn codegen_ptr_binop<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    bin_op: BinOp,
 +    in_lhs: CValue<'tcx>,
 +    in_rhs: CValue<'tcx>,
 +) -> CValue<'tcx> {
 +    let is_thin_ptr = in_lhs
 +        .layout()
 +        .ty
 +        .builtin_deref(true)
 +        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
 +        .unwrap_or(true);
 +
 +    if is_thin_ptr {
 +        match bin_op {
 +            BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
 +                let lhs = in_lhs.load_scalar(fx);
 +                let rhs = in_rhs.load_scalar(fx);
 +
 +                codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
 +            }
 +            BinOp::Offset => {
 +                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
 +                let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
 +                let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +                let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
 +                let base_val = base.load_scalar(fx);
 +                let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +                CValue::by_val(res, base.layout())
 +            }
 +            _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
 +        }
 +    } else {
 +        let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
 +        let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
 +
 +        let res = match bin_op {
 +            BinOp::Eq => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
 +                fx.bcx.ins().band(ptr_eq, extra_eq)
 +            }
 +            BinOp::Ne => {
 +                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
 +                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
 +                fx.bcx.ins().bor(ptr_ne, extra_ne)
 +            }
 +            BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
 +                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
 +
 +                let ptr_cmp =
 +                    fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
 +                let extra_cmp = fx.bcx.ins().icmp(
 +                    bin_op_to_intcc(bin_op, false).unwrap(),
 +                    lhs_extra,
 +                    rhs_extra,
 +                );
 +
 +                fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
 +            }
 +            _ => panic!("bin_op {:?} on ptr", bin_op),
 +        };
 +
++        CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
 +    }
 +}
 +
 +// In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
 +// For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
 +// and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
 +// a float against itself. Only in case of NaN is it not equal to itself.
 +pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
 +    let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
 +    let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
 +    let temp = fx.bcx.ins().select(a_ge_b, b, a);
 +    fx.bcx.ins().select(a_is_nan, b, temp)
 +}
 +
 +pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
 +    let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
 +    let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
 +    let temp = fx.bcx.ins().select(a_le_b, b, a);
 +    fx.bcx.ins().select(a_is_nan, b, temp)
 +}
index d637b4d89293cea0a0a855ebb97378b111c3789b,0000000000000000000000000000000000000000..7f45bbd8f28136a43b8271ba5f3e2755d19d522d
mode 100644,000000..100644
--- /dev/null
@@@ -1,67 -1,0 +1,47 @@@
- /// If the given value was produced by a `bint` instruction, return it's input, otherwise return the
- /// given value.
- pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
-     if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
-         match bcx.func.dfg[arg_inst] {
-             InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
-             _ => arg,
-         }
-     } else {
-         arg
-     }
- }
 +//! Peephole optimizations that can be performed while creating clif ir.
 +
 +use cranelift_codegen::ir::{condcodes::IntCC, InstructionData, Opcode, Value, ValueDef};
 +use cranelift_frontend::FunctionBuilder;
 +
-         InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
-             if test_zero {
-                 Some(!imm)
-             } else {
-                 Some(imm)
-             }
-         }
 +/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
 +/// otherwise return the given value and false.
 +pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
 +    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
 +        match bcx.func.dfg[arg_inst] {
 +            // This is the lowering of `Rvalue::Not`
 +            InstructionData::IntCompareImm {
 +                opcode: Opcode::IcmpImm,
 +                cond: IntCC::Equal,
 +                arg,
 +                imm,
 +            } if imm.bits() == 0 => (arg, true),
 +            _ => (arg, false),
 +        }
 +    } else {
 +        (arg, false)
 +    }
 +}
 +
 +/// Returns whether the branch is statically known to be taken or `None` if it isn't statically known.
 +pub(crate) fn maybe_known_branch_taken(
 +    bcx: &FunctionBuilder<'_>,
 +    arg: Value,
 +    test_zero: bool,
 +) -> Option<bool> {
 +    let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
 +        arg_inst
 +    } else {
 +        return None;
 +    };
 +
 +    match bcx.func.dfg[arg_inst] {
 +        InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
 +            if test_zero {
 +                Some(imm.bits() == 0)
 +            } else {
 +                Some(imm.bits() != 0)
 +            }
 +        }
 +        _ => None,
 +    }
 +}
index 34746ff6b6645c11bdda409d0af1f627908e65a0,0000000000000000000000000000000000000000..fe8af21ac6de567cff94a34ec13e625f8876fe4a
mode 100644,000000..100644
--- /dev/null
@@@ -1,940 -1,0 +1,941 @@@
-         let var = Variable::with_u32(fx.next_ssa_var);
 +//! Definition of [`CValue`] and [`CPlace`]
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::ir::immediates::Offset32;
 +
 +fn codegen_field<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    base: Pointer,
 +    extra: Option<Value>,
 +    layout: TyAndLayout<'tcx>,
 +    field: mir::Field,
 +) -> (Pointer, TyAndLayout<'tcx>) {
 +    let field_offset = layout.fields.offset(field.index());
 +    let field_layout = layout.field(&*fx, field.index());
 +
 +    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
 +        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
 +    };
 +
 +    if let Some(extra) = extra {
 +        if field_layout.is_sized() {
 +            return simple(fx);
 +        }
 +        match field_layout.ty.kind() {
 +            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
 +            ty::Adt(def, _) if def.repr().packed() => {
 +                assert_eq!(layout.align.abi.bytes(), 1);
 +                simple(fx)
 +            }
 +            _ => {
 +                // We have to align the offset for DST's
 +                let unaligned_offset = field_offset.bytes();
 +                let (_, unsized_align) =
 +                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 +
 +                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
 +                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
 +                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
 +                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
 +                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
 +
 +                (base.offset_value(fx, offset), field_layout)
 +            }
 +        }
 +    } else {
 +        simple(fx)
 +    }
 +}
 +
 +fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
 +    let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
 +    Offset32::new(b_offset.bytes().try_into().unwrap())
 +}
 +
 +/// A read-only value
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
 +
 +#[derive(Debug, Copy, Clone)]
 +enum CValueInner {
 +    ByRef(Pointer, Option<Value>),
 +    ByVal(Value),
 +    ByValPair(Value, Value),
 +}
 +
 +impl<'tcx> CValue<'tcx> {
 +    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, None), layout)
 +    }
 +
 +    pub(crate) fn by_ref_unsized(
 +        ptr: Pointer,
 +        meta: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
 +    }
 +
 +    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByVal(value), layout)
 +    }
 +
 +    pub(crate) fn by_val_pair(
 +        value: Value,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByValPair(value, extra), layout)
 +    }
 +
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.1
 +    }
 +
 +    // FIXME remove
 +    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => (ptr, meta),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
 +                let cplace = CPlace::new_stack_slot(fx, layout);
 +                cplace.write_cvalue(fx, self);
 +                (cplace.to_ptr(), None)
 +            }
 +        }
 +    }
 +
 +    // FIXME remove
 +    /// Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
 +    /// vtable pointer.
 +    pub(crate) fn dyn_star_force_data_on_stack(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    ) -> (Value, Value) {
 +        assert!(self.1.ty.is_dyn_star());
 +
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let (a_scalar, b_scalar) = match self.1.abi {
 +                    Abi::ScalarPair(a, b) => (a, b),
 +                    _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
 +                };
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
 +                (ptr.get_addr(fx), vtable)
 +            }
 +            CValueInner::ByValPair(data, vtable) => {
 +                let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
 +                    kind: StackSlotKind::ExplicitSlot,
 +                    // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +                    // specify stack slot alignment.
 +                    size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
 +                        / 16
 +                        * 16,
 +                });
 +                let data_ptr = Pointer::stack_slot(stack_slot);
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                data_ptr.store(fx, data, flags);
 +
 +                (data_ptr.get_addr(fx), vtable)
 +            }
 +            CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
 +                unreachable!("dyn_star_force_data_on_stack({:?})", self)
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
 +        }
 +    }
 +
 +    /// Load a value with layout.abi of scalar
 +    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let clif_ty = match layout.abi {
 +                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
 +                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
 +                        .by(u32::try_from(count).unwrap())
 +                        .unwrap(),
 +                    _ => unreachable!("{:?}", layout.ty),
 +                };
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                ptr.load(fx, clif_ty, flags)
 +            }
 +            CValueInner::ByVal(value) => value,
 +            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
 +            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
 +        }
 +    }
 +
 +    /// Load a value pair with layout.abi of scalar pair
 +    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let (a_scalar, b_scalar) = match layout.abi {
 +                    Abi::ScalarPair(a, b) => (a, b),
 +                    _ => unreachable!("load_scalar_pair({:?})", self),
 +                };
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
 +                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let val1 = ptr.load(fx, clif_ty1, flags);
 +                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
 +                (val1, val2)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => {
 +                bug!("load_scalar_pair for unsized value not allowed")
 +            }
 +            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
 +            CValueInner::ByValPair(val1, val2) => (val1, val2),
 +        }
 +    }
 +
 +    pub(crate) fn value_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CValue<'tcx> {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByVal(val) => match layout.abi {
 +                Abi::Vector { element: _, count } => {
 +                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
 +                    let field = u8::try_from(field.index()).unwrap();
 +                    assert!(field < count);
 +                    let lane = fx.bcx.ins().extractlane(val, field);
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(lane, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByValPair(val1, val2) => match layout.abi {
 +                Abi::ScalarPair(_, _) => {
 +                    let val = match field.as_u32() {
 +                        0 => val1,
 +                        1 => val2,
 +                        _ => bug!("field should be 0 or 1"),
 +                    };
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(val, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByRef(ptr, None) => {
 +                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
 +                CValue::by_ref(field_ptr, field_layout)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
 +    /// such that you can access individual lanes.
 +    pub(crate) fn value_lane(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        lane_idx: u64,
 +    ) -> CValue<'tcx> {
 +        let layout = self.1;
 +        assert!(layout.ty.is_simd());
 +        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +        let lane_layout = fx.layout_of(lane_ty);
 +        assert!(lane_idx < lane_count);
 +        match self.0 {
 +            CValueInner::ByVal(val) => match layout.abi {
 +                Abi::Vector { element: _, count: _ } => {
 +                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
 +                    let lane_idx = u8::try_from(lane_idx).unwrap();
 +                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
 +                    CValue::by_val(lane, lane_layout)
 +                }
 +                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByValPair(_, _) => unreachable!(),
 +            CValueInner::ByRef(ptr, None) => {
 +                let field_offset = lane_layout.size * lane_idx;
 +                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
 +                CValue::by_ref(field_ptr, lane_layout)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => unreachable!(),
 +        }
 +    }
 +
 +    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
 +        crate::unsize::coerce_unsized_into(fx, self, dest);
 +    }
 +
 +    pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
 +        crate::unsize::coerce_dyn_star(fx, self, dest);
 +    }
 +
 +    /// If `ty` is signed, `const_val` must already be sign extended.
 +    pub(crate) fn const_val(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +        const_val: ty::ScalarInt,
 +    ) -> CValue<'tcx> {
 +        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
 +        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
 +
 +        let clif_ty = fx.clif_type(layout.ty).unwrap();
 +
 +        if let ty::Bool = layout.ty.kind() {
 +            assert!(
 +                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
 +                "Invalid bool 0x{:032X}",
 +                const_val
 +            );
 +        }
 +
 +        let val = match layout.ty.kind() {
 +            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                let const_val = const_val.to_bits(layout.size).unwrap();
 +                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
 +                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
 +                fx.bcx.ins().iconcat(lsb, msb)
 +            }
 +            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
 +                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
 +            }
 +            ty::Float(FloatTy::F32) => {
 +                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
 +            }
 +            ty::Float(FloatTy::F64) => {
 +                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
 +            }
 +            _ => panic!(
 +                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
 +                layout.ty
 +            ),
 +        };
 +
 +        CValue::by_val(val, layout)
 +    }
 +
 +    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
 +        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert_eq!(self.layout().abi, layout.abi);
 +        CValue(self.0, layout)
 +    }
 +}
 +
 +/// A place where you can write a value to or read a value from
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CPlace<'tcx> {
 +    inner: CPlaceInner,
 +    layout: TyAndLayout<'tcx>,
 +}
 +
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) enum CPlaceInner {
 +    Var(Local, Variable),
 +    VarPair(Local, Variable, Variable),
 +    VarLane(Local, Variable, u8),
 +    Addr(Pointer, Option<Value>),
 +}
 +
 +impl<'tcx> CPlace<'tcx> {
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.layout
 +    }
 +
 +    pub(crate) fn inner(&self) -> &CPlaceInner {
 +        &self.inner
 +    }
 +
 +    pub(crate) fn new_stack_slot(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        assert!(layout.is_sized());
 +        if layout.size.bytes() == 0 {
 +            return CPlace {
 +                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
 +                layout,
 +            };
 +        }
 +
 +        if layout.size.bytes() >= u64::from(u32::MAX - 16) {
 +            fx.tcx
 +                .sess
 +                .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
 +        }
 +
 +        let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
 +            kind: StackSlotKind::ExplicitSlot,
 +            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +            // specify stack slot alignment.
 +            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
 +        });
 +        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
 +    }
 +
 +    pub(crate) fn new_var(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
-         let var1 = Variable::with_u32(fx.next_ssa_var);
++        let var = Variable::from_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
 +        CPlace { inner: CPlaceInner::Var(local, var), layout }
 +    }
 +
 +    pub(crate) fn new_var_pair(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
-         let var2 = Variable::with_u32(fx.next_ssa_var);
++        let var1 = Variable::from_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
-                 _ if src_ty.is_vector() && dst_ty.is_vector() => {
-                     fx.bcx.ins().raw_bitcast(dst_ty, data)
-                 }
++        let var2 = Variable::from_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +
 +        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
 +        fx.bcx.declare_var(var1, ty1);
 +        fx.bcx.declare_var(var2, ty2);
 +        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
 +    }
 +
 +    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
 +    }
 +
 +    pub(crate) fn for_ptr_with_extra(
 +        ptr: Pointer,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
 +    }
 +
 +    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
 +        let layout = self.layout();
 +        match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let val1 = fx.bcx.use_var(var1);
 +                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
 +                let val2 = fx.bcx.use_var(var2);
 +                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
 +                CValue::by_val_pair(val1, val2, layout)
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                let val = fx.bcx.ins().extractlane(val, lane);
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::Addr(ptr, extra) => {
 +                if let Some(extra) = extra {
 +                    CValue::by_ref_unsized(ptr, extra, layout)
 +                } else {
 +                    CValue::by_ref(ptr, layout)
 +                }
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr(self) -> Pointer {
 +        match self.to_ptr_maybe_unsized() {
 +            (ptr, None) => ptr,
 +            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
 +        match self.inner {
 +            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
 +            CPlaceInner::Var(_, _)
 +            | CPlaceInner::VarPair(_, _, _)
 +            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
 +        assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
 +
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
 +    }
 +
 +    pub(crate) fn write_cvalue_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +    ) {
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
 +    }
 +
 +    fn write_cvalue_maybe_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +        method: &'static str,
 +    ) {
 +        fn transmute_value<'tcx>(
 +            fx: &mut FunctionCx<'_, '_, 'tcx>,
 +            var: Variable,
 +            data: Value,
 +            dst_ty: Type,
 +        ) {
 +            let src_ty = fx.bcx.func.dfg.value_type(data);
 +            assert_eq!(
 +                src_ty.bytes(),
 +                dst_ty.bytes(),
 +                "write_cvalue_transmute: {:?} -> {:?}",
 +                src_ty,
 +                dst_ty,
 +            );
 +            let data = match (src_ty, dst_ty) {
 +                (_, _) if src_ty == dst_ty => data,
 +
 +                // This is a `write_cvalue_transmute`.
 +                (types::I32, types::F32)
 +                | (types::F32, types::I32)
 +                | (types::I64, types::F64)
 +                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
-                 let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
++                _ if src_ty.is_vector() && dst_ty.is_vector() => fx.bcx.ins().bitcast(dst_ty, data),
 +                _ if src_ty.is_vector() || dst_ty.is_vector() => {
 +                    // FIXME do something more efficient for transmutes between vectors and integers.
 +                    let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
 +                        kind: StackSlotKind::ExplicitSlot,
 +                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +                        // specify stack slot alignment.
 +                        size: (src_ty.bytes() + 15) / 16 * 16,
 +                    });
 +                    let ptr = Pointer::stack_slot(stack_slot);
 +                    ptr.store(fx, data, MemFlags::trusted());
 +                    ptr.load(fx, dst_ty, MemFlags::trusted())
 +                }
 +
 +                // `CValue`s should never contain SSA-only types, so if you ended
 +                // up here having seen an error like `B1 -> I8`, then before
 +                // calling `write_cvalue` you need to add a `bint` instruction.
 +                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
 +            };
 +            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +            fx.bcx.def_var(var, data);
 +        }
 +
 +        assert_eq!(self.layout().size, from.layout().size);
 +
 +        if fx.clif_comments.enabled() {
 +            use cranelift_codegen::cursor::{Cursor, CursorPosition};
 +            let cur_block = match fx.bcx.cursor().position() {
 +                CursorPosition::After(block) => block,
 +                _ => unreachable!(),
 +            };
 +            fx.add_comment(
 +                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
 +                format!(
 +                    "{}: {:?}: {:?} <- {:?}: {:?}",
 +                    method,
 +                    self.inner(),
 +                    self.layout().ty,
 +                    from.0,
 +                    from.layout().ty
 +                ),
 +            );
 +        }
 +
 +        let dst_layout = self.layout();
 +        let to_ptr = match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                if let ty::Array(element, len) = dst_layout.ty.kind() {
 +                    // Can only happen for vector types
 +                    let len =
 +                        u32::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
 +                    let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
 +
 +                    let data = match from.0 {
 +                        CValueInner::ByRef(ptr, None) => {
 +                            let mut flags = MemFlags::new();
 +                            flags.set_notrap();
 +                            ptr.load(fx, vector_ty, flags)
 +                        }
 +                        CValueInner::ByVal(_)
 +                        | CValueInner::ByValPair(_, _)
 +                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
 +                    };
 +
 +                    fx.bcx.def_var(var, data);
 +                    return;
 +                }
 +                let data = CValue(from.0, dst_layout).load_scalar(fx);
 +                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var, data, dst_ty);
 +                return;
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
++                let (ptr, meta) = from.force_stack(fx);
++                assert!(meta.is_none());
++                let (data1, data2) =
++                    CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx);
 +                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var1, data1, dst_ty1);
 +                transmute_value(fx, var2, data2, dst_ty2);
 +                return;
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let data = from.load_scalar(fx);
 +
 +                // First get the old vector
 +                let vector = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +
 +                // Next insert the written lane into the vector
 +                let vector = fx.bcx.ins().insertlane(vector, data, lane);
 +
 +                // Finally write the new vector
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                fx.bcx.def_var(var, vector);
 +
 +                return;
 +            }
 +            CPlaceInner::Addr(ptr, None) => {
 +                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
 +                    return;
 +                }
 +                ptr
 +            }
 +            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
 +        };
 +
 +        let mut flags = MemFlags::new();
 +        flags.set_notrap();
 +        match from.layout().abi {
 +            // FIXME make Abi::Vector work too
 +            Abi::Scalar(_) => {
 +                let val = from.load_scalar(fx);
 +                to_ptr.store(fx, val, flags);
 +                return;
 +            }
 +            Abi::ScalarPair(a_scalar, b_scalar) => {
 +                let (value, extra) = from.load_scalar_pair(fx);
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                to_ptr.store(fx, value, flags);
 +                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
 +                return;
 +            }
 +            _ => {}
 +        }
 +
 +        match from.0 {
 +            CValueInner::ByVal(val) => {
 +                to_ptr.store(fx, val, flags);
 +            }
 +            CValueInner::ByValPair(_, _) => {
 +                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
 +            }
 +            CValueInner::ByRef(from_ptr, None) => {
 +                let from_addr = from_ptr.get_addr(fx);
 +                let to_addr = to_ptr.get_addr(fx);
 +                let src_layout = from.1;
 +                let size = dst_layout.size.bytes();
 +                let src_align = src_layout.align.abi.bytes() as u8;
 +                let dst_align = dst_layout.align.abi.bytes() as u8;
 +                fx.bcx.emit_small_memory_copy(
 +                    fx.target_config,
 +                    to_addr,
 +                    from_addr,
 +                    size,
 +                    dst_align,
 +                    src_align,
 +                    true,
 +                    flags,
 +                );
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    pub(crate) fn place_opaque_cast(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        ty: Ty<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        CPlace { inner: self.inner, layout: fx.layout_of(ty) }
 +    }
 +
 +    pub(crate) fn place_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CPlace<'tcx> {
 +        let layout = self.layout();
 +
 +        match self.inner {
 +            CPlaceInner::Var(local, var) => match layout.ty.kind() {
 +                ty::Array(_, _) => {
 +                    // Can only happen for vector types
 +                    return CPlace {
 +                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
 +                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                    };
 +                }
 +                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
 +                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
 +
 +                    match f0_ty.kind() {
 +                        ty::Array(_, _) => {
 +                            assert_eq!(field.as_u32(), 0);
 +                            return CPlace {
 +                                inner: CPlaceInner::Var(local, var),
 +                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                            };
 +                        }
 +                        _ => {
 +                            return CPlace {
 +                                inner: CPlaceInner::VarLane(
 +                                    local,
 +                                    var,
 +                                    field.as_u32().try_into().unwrap(),
 +                                ),
 +                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                            };
 +                        }
 +                    }
 +                }
 +                _ => {}
 +            },
 +            CPlaceInner::VarPair(local, var1, var2) => {
 +                let layout = layout.field(&*fx, field.index());
 +
 +                match field.as_u32() {
 +                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
 +                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
 +                    _ => unreachable!("field should be 0 or 1"),
 +                }
 +            }
 +            _ => {}
 +        }
 +
 +        let (base, extra) = self.to_ptr_maybe_unsized();
 +
 +        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
 +        if field_layout.is_unsized() {
 +            if let ty::Foreign(_) = field_layout.ty.kind() {
 +                assert!(extra.is_none());
 +                CPlace::for_ptr(field_ptr, field_layout)
 +            } else {
 +                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
 +            }
 +        } else {
 +            CPlace::for_ptr(field_ptr, field_layout)
 +        }
 +    }
 +
 +    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
 +    /// such that you can access individual lanes.
 +    pub(crate) fn place_lane(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        lane_idx: u64,
 +    ) -> CPlace<'tcx> {
 +        let layout = self.layout();
 +        assert!(layout.ty.is_simd());
 +        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +        let lane_layout = fx.layout_of(lane_ty);
 +        assert!(lane_idx < lane_count);
 +
 +        match self.inner {
 +            CPlaceInner::Var(local, var) => {
 +                assert!(matches!(layout.abi, Abi::Vector { .. }));
 +                CPlace {
 +                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
 +                    layout: lane_layout,
 +                }
 +            }
 +            CPlaceInner::VarPair(_, _, _) => unreachable!(),
 +            CPlaceInner::VarLane(_, _, _) => unreachable!(),
 +            CPlaceInner::Addr(ptr, None) => {
 +                let field_offset = lane_layout.size * lane_idx;
 +                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
 +                CPlace::for_ptr(field_ptr, lane_layout)
 +            }
 +            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
 +        }
 +    }
 +
 +    pub(crate) fn place_index(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        index: Value,
 +    ) -> CPlace<'tcx> {
 +        let (elem_layout, ptr) = match self.layout().ty.kind() {
 +            ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
 +            ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
 +            _ => bug!("place_index({:?})", self.layout().ty),
 +        };
 +
 +        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
 +
 +        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
 +    }
 +
 +    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
 +        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
 +        if has_ptr_meta(fx.tcx, inner_layout.ty) {
 +            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
 +            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
 +        } else {
 +            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
 +        }
 +    }
 +
 +    pub(crate) fn place_ref(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        if has_ptr_meta(fx.tcx, self.layout().ty) {
 +            let (ptr, extra) = self.to_ptr_maybe_unsized();
 +            CValue::by_val_pair(
 +                ptr.get_addr(fx),
 +                extra.expect("unsized type without metadata"),
 +                layout,
 +            )
 +        } else {
 +            CValue::by_val(self.to_ptr().get_addr(fx), layout)
 +        }
 +    }
 +
 +    pub(crate) fn downcast_variant(
 +        self,
 +        fx: &FunctionCx<'_, '_, 'tcx>,
 +        variant: VariantIdx,
 +    ) -> Self {
 +        assert!(self.layout().is_sized());
 +        let layout = self.layout().for_variant(fx, variant);
 +        CPlace { inner: self.inner, layout }
 +    }
 +}
 +
 +#[track_caller]
 +pub(crate) fn assert_assignable<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    from_ty: Ty<'tcx>,
 +    to_ty: Ty<'tcx>,
 +    limit: usize,
 +) {
 +    if limit == 0 {
 +        // assert_assignable exists solely to catch bugs in cg_clif. it isn't necessary for
 +        // soundness. don't attempt to check deep types to avoid exponential behavior in certain
 +        // cases.
 +        return;
 +    }
 +    match (from_ty.kind(), to_ty.kind()) {
 +        (ty::Ref(_, a, _), ty::Ref(_, b, _))
 +        | (
 +            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
 +            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
 +        ) => {
 +            assert_assignable(fx, *a, *b, limit - 1);
 +        }
 +        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
 +        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
 +            assert_assignable(fx, *a, *b, limit - 1);
 +        }
 +        (ty::FnPtr(_), ty::FnPtr(_)) => {
 +            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +                ParamEnv::reveal_all(),
 +                from_ty.fn_sig(fx.tcx),
 +            );
 +            let to_sig = fx
 +                .tcx
 +                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
 +            assert_eq!(
 +                from_sig, to_sig,
 +                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
 +                from_sig, to_sig, fx,
 +            );
 +            // fn(&T) -> for<'l> fn(&'l T) is allowed
 +        }
 +        (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
 +            // FIXME(dyn-star): Do the right thing with DynKinds
 +            for (from, to) in from_traits.iter().zip(to_traits) {
 +                let from =
 +                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
 +                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
 +                assert_eq!(
 +                    from, to,
 +                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
 +                    from_traits, to_traits, fx,
 +                );
 +            }
 +            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
 +        }
 +        (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
 +            let mut types_a = types_a.iter();
 +            let mut types_b = types_b.iter();
 +            loop {
 +                match (types_a.next(), types_b.next()) {
 +                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
 +                    (None, None) => return,
 +                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
 +                }
 +            }
 +        }
 +        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
 +            if adt_def_a.did() == adt_def_b.did() =>
 +        {
 +            let mut types_a = substs_a.types();
 +            let mut types_b = substs_b.types();
 +            loop {
 +                match (types_a.next(), types_b.next()) {
 +                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
 +                    (None, None) => return,
 +                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
 +                }
 +            }
 +        }
 +        (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
 +        (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
 +            if def_id_a == def_id_b =>
 +        {
 +            let mut types_a = substs_a.types();
 +            let mut types_b = substs_b.types();
 +            loop {
 +                match (types_a.next(), types_b.next()) {
 +                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
 +                    (None, None) => return,
 +                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
 +                }
 +            }
 +        }
 +        (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
 +            // No way to check if it is correct or not with polymorphization enabled
 +        }
 +        _ => {
 +            assert_eq!(
 +                from_ty,
 +                to_ty,
 +                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
 +                from_ty.kind(),
 +                to_ty.kind(),
 +                fx,
 +            );
 +        }
 +    }
 +}
index 3d929a1d50ce2435307a7834c1fe04c1c73b94bd,0000000000000000000000000000000000000000..13e7784539d5a9b9cdb3c81dd7f548afcb0c418b
mode 100755,000000..100755
--- /dev/null
@@@ -1,2 -1,0 +1,2 @@@
- exec ./y.rs test
 +#!/usr/bin/env bash
++exec ./y.rs test "$@"
index f177b91c2c4876a4ac7b62dd653a3e4ea0857453,0000000000000000000000000000000000000000..02e1e21ade1de98f4d72256adbe70755a3997a55
mode 100755,000000..100755
--- /dev/null
@@@ -1,31 -1,0 +1,31 @@@
- rustc $0 -o ${0/.rs/.bin} -Cdebuginfo=1
 +#!/usr/bin/env bash
 +#![deny(unsafe_code)] /*This line is ignored by bash
 +# This block is ignored by rustc
 +set -e
 +echo "[BUILD] y.rs" 1>&2
++rustc $0 -o ${0/.rs/.bin} -Cdebuginfo=1 --edition 2021
 +exec ${0/.rs/.bin} $@
 +*/
 +
 +//! The build system for cg_clif
 +//!
 +//! # Manual compilation
 +//!
 +//! If your system doesn't support shell scripts you can manually compile and run this file using
 +//! for example:
 +//!
 +//! ```shell
 +//! $ rustc y.rs -o y.bin
 +//! $ ./y.bin
 +//! ```
 +//!
 +//! # Naming
 +//!
 +//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
 +
 +#[path = "build_system/mod.rs"]
 +mod build_system;
 +
 +fn main() {
 +    build_system::main();
 +}