--- /dev/null
- name: cg_clif-${{ runner.os }}
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ rustfmt:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install rustfmt
+ run: |
+ rustup component add rustfmt
+
+ - name: Rustfmt
+ run: |
+ cargo fmt --check
+
+ build:
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - os: ubuntu-latest
++ env:
++ TARGET_TRIPLE: x86_64-unknown-linux-gnu
+ - os: macos-latest
++ env:
++ TARGET_TRIPLE: x86_64-apple-darwin
+ # cross-compile from Linux to Windows using mingw
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: aarch64-unknown-linux-gnu
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Install MinGW toolchain and wine
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
+ rustup target add x86_64-pc-windows-gnu
+
+ - name: Install AArch64 toolchain and qemu
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./y.rs prepare
+
+ - name: Build without unstable features
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ # This is the config rust-lang/rust uses for builds
+ run: ./y.rs build --no-unstable-features
+
+ - name: Build
+ run: ./y.rs build --sysroot none
+
+ - name: Test
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ # Enable extra checks
+ export CG_CLIF_ENABLE_VERIFIER=1
+
+ ./y.rs test
+
+ - name: Package prebuilt cg_clif
+ run: tar cvfJ cg_clif.tar.xz build
+
+ - name: Upload prebuilt cg_clif
+ if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v2
+ with:
- build_windows:
- runs-on: windows-latest
++ name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
+ path: cg_clif.tar.xz
+
+ - name: Upload prebuilt cg_clif (cross compile)
+ if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v2
+ with:
+ name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
+ path: cg_clif.tar.xz
+
- #- name: Cache cargo installed crates
- # uses: actions/cache@v2
- # with:
- # path: ~/.cargo/bin
- # key: ${{ runner.os }}-cargo-installed-crates
-
- #- name: Cache cargo registry and index
- # uses: actions/cache@v2
- # with:
- # path: |
- # ~/.cargo/registry
- # ~/.cargo/git
- # key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
-
- #- name: Cache cargo target dir
- # uses: actions/cache@v2
- # with:
- # path: target
- # key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
++ windows:
++ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
++ strategy:
++ fail-fast: false
++ matrix:
++ include:
++ # Native Windows build with MSVC
++ - os: windows-latest
++ env:
++ TARGET_TRIPLE: x86_64-pc-windows-msvc
++ # cross-compile from Windows to Windows MinGW
++ - os: windows-latest
++ env:
++ TARGET_TRIPLE: x86_64-pc-windows-gnu
++
+ steps:
+ - uses: actions/checkout@v3
+
- rustup set default-host x86_64-pc-windows-gnu
++ - name: Cache cargo installed crates
++ uses: actions/cache@v2
++ with:
++ path: ~/.cargo/bin
++ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-installed-crates
++
++ - name: Cache cargo registry and index
++ uses: actions/cache@v2
++ with:
++ path: |
++ ~/.cargo/registry
++ ~/.cargo/git
++ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
++
++ - name: Cache cargo target dir
++ uses: actions/cache@v2
++ with:
++ path: target
++ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
++
++ - name: Set MinGW as the default toolchain
++ if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
++ run: rustup set default-host x86_64-pc-windows-gnu
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ git config --global core.autocrlf false
- #name: Test
+ rustc y.rs -o y.exe -g
+ ./y.exe prepare
+
++ - name: Build without unstable features
++ env:
++ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
++ # This is the config rust-lang/rust uses for builds
++ run: ./y.rs build --no-unstable-features
++
+ - name: Build
- #$Env:RUST_BACKTRACE=1
++ run: ./y.rs build --sysroot none
++
++ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
- #$Env:COMPILE_RUNS=2
- #$Env:RUN_RUNS=2
++ $Env:RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
- #$Env:CG_CLIF_ENABLE_VERIFIER=1
-
- ./y.exe build
++ $Env:COMPILE_RUNS=2
++ $Env:RUN_RUNS=2
+
+ # Enable extra checks
- name: cg_clif-${{ runner.os }}
++ $Env:CG_CLIF_ENABLE_VERIFIER=1
++
++ # WIP Disable some tests
++
++ # This fails due to some weird argument handling by hyperfine, not an actual regression
++ # more of a build system issue
++ (Get-Content config.txt) -replace '(bench.simple-raytracer)', '# $1' | Out-File config.txt
++
++ # This fails with a different output than expected
++ (Get-Content config.txt) -replace '(test.regex-shootout-regex-dna)', '# $1' | Out-File config.txt
++
++ ./y.exe test
+
+ - name: Package prebuilt cg_clif
+ # don't use compression as xzip isn't supported by tar on windows and bzip2 hangs
+ run: tar cvf cg_clif.tar build
+
+ - name: Upload prebuilt cg_clif
+ uses: actions/upload-artifact@v2
+ with:
++ name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
+ path: cg_clif.tar
--- /dev/null
- /rand
- /regex
- /simple-raytracer
- /portable-simd
- /abi-checker
+target
+**/*.rs.bk
+*.rlib
+*.o
+perf.data
+perf.data.old
+*.events
+*.string*
+/y.bin
+/y.bin.dSYM
+/y.exe
+/y.pdb
+/build
+/build_sysroot/sysroot_src
+/build_sysroot/compiler-builtins
+/build_sysroot/rustc_version
+/rust
++/download
--- /dev/null
- //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+{
+ // source for rustc_* is not included in the rust-src component; disable the errors about this
+ "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "unresolved-macro-call"],
+ "rust-analyzer.imports.granularity.enforce": true,
+ "rust-analyzer.imports.granularity.group": "module",
+ "rust-analyzer.imports.prefix": "crate",
+ "rust-analyzer.cargo.features": ["unstable-features"],
+ "rust-analyzer.linkedProjects": [
+ "./Cargo.toml",
- "roots": ["./scripts/filter_profile.rs"],
++ //"./build_sysroot/sysroot_src/library/std/Cargo.toml",
+ {
+ "roots": [
+ "./example/mini_core.rs",
+ "./example/mini_core_hello_world.rs",
+ "./example/mod_bench.rs"
+ ],
+ "crates": [
+ {
+ "root_module": "./example/mini_core.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mini_core_hello_world.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 0, "name": "mini_core" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mod_bench.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
- "root_module": "./scripts/filter_profile.rs",
++ "roots": ["./example/std_example.rs"],
+ "crates": [
+ {
++ "root_module": "./example/std_example.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
+ "roots": ["./y.rs"],
+ "crates": [
+ {
+ "root_module": "./y.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ }
+ ]
+}
--- /dev/null
- version = "0.87.0"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c794e162a5eff65c72ef524dfe393eb923c354e350bb78b9c7383df13f3bc142"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
++[[package]]
++name = "arrayvec"
++version = "0.7.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
++
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
++[[package]]
++name = "bumpalo"
++version = "3.11.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d"
++
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
- checksum = "93945adbccc8d731503d3038814a51e8317497c9e205411820348132fa01a358"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "44409ccf2d0f663920cab563d2b79fcd6b2e9a2bcc6e929fef76c8f82ad6c17a"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
- checksum = "2b482acc9d0d0d1ad3288a90a8150ee648be3dce8dc8c8669ff026f72debdc31"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "98de2018ad96eb97f621f7d6b900a0cc661aec8d02ea4a50e56ecb48e5a2fcaf"
+dependencies = [
++ "arrayvec",
++ "bumpalo",
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "cranelift-isle",
+ "gimli",
+ "log",
+ "regalloc2",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
- checksum = "f9ec188d71e663192ef9048f204e410a7283b609942efc9fcc77da6d496edbb8"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "5287ce36e6c4758fbaf298bd1a8697ad97a4f2375a3d1b61142ea538db4877e5"
+dependencies = [
+ "cranelift-codegen-shared",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
- checksum = "3ad794b1b1c2c7bd9f7b76cfe0f084eaf7753e55d56191c3f7d89e8fa4978b99"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "2855c24219e2f08827f3f4ffb2da92e134ae8d8ecc185b11ec8f9878cf5f588e"
+
+[[package]]
+name = "cranelift-entity"
- checksum = "342da0d5056f4119d3c311c4aab2460ceb6ee6e127bb395b76dd2279a09ea7a5"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "0b65673279d75d34bf11af9660ae2dbd1c22e6d28f163f5c72f4e1dc56d56103"
+
+[[package]]
+name = "cranelift-frontend"
- checksum = "dfff792f775b07d4d9cfe9f1c767ce755c6cbadda1bbd6db18a1c75ff9f7376a"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "3ed2b3d7a4751163f6c4a349205ab1b7d9c00eecf19dcea48592ef1f7688eefc"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-isle"
- checksum = "8d51089478849f2ac8ef60a8a2d5346c8d4abfec0e45ac5b24530ef9f9499e1e"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "3be64cecea9d90105fc6a2ba2d003e98c867c1d6c4c86cc878f97ad9fb916293"
+
+[[package]]
+name = "cranelift-jit"
- checksum = "095936e41720f86004b4c57ce88e6a13af28646bb3a6fb4afbebd5ae90c50029"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "f98ed42a70a0c9c388e34ec9477f57fc7300f541b1e5136a0e2ea02b1fac6015"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "windows-sys",
+]
+
+[[package]]
+name = "cranelift-module"
- checksum = "704a1aea4723d97eafe0fb7af110f6f6868b1ac95f5380bbc9adb2a3b8cf97e8"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "d658ac7f156708bfccb647216cc8b9387469f50d352ba4ad80150541e4ae2d49"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+]
+
+[[package]]
+name = "cranelift-native"
- checksum = "885debe62f2078638d6585f54c9f05f5c2008f22ce5a2a9100ada785fc065dbd"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.87.0"
++checksum = "c4a03a6ac1b063e416ca4b93f6247978c991475e8271465340caa6f92f3c16a4"
+dependencies = [
+ "cranelift-codegen",
+ "libc",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
- checksum = "aac1310cf1081ae8eca916c92cd163b977c77cab6e831fa812273c26ff921816"
++version = "0.88.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.6.7"
++checksum = "eef0b4119b645b870a43a036d76c0ada3a076b1f82e8b8487659304c8b09049b"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.127"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b"
+
+[[package]]
+name = "libloading"
- checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
++version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "object"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
+dependencies = [
+ "crc32fast",
+ "hashbrown",
+ "indexmap",
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
+
+[[package]]
+name = "regalloc2"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779"
+dependencies = [
+ "fxhash",
+ "log",
+ "slice-group-by",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "once_cell",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "slice-group-by"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
+
+[[package]]
+name = "smallvec"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
--- /dev/null
- cranelift-codegen = { version = "0.87.0", features = ["unwind", "all-arch"] }
- cranelift-frontend = "0.87.0"
- cranelift-module = "0.87.0"
- cranelift-native = "0.87.0"
- cranelift-jit = { version = "0.87.0", optional = true }
- cranelift-object = "0.87.0"
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
- libloading = { version = "0.6.0", optional = true }
++cranelift-codegen = { version = "0.88.1", features = ["unwind", "all-arch"] }
++cranelift-frontend = "0.88.1"
++cranelift-module = "0.88.1"
++cranelift-native = "0.88.1"
++cranelift-jit = { version = "0.88.1", optional = true }
++cranelift-object = "0.88.1"
+target-lexicon = "0.12.0"
+gimli = { version = "0.26.0", default-features = false, features = ["write"]}
+object = { version = "0.29.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.9.1"
++libloading = { version = "0.7.3", optional = true }
+once_cell = "1.10.0"
+smallvec = "1.8.1"
+
+[patch.crates-io]
+# Uncomment to use local checkout of cranelift
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-native = { path = "../wasmtime/cranelift/native" }
+#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#gimli = { path = "../" }
+
+[features]
+# Enable features not ready to be enabled when compiling as part of rustc
+unstable-features = ["jit", "inline_asm"]
+jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+[package.metadata.rust-analyzer]
+rustc_private = true
--- /dev/null
- version = "0.1.79"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "cc"
+version = "1.0.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
++[[package]]
++name = "cfg-if"
++version = "1.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
++dependencies = [
++ "compiler_builtins",
++ "rustc-std-workspace-core",
++]
++
+[[package]]
+name = "compiler_builtins"
- checksum = "4f873ce2bd3550b0b565f878b3d04ea8253f4259dc3d20223af2e1ba86f5ecca"
++version = "0.1.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.5"
++checksum = "18cd7635fea7bb481ea543b392789844c1ad581299da70184c7175ce3af76603"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "203540e710bfadb90e5e29930baf5d10270cec1f43ab34f46f78b147b2de715a"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
- checksum = "897cd85af6387be149f55acf168e41be176a02de7872403aaab184afc2f327e6"
++version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.132"
++checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
- checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
++version = "0.2.135"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cfg-if",
++checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2"
+dependencies = [
+ "compiler_builtins",
+ "memchr",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "alloc",
- "cfg-if",
++ "cfg-if 0.1.10",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
- "cfg-if",
++ "cfg-if 0.1.10",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
- "cfg-if",
++ "cfg-if 1.0.0",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "std_detect",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "std_detect"
+version = "0.1.5"
+dependencies = [
- "cfg-if",
++ "cfg-if 1.0.0",
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
- version = "0.1.9"
++ "cfg-if 0.1.10",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+]
+
+[[package]]
+name = "unicode-width"
- checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
++version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cfg-if",
++checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
++ "cfg-if 0.1.10",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
--- /dev/null
--- /dev/null
++use std::env;
++use std::path::Path;
++
++use super::build_sysroot;
++use super::config;
++use super::prepare;
++use super::utils::{cargo_command, spawn_and_wait};
++use super::SysrootKind;
++
++pub(crate) fn run(
++ channel: &str,
++ sysroot_kind: SysrootKind,
++ target_dir: &Path,
++ cg_clif_dylib: &Path,
++ host_triple: &str,
++ target_triple: &str,
++) {
++ if !config::get_bool("testsuite.abi-cafe") {
++ eprintln!("[SKIP] abi-cafe");
++ return;
++ }
++
++ if host_triple != target_triple {
++ eprintln!("[SKIP] abi-cafe (cross-compilation not supported)");
++ return;
++ }
++
++ eprintln!("Building sysroot for abi-cafe");
++ build_sysroot::build_sysroot(
++ channel,
++ sysroot_kind,
++ target_dir,
++ cg_clif_dylib,
++ host_triple,
++ target_triple,
++ );
++
++ eprintln!("Running abi-cafe");
++ let abi_cafe_path = prepare::ABI_CAFE.source_dir();
++ env::set_current_dir(abi_cafe_path.clone()).unwrap();
++
++ let pairs = ["rustc_calls_cgclif", "cgclif_calls_rustc", "cgclif_calls_cc", "cc_calls_cgclif"];
++
++ let mut cmd = cargo_command("cargo", "run", Some(target_triple), &abi_cafe_path);
++ cmd.arg("--");
++ cmd.arg("--pairs");
++ cmd.args(pairs);
++ cmd.arg("--add-rustc-codegen-backend");
++ cmd.arg(format!("cgclif:{}", cg_clif_dylib.display()));
++
++ spawn_and_wait(cmd);
++}
--- /dev/null
- use std::path::{Path, PathBuf};
- use std::process::Command;
+use std::env;
- use super::utils::is_ci;
++use std::path::PathBuf;
+
- let mut cmd = Command::new("cargo");
- cmd.arg("build").arg("--target").arg(host_triple);
++use super::rustc_info::get_file_name;
++use super::utils::{cargo_command, is_ci};
+
+pub(crate) fn build_backend(
+ channel: &str,
+ host_triple: &str,
+ use_unstable_features: bool,
+) -> PathBuf {
- Path::new("target").join(host_triple).join(channel)
++ let source_dir = std::env::current_dir().unwrap();
++ let mut cmd = cargo_command("cargo", "build", Some(host_triple), &source_dir);
+
+ cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
+
+ let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
+
+ if is_ci() {
+ // Deny warnings on CI
+ rustflags += " -Dwarnings";
+
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ cmd.env("CARGO_BUILD_INCREMENTAL", "false");
+ }
+
+ if use_unstable_features {
+ cmd.arg("--features").arg("unstable-features");
+ }
+
+ match channel {
+ "debug" => {}
+ "release" => {
+ cmd.arg("--release");
+ }
+ _ => unreachable!(),
+ }
+
+ cmd.env("RUSTFLAGS", rustflags);
+
+ eprintln!("[BUILD] rustc_codegen_cranelift");
+ super::utils::spawn_and_wait(cmd);
+
++ source_dir
++ .join("target")
++ .join(host_triple)
++ .join(channel)
++ .join(get_file_name("rustc_codegen_cranelift", "dylib"))
+}
--- /dev/null
- use super::utils::{spawn_and_wait, try_hard_link};
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{self, Command};
+
+use super::rustc_info::{get_file_name, get_rustc_version, get_wrapper_file_name};
- cg_clif_build_dir: &Path,
++use super::utils::{cargo_command, spawn_and_wait, try_hard_link};
+use super::SysrootKind;
+
+pub(crate) fn build_sysroot(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
- let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
++ cg_clif_dylib_src: &Path,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ eprintln!("[BUILD] sysroot {:?}", sysroot_kind);
+
+ if target_dir.exists() {
+ fs::remove_dir_all(target_dir).unwrap();
+ }
+ fs::create_dir_all(target_dir.join("bin")).unwrap();
+ fs::create_dir_all(target_dir.join("lib")).unwrap();
+
+ // Copy the backend
- .join(&cg_clif_dylib);
- try_hard_link(cg_clif_build_dir.join(cg_clif_dylib), &cg_clif_dylib_path);
+ let cg_clif_dylib_path = target_dir
+ .join(if cfg!(windows) {
+ // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
+ // binaries.
+ "bin"
+ } else {
+ "lib"
+ })
- let mut build_cmd = Command::new("cargo");
- build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
++ .join(get_file_name("rustc_codegen_cranelift", "dylib"));
++ try_hard_link(cg_clif_dylib_src, &cg_clif_dylib_path);
+
+ // Build and copy rustc and cargo wrappers
+ for wrapper in ["rustc-clif", "cargo-clif"] {
+ let wrapper_name = get_wrapper_file_name(wrapper, "bin");
+
+ let mut build_cargo_wrapper_cmd = Command::new("rustc");
+ build_cargo_wrapper_cmd
+ .arg(PathBuf::from("scripts").join(format!("{wrapper}.rs")))
+ .arg("-o")
+ .arg(target_dir.join(wrapper_name))
+ .arg("-g");
+ spawn_and_wait(build_cargo_wrapper_cmd);
+ }
+
+ let default_sysroot = super::rustc_info::get_default_sysroot();
+
+ let rustlib = target_dir.join("lib").join("rustlib");
+ let host_rustlib_lib = rustlib.join(host_triple).join("lib");
+ let target_rustlib_lib = rustlib.join(target_triple).join("lib");
+ fs::create_dir_all(&host_rustlib_lib).unwrap();
+ fs::create_dir_all(&target_rustlib_lib).unwrap();
+
+ if target_triple == "x86_64-pc-windows-gnu" {
+ if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
+ eprintln!(
+ "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
+ to compile a sysroot for it.",
+ );
+ process::exit(1);
+ }
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
+ continue; // only copy object files
+ }
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+
+ match sysroot_kind {
+ SysrootKind::None => {} // Nothing to do
+ SysrootKind::Llvm => {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ let file_name_str = file.file_name().unwrap().to_str().unwrap();
+ if (file_name_str.contains("rustc_")
+ && !file_name_str.contains("rustc_std_workspace_")
+ && !file_name_str.contains("rustc_demangle"))
+ || file_name_str.contains("chalk")
+ || file_name_str.contains("tracing")
+ || file_name_str.contains("regex")
+ {
+ // These are large crates that are part of the rustc-dev component and are not
+ // necessary to run regular programs.
+ continue;
+ }
+ try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
+ }
+
+ if target_triple != host_triple {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+ }
+ SysrootKind::Clif => {
+ build_clif_sysroot_for_triple(
+ channel,
+ target_dir,
+ host_triple,
+ &cg_clif_dylib_path,
+ None,
+ );
+
+ if host_triple != target_triple {
+ // When cross-compiling it is often necessary to manually pick the right linker
+ let linker = if target_triple == "aarch64-unknown-linux-gnu" {
+ Some("aarch64-linux-gnu-gcc")
+ } else {
+ None
+ };
+ build_clif_sysroot_for_triple(
+ channel,
+ target_dir,
+ target_triple,
+ &cg_clif_dylib_path,
+ linker,
+ );
+ }
+
+ // Copy std for the host to the lib dir. This is necessary for the jit mode to find
+ // libstd.
+ for file in fs::read_dir(host_rustlib_lib).unwrap() {
+ let file = file.unwrap().path();
+ let filename = file.file_name().unwrap().to_str().unwrap();
+ if filename.contains("std-") && !filename.contains(".rlib") {
+ try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
+ }
+ }
+ }
+ }
+}
+
+fn build_clif_sysroot_for_triple(
+ channel: &str,
+ target_dir: &Path,
+ triple: &str,
+ cg_clif_dylib_path: &Path,
+ linker: Option<&str>,
+) {
+ match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
+ Err(e) => {
+ eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
+ eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
+ process::exit(1);
+ }
+ Ok(source_version) => {
+ let rustc_version = get_rustc_version();
+ if source_version != rustc_version {
+ eprintln!("The patched sysroot source is outdated");
+ eprintln!("Source version: {}", source_version.trim());
+ eprintln!("Rustc version: {}", rustc_version.trim());
+ eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
+ process::exit(1);
+ }
+ }
+ }
+
+ let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
+
+ if !super::config::get_bool("keep_sysroot") {
+ // Cleanup the deps dir, but keep build scripts and the incremental cache for faster
+ // recompilation as they are not affected by changes in cg_clif.
+ if build_dir.join("deps").exists() {
+ fs::remove_dir_all(build_dir.join("deps")).unwrap();
+ }
+ }
+
+ // Build sysroot
++ let mut build_cmd = cargo_command("cargo", "build", Some(triple), Path::new("build_sysroot"));
+ let mut rustflags = "-Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
+ rustflags.push_str(&format!(" -Zcodegen-backend={}", cg_clif_dylib_path.to_str().unwrap()));
++ rustflags.push_str(&format!(" --sysroot={}", target_dir.to_str().unwrap()));
+ if channel == "release" {
+ build_cmd.arg("--release");
+ rustflags.push_str(" -Zmir-opt-level=3");
+ }
+ if let Some(linker) = linker {
+ use std::fmt::Write;
+ write!(rustflags, " -Clinker={}", linker).unwrap();
+ }
+ build_cmd.env("RUSTFLAGS", rustflags);
+ build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
+ spawn_and_wait(build_cmd);
+
+ // Copy all relevant files to the sysroot
+ for entry in
+ fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
+ .unwrap()
+ {
+ let entry = entry.unwrap();
+ if let Some(ext) = entry.path().extension() {
+ if ext == "rmeta" || ext == "d" || ext == "dSYM" || ext == "clif" {
+ continue;
+ }
+ } else {
+ continue;
+ };
+ try_hard_link(
+ entry.path(),
+ target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
+ );
+ }
+}
--- /dev/null
- use std::{fs, process};
++use std::fs;
++use std::process;
+
+fn load_config_file() -> Vec<(String, Option<String>)> {
+ fs::read_to_string("config.txt")
+ .unwrap()
+ .lines()
+ .map(|line| if let Some((line, _comment)) = line.split_once('#') { line } else { line })
+ .map(|line| line.trim())
+ .filter(|line| !line.is_empty())
+ .map(|line| {
+ if let Some((key, val)) = line.split_once('=') {
+ (key.trim().to_owned(), Some(val.trim().to_owned()))
+ } else {
+ (line.to_owned(), None)
+ }
+ })
+ .collect()
+}
+
+pub(crate) fn get_bool(name: &str) -> bool {
+ let values = load_config_file()
+ .into_iter()
+ .filter(|(key, _)| key == name)
+ .map(|(_, val)| val)
+ .collect::<Vec<_>>();
+ if values.is_empty() {
+ false
+ } else {
+ if values.iter().any(|val| val.is_some()) {
+ eprintln!("Boolean config `{}` has a value", name);
+ process::exit(1);
+ }
+ true
+ }
+}
+
+pub(crate) fn get_value(name: &str) -> Option<String> {
+ let values = load_config_file()
+ .into_iter()
+ .filter(|(key, _)| key == name)
+ .map(|(_, val)| val)
+ .collect::<Vec<_>>();
+ if values.is_empty() {
+ None
+ } else if values.len() == 1 {
+ if values[0].is_none() {
+ eprintln!("Config `{}` missing value", name);
+ process::exit(1);
+ }
+ values.into_iter().next().unwrap()
+ } else {
+ eprintln!("Config `{}` given multiple values: {:?}", name, values);
+ process::exit(1);
+ }
+}
--- /dev/null
- mod abi_checker;
+use std::env;
+use std::path::PathBuf;
+use std::process;
+
+use self::utils::is_ci;
+
- if target_triple.ends_with("-msvc") {
- eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
- eprintln!("Switch to the MinGW toolchain for Windows support.");
- eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
- eprintln!("set the global default target to MinGW");
- process::exit(1);
- }
-
- let cg_clif_build_dir =
- build_backend::build_backend(channel, &host_triple, use_unstable_features);
++mod abi_cafe;
+mod build_backend;
+mod build_sysroot;
+mod config;
+mod prepare;
+mod rustc_info;
+mod tests;
+mod utils;
+
+fn usage() {
+ eprintln!("Usage:");
+ eprintln!(" ./y.rs prepare");
+ eprintln!(
+ " ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
+ );
+ eprintln!(
+ " ./y.rs test [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
+ );
+}
+
+macro_rules! arg_error {
+ ($($err:tt)*) => {{
+ eprintln!($($err)*);
+ usage();
+ std::process::exit(1);
+ }};
+}
+
+#[derive(PartialEq, Debug)]
+enum Command {
+ Build,
+ Test,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum SysrootKind {
+ None,
+ Clif,
+ Llvm,
+}
+
+pub fn main() {
+ env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
+ env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
+ // The target dir is expected in the default location. Guard against the user changing it.
+ env::set_var("CARGO_TARGET_DIR", "target");
+
+ if is_ci() {
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ env::set_var("CARGO_BUILD_INCREMENTAL", "false");
+ }
+
+ let mut args = env::args().skip(1);
+ let command = match args.next().as_deref() {
+ Some("prepare") => {
+ if args.next().is_some() {
+ arg_error!("./y.rs prepare doesn't expect arguments");
+ }
+ prepare::prepare();
+ process::exit(0);
+ }
+ Some("build") => Command::Build,
+ Some("test") => Command::Test,
+ Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
+ Some(command) => arg_error!("Unknown command {}", command),
+ None => {
+ usage();
+ process::exit(0);
+ }
+ };
+
+ let mut target_dir = PathBuf::from("build");
+ let mut channel = "release";
+ let mut sysroot_kind = SysrootKind::Clif;
+ let mut use_unstable_features = true;
+ while let Some(arg) = args.next().as_deref() {
+ match arg {
+ "--target-dir" => {
+ target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
+ arg_error!("--target-dir requires argument");
+ }))
+ }
+ "--debug" => channel = "debug",
+ "--sysroot" => {
+ sysroot_kind = match args.next().as_deref() {
+ Some("none") => SysrootKind::None,
+ Some("clif") => SysrootKind::Clif,
+ Some("llvm") => SysrootKind::Llvm,
+ Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
+ None => arg_error!("--sysroot requires argument"),
+ }
+ }
+ "--no-unstable-features" => use_unstable_features = false,
+ flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
+ arg => arg_error!("Unexpected argument {}", arg),
+ }
+ }
+ target_dir = std::env::current_dir().unwrap().join(target_dir);
+
+ let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
+ host_triple
+ } else if let Some(host_triple) = config::get_value("host") {
+ host_triple
+ } else {
+ rustc_info::get_host_triple()
+ };
+ let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
+ if target_triple != "" {
+ target_triple
+ } else {
+ host_triple.clone() // Empty target triple can happen on GHA
+ }
+ } else if let Some(target_triple) = config::get_value("target") {
+ target_triple
+ } else {
+ host_triple.clone()
+ };
+
- &cg_clif_build_dir,
++ let cg_clif_dylib = build_backend::build_backend(channel, &host_triple, use_unstable_features);
+ match command {
+ Command::Test => {
+ tests::run_tests(
+ channel,
+ sysroot_kind,
+ &target_dir,
- abi_checker::run(
++ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+
- &cg_clif_build_dir,
++ abi_cafe::run(
+ channel,
+ sysroot_kind,
+ &target_dir,
- &cg_clif_build_dir,
++ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+ Command::Build => {
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ &target_dir,
++ &cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+ }
+}
--- /dev/null
- use std::ffi::OsString;
+use std::env;
+use std::ffi::OsStr;
- use std::path::Path;
+use std::fs;
- use super::utils::{copy_dir_recursively, spawn_and_wait};
++use std::path::{Path, PathBuf};
+use std::process::Command;
+
+use super::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
- clone_repo_shallow_github(
- "abi-checker",
- "Gankra",
- "abi-checker",
- "a2232d45f202846f5c02203c9f27355360f9a2ff",
- );
- apply_patches("abi-checker", Path::new("abi-checker"));
-
- clone_repo_shallow_github(
- "rand",
- "rust-random",
- "rand",
- "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
- );
- apply_patches("rand", Path::new("rand"));
-
- clone_repo_shallow_github(
- "regex",
- "rust-lang",
- "regex",
- "341f207c1071f7290e3f228c710817c280c8dca1",
- );
-
- clone_repo_shallow_github(
- "portable-simd",
- "rust-lang",
- "portable-simd",
- "b8d6b6844602f80af79cd96401339ec594d472d8",
- );
- apply_patches("portable-simd", Path::new("portable-simd"));
-
- clone_repo_shallow_github(
- "simple-raytracer",
- "ebobby",
- "simple-raytracer",
- "804a7a21b9e673a482797aa289a18ed480e4d813",
- );
++use super::utils::{cargo_command, copy_dir_recursively, spawn_and_wait};
++
++pub(crate) const ABI_CAFE: GitRepo = GitRepo::github(
++ "Gankra",
++ "abi-cafe",
++ "4c6dc8c9c687e2b3a760ff2176ce236872b37212",
++ "abi-cafe",
++);
++
++pub(crate) const RAND: GitRepo =
++ GitRepo::github("rust-random", "rand", "0f933f9c7176e53b2a3c7952ded484e1783f0bf1", "rand");
++
++pub(crate) const REGEX: GitRepo =
++ GitRepo::github("rust-lang", "regex", "341f207c1071f7290e3f228c710817c280c8dca1", "regex");
++
++pub(crate) const PORTABLE_SIMD: GitRepo = GitRepo::github(
++ "rust-lang",
++ "portable-simd",
++ "d5cd4a8112d958bd3a252327e0d069a6363249bd",
++ "portable-simd",
++);
++
++pub(crate) const SIMPLE_RAYTRACER: GitRepo = GitRepo::github(
++ "ebobby",
++ "simple-raytracer",
++ "804a7a21b9e673a482797aa289a18ed480e4d813",
++ "<none>",
++);
+
+pub(crate) fn prepare() {
++ if Path::new("download").exists() {
++ std::fs::remove_dir_all(Path::new("download")).unwrap();
++ }
++ std::fs::create_dir_all(Path::new("download")).unwrap();
++
+ prepare_sysroot();
+
++ // FIXME maybe install this only locally?
+ eprintln!("[INSTALL] hyperfine");
+ Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
+
- let mut build_cmd = Command::new("cargo");
- build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
++ ABI_CAFE.fetch();
++ RAND.fetch();
++ REGEX.fetch();
++ PORTABLE_SIMD.fetch();
++ SIMPLE_RAYTRACER.fetch();
+
+ eprintln!("[LLVM BUILD] simple-raytracer");
- Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
- Path::new("simple-raytracer").join(get_file_name("raytracer_cg_llvm", "bin")),
++ let build_cmd = cargo_command("cargo", "build", None, &SIMPLE_RAYTRACER.source_dir());
+ spawn_and_wait(build_cmd);
+ fs::copy(
- fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
++ SIMPLE_RAYTRACER
++ .source_dir()
++ .join("target")
++ .join("debug")
++ .join(get_file_name("main", "bin")),
++ SIMPLE_RAYTRACER.source_dir().join(get_file_name("raytracer_cg_llvm", "bin")),
+ )
+ .unwrap();
+}
+
+fn prepare_sysroot() {
+ let rustc_path = get_rustc_path();
+ let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
+ let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
+
+ assert!(sysroot_src_orig.exists());
+
+ if sysroot_src.exists() {
+ fs::remove_dir_all(&sysroot_src).unwrap();
+ }
+ fs::create_dir_all(sysroot_src.join("library")).unwrap();
+ eprintln!("[COPY] sysroot src");
+ copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
+
+ let rustc_version = get_rustc_version();
+ fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
+
+ eprintln!("[GIT] init");
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
+ spawn_and_wait(git_init_cmd);
+
+ init_git_repo(&sysroot_src);
+
+ apply_patches("sysroot", &sysroot_src);
+}
+
++pub(crate) struct GitRepo {
++ url: GitRepoUrl,
++ rev: &'static str,
++ patch_name: &'static str,
++}
++
++enum GitRepoUrl {
++ Github { user: &'static str, repo: &'static str },
++}
++
++impl GitRepo {
++ const fn github(
++ user: &'static str,
++ repo: &'static str,
++ rev: &'static str,
++ patch_name: &'static str,
++ ) -> GitRepo {
++ GitRepo { url: GitRepoUrl::Github { user, repo }, rev, patch_name }
++ }
++
++ pub(crate) fn source_dir(&self) -> PathBuf {
++ match self.url {
++ GitRepoUrl::Github { user: _, repo } => {
++ std::env::current_dir().unwrap().join("download").join(repo)
++ }
++ }
++ }
++
++ fn fetch(&self) {
++ match self.url {
++ GitRepoUrl::Github { user, repo } => {
++ clone_repo_shallow_github(&self.source_dir(), user, repo, self.rev);
++ }
++ }
++ apply_patches(self.patch_name, &self.source_dir());
++ }
++}
++
+#[allow(dead_code)]
- Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
++fn clone_repo(download_dir: &Path, repo: &str, rev: &str) {
+ eprintln!("[CLONE] {}", repo);
+ // Ignore exit code as the repo may already have been checked out
- clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
++ Command::new("git").arg("clone").arg(repo).arg(&download_dir).spawn().unwrap().wait().unwrap();
+
+ let mut clean_cmd = Command::new("git");
- checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
++ clean_cmd.arg("checkout").arg("--").arg(".").current_dir(&download_dir);
+ spawn_and_wait(clean_cmd);
+
+ let mut checkout_cmd = Command::new("git");
- fn clone_repo_shallow_github(target_dir: &str, username: &str, repo: &str, rev: &str) {
++ checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(download_dir);
+ spawn_and_wait(checkout_cmd);
+}
+
- clone_repo(target_dir, &format!("https://github.com/{}/{}.git", username, repo), rev);
++fn clone_repo_shallow_github(download_dir: &Path, user: &str, repo: &str, rev: &str) {
+ if cfg!(windows) {
+ // Older windows doesn't have tar or curl by default. Fall back to using git.
- let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", username, repo, rev);
- let archive_file = format!("{}.tar.gz", rev);
- let archive_dir = format!("{}-{}", repo, rev);
++ clone_repo(download_dir, &format!("https://github.com/{}/{}.git", user, repo), rev);
+ return;
+ }
+
- eprintln!("[DOWNLOAD] {}/{} from {}", username, repo, archive_url);
++ let downloads_dir = std::env::current_dir().unwrap().join("download");
+
- let _ = std::fs::remove_dir_all(target_dir);
++ let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", user, repo, rev);
++ let archive_file = downloads_dir.join(format!("{}.tar.gz", rev));
++ let archive_dir = downloads_dir.join(format!("{}-{}", repo, rev));
++
++ eprintln!("[DOWNLOAD] {}/{} from {}", user, repo, archive_url);
+
+ // Remove previous results if they exists
+ let _ = std::fs::remove_file(&archive_file);
+ let _ = std::fs::remove_dir_all(&archive_dir);
- unpack_cmd.arg("xf").arg(&archive_file);
++ let _ = std::fs::remove_dir_all(&download_dir);
+
+ // Download zip archive
+ let mut download_cmd = Command::new("curl");
+ download_cmd.arg("--location").arg("--output").arg(&archive_file).arg(archive_url);
+ spawn_and_wait(download_cmd);
+
+ // Unpack tar archive
+ let mut unpack_cmd = Command::new("tar");
- std::fs::rename(archive_dir, target_dir).unwrap();
++ unpack_cmd.arg("xf").arg(&archive_file).current_dir(downloads_dir);
+ spawn_and_wait(unpack_cmd);
+
+ // Rename unpacked dir to the expected name
- init_git_repo(Path::new(target_dir));
++ std::fs::rename(archive_dir, &download_dir).unwrap();
+
- fn get_patches(crate_name: &str) -> Vec<OsString> {
- let mut patches: Vec<_> = fs::read_dir("patches")
++ init_git_repo(&download_dir);
+
+ // Cleanup
+ std::fs::remove_file(archive_file).unwrap();
+}
+
+fn init_git_repo(repo_dir: &Path) {
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_init_cmd);
+
+ let mut git_add_cmd = Command::new("git");
+ git_add_cmd.arg("add").arg(".").current_dir(repo_dir);
+ spawn_and_wait(git_add_cmd);
+
+ let mut git_commit_cmd = Command::new("git");
+ git_commit_cmd.arg("commit").arg("-m").arg("Initial commit").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_commit_cmd);
+}
+
- .map(|path| path.file_name().unwrap().to_owned())
- .filter(|file_name| {
- file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
++fn get_patches(source_dir: &Path, crate_name: &str) -> Vec<PathBuf> {
++ let mut patches: Vec<_> = fs::read_dir(source_dir.join("patches"))
+ .unwrap()
+ .map(|entry| entry.unwrap().path())
+ .filter(|path| path.extension() == Some(OsStr::new("patch")))
- for patch in get_patches(crate_name) {
- eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
- let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
++ .filter(|path| {
++ path.file_name()
++ .unwrap()
++ .to_str()
++ .unwrap()
++ .split_once("-")
++ .unwrap()
++ .1
++ .starts_with(crate_name)
+ })
+ .collect();
+ patches.sort();
+ patches
+}
+
+fn apply_patches(crate_name: &str, target_dir: &Path) {
- apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
++ if crate_name == "<none>" {
++ return;
++ }
++
++ for patch in get_patches(&std::env::current_dir().unwrap(), crate_name) {
++ eprintln!(
++ "[PATCH] {:?} <- {:?}",
++ target_dir.file_name().unwrap(),
++ patch.file_name().unwrap()
++ );
+ let mut apply_patch_cmd = Command::new("git");
++ apply_patch_cmd.arg("am").arg(patch).arg("-q").current_dir(target_dir);
+ spawn_and_wait(apply_patch_cmd);
+ }
+}
--- /dev/null
- use super::utils::{spawn_and_wait, spawn_and_wait_with_input};
+use super::build_sysroot;
+use super::config;
++use super::prepare;
+use super::rustc_info::get_wrapper_file_name;
- runner.in_dir(["rand"], |runner| {
- runner.run_cargo(["clean"]);
++use super::utils::{cargo_command, hyperfine_command, spawn_and_wait, spawn_and_wait_with_input};
+use build_system::SysrootKind;
+use std::env;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+struct TestCase {
+ config: &'static str,
+ func: &'static dyn Fn(&TestRunner),
+}
+
+impl TestCase {
+ const fn new(config: &'static str, func: &'static dyn Fn(&TestRunner)) -> Self {
+ Self { config, func }
+ }
+}
+
+const NO_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("build.mini_core", &|runner| {
+ runner.run_rustc([
+ "example/mini_core.rs",
+ "--crate-name",
+ "mini_core",
+ "--crate-type",
+ "lib,dylib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("build.example", &|runner| {
+ runner.run_rustc([
+ "example/example.rs",
+ "--crate-type",
+ "lib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("jit.mini_core_hello_world", &|runner| {
+ let mut jit_cmd = runner.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit",
+ "-Cprefer-dynamic",
+ "example/mini_core_hello_world.rs",
+ "--cfg",
+ "jit",
+ "--target",
+ &runner.host_triple,
+ ]);
+ jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
+ spawn_and_wait(jit_cmd);
+
+ eprintln!("[JIT-lazy] mini_core_hello_world");
+ let mut jit_cmd = runner.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit-lazy",
+ "-Cprefer-dynamic",
+ "example/mini_core_hello_world.rs",
+ "--cfg",
+ "jit",
+ "--target",
+ &runner.host_triple,
+ ]);
+ jit_cmd.env("CG_CLIF_JIT_ARGS", "abc bcd");
+ spawn_and_wait(jit_cmd);
+ }),
+ TestCase::new("aot.mini_core_hello_world", &|runner| {
+ runner.run_rustc([
+ "example/mini_core_hello_world.rs",
+ "--crate-name",
+ "mini_core_hello_world",
+ "--crate-type",
+ "bin",
+ "-g",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("mini_core_hello_world", ["abc", "bcd"]);
+ }),
+];
+
+const BASE_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("aot.arbitrary_self_types_pointers_and_wrappers", &|runner| {
+ runner.run_rustc([
+ "example/arbitrary_self_types_pointers_and_wrappers.rs",
+ "--crate-name",
+ "arbitrary_self_types_pointers_and_wrappers",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("arbitrary_self_types_pointers_and_wrappers", []);
+ }),
+ TestCase::new("aot.issue_91827_extern_types", &|runner| {
+ runner.run_rustc([
+ "example/issue-91827-extern-types.rs",
+ "--crate-name",
+ "issue_91827_extern_types",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("issue_91827_extern_types", []);
+ }),
+ TestCase::new("build.alloc_system", &|runner| {
+ runner.run_rustc([
+ "example/alloc_system.rs",
+ "--crate-type",
+ "lib",
+ "--target",
+ &runner.target_triple,
+ ]);
+ }),
+ TestCase::new("aot.alloc_example", &|runner| {
+ runner.run_rustc([
+ "example/alloc_example.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("alloc_example", []);
+ }),
+ TestCase::new("jit.std_example", &|runner| {
+ runner.run_rustc([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit",
+ "-Cprefer-dynamic",
+ "example/std_example.rs",
+ "--target",
+ &runner.host_triple,
+ ]);
+
+ eprintln!("[JIT-lazy] std_example");
+ runner.run_rustc([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit-lazy",
+ "-Cprefer-dynamic",
+ "example/std_example.rs",
+ "--target",
+ &runner.host_triple,
+ ]);
+ }),
+ TestCase::new("aot.std_example", &|runner| {
+ runner.run_rustc([
+ "example/std_example.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("std_example", ["arg"]);
+ }),
+ TestCase::new("aot.dst_field_align", &|runner| {
+ runner.run_rustc([
+ "example/dst-field-align.rs",
+ "--crate-name",
+ "dst_field_align",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("dst_field_align", []);
+ }),
+ TestCase::new("aot.subslice-patterns-const-eval", &|runner| {
+ runner.run_rustc([
+ "example/subslice-patterns-const-eval.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("subslice-patterns-const-eval", []);
+ }),
+ TestCase::new("aot.track-caller-attribute", &|runner| {
+ runner.run_rustc([
+ "example/track-caller-attribute.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("track-caller-attribute", []);
+ }),
+ TestCase::new("aot.float-minmax-pass", &|runner| {
+ runner.run_rustc([
+ "example/float-minmax-pass.rs",
+ "--crate-type",
+ "bin",
+ "-Cpanic=abort",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("float-minmax-pass", []);
+ }),
+ TestCase::new("aot.mod_bench", &|runner| {
+ runner.run_rustc([
+ "example/mod_bench.rs",
+ "--crate-type",
+ "bin",
+ "--target",
+ &runner.target_triple,
+ ]);
+ runner.run_out_command("mod_bench", []);
+ }),
+];
+
+const EXTENDED_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::new("test.rust-random/rand", &|runner| {
- runner.run_cargo(["test", "--workspace"]);
++ runner.in_dir(prepare::RAND.source_dir(), |runner| {
++ runner.run_cargo("clean", []);
+
+ if runner.host_triple == runner.target_triple {
+ eprintln!("[TEST] rust-random/rand");
- runner.run_cargo([
- "build",
- "--workspace",
- "--target",
- &runner.target_triple,
- "--tests",
- ]);
++ runner.run_cargo("test", ["--workspace"]);
+ } else {
+ eprintln!("[AOT] rust-random/rand");
- runner.in_dir(["simple-raytracer"], |runner| {
- let run_runs = env::var("RUN_RUNS").unwrap_or("10".to_string());
++ runner.run_cargo("build", ["--workspace", "--tests"]);
+ }
+ });
+ }),
+ TestCase::new("bench.simple-raytracer", &|runner| {
- let mut bench_compile = Command::new("hyperfine");
- bench_compile.arg("--runs");
- bench_compile.arg(&run_runs);
- bench_compile.arg("--warmup");
- bench_compile.arg("1");
- bench_compile.arg("--prepare");
- bench_compile.arg(format!("{:?}", runner.cargo_command(["clean"])));
-
- if cfg!(windows) {
- bench_compile.arg("cmd /C \"set RUSTFLAGS= && cargo build\"");
- } else {
- bench_compile.arg("RUSTFLAGS='' cargo build");
- }
++ runner.in_dir(prepare::SIMPLE_RAYTRACER.source_dir(), |runner| {
++ let run_runs = env::var("RUN_RUNS").unwrap_or("10".to_string()).parse().unwrap();
+
+ if runner.host_triple == runner.target_triple {
+ eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
- bench_compile.arg(format!("{:?}", runner.cargo_command(["build"])));
++ let prepare = runner.cargo_command("clean", []);
++
++ let llvm_build_cmd = cargo_command("cargo", "build", None, Path::new("."));
++
++ let cargo_clif = runner
++ .root_dir
++ .clone()
++ .join("build")
++ .join(get_wrapper_file_name("cargo-clif", "bin"));
++ let clif_build_cmd = cargo_command(cargo_clif, "build", None, Path::new("."));
++
++ let bench_compile =
++ hyperfine_command(1, run_runs, Some(prepare), llvm_build_cmd, clif_build_cmd);
+
- let mut bench_run = Command::new("hyperfine");
- bench_run.arg("--runs");
- bench_run.arg(&run_runs);
- bench_run.arg(PathBuf::from("./raytracer_cg_llvm"));
- bench_run.arg(PathBuf::from("./raytracer_cg_clif"));
+ spawn_and_wait(bench_compile);
+
+ eprintln!("[BENCH RUN] ebobby/simple-raytracer");
+ fs::copy(PathBuf::from("./target/debug/main"), PathBuf::from("raytracer_cg_clif"))
+ .unwrap();
+
- runner.run_cargo(["clean"]);
++ let bench_run = hyperfine_command(
++ 0,
++ run_runs,
++ None,
++ Command::new("./raytracer_cg_llvm"),
++ Command::new("./raytracer_cg_clif"),
++ );
+ spawn_and_wait(bench_run);
+ } else {
- runner.run_cargo(["build", "--target", &runner.target_triple]);
++ runner.run_cargo("clean", []);
+ eprintln!("[BENCH COMPILE] ebobby/simple-raytracer (skipped)");
+ eprintln!("[COMPILE] ebobby/simple-raytracer");
- runner.in_dir(["build_sysroot", "sysroot_src", "library", "core", "tests"], |runner| {
- runner.run_cargo(["clean"]);
-
- if runner.host_triple == runner.target_triple {
- runner.run_cargo(["test"]);
- } else {
- eprintln!("Cross-Compiling: Not running tests");
- runner.run_cargo(["build", "--target", &runner.target_triple, "--tests"]);
- }
- });
++ runner.run_cargo("build", []);
+ eprintln!("[BENCH RUN] ebobby/simple-raytracer (skipped)");
+ }
+ });
+ }),
+ TestCase::new("test.libcore", &|runner| {
- runner.in_dir(["regex"], |runner| {
- runner.run_cargo(["clean"]);
++ runner.in_dir(
++ std::env::current_dir()
++ .unwrap()
++ .join("build_sysroot")
++ .join("sysroot_src")
++ .join("library")
++ .join("core")
++ .join("tests"),
++ |runner| {
++ runner.run_cargo("clean", []);
++
++ if runner.host_triple == runner.target_triple {
++ runner.run_cargo("test", []);
++ } else {
++ eprintln!("Cross-Compiling: Not running tests");
++ runner.run_cargo("build", ["--tests"]);
++ }
++ },
++ );
+ }),
+ TestCase::new("test.regex-shootout-regex-dna", &|runner| {
- let mut build_cmd = runner.cargo_command([
- "build",
- "--example",
- "shootout-regex-dna",
- "--target",
- &runner.target_triple,
- ]);
++ runner.in_dir(prepare::REGEX.source_dir(), |runner| {
++ runner.run_cargo("clean", []);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
+
- let mut run_cmd = runner.cargo_command([
- "run",
- "--example",
- "shootout-regex-dna",
- "--target",
- &runner.target_triple,
- ]);
++ let mut build_cmd = runner.cargo_command("build", ["--example", "shootout-regex-dna"]);
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+
+ if runner.host_triple == runner.target_triple {
- runner.in_dir(["regex"], |runner| {
- runner.run_cargo(["clean"]);
++ let mut run_cmd = runner.cargo_command("run", ["--example", "shootout-regex-dna"]);
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+
+ let input =
+ fs::read_to_string(PathBuf::from("examples/regexdna-input.txt")).unwrap();
+ let expected_path = PathBuf::from("examples/regexdna-output.txt");
+ let expected = fs::read_to_string(&expected_path).unwrap();
+
+ let output = spawn_and_wait_with_input(run_cmd, input);
+ // Make sure `[codegen mono items] start` doesn't poison the diff
+ let output = output
+ .lines()
+ .filter(|line| !line.contains("codegen mono items"))
+ .chain(Some("")) // This just adds the trailing newline
+ .collect::<Vec<&str>>()
+ .join("\r\n");
+
+ let output_matches = expected.lines().eq(output.lines());
+ if !output_matches {
+ let res_path = PathBuf::from("res.txt");
+ fs::write(&res_path, &output).unwrap();
+
+ if cfg!(windows) {
+ println!("Output files don't match!");
+ println!("Expected Output:\n{}", expected);
+ println!("Actual Output:\n{}", output);
+ } else {
+ let mut diff = Command::new("diff");
+ diff.arg("-u");
+ diff.arg(res_path);
+ diff.arg(expected_path);
+ spawn_and_wait(diff);
+ }
+
+ std::process::exit(1);
+ }
+ }
+ });
+ }),
+ TestCase::new("test.regex", &|runner| {
- let mut run_cmd = runner.cargo_command([
++ runner.in_dir(prepare::REGEX.source_dir(), |runner| {
++ runner.run_cargo("clean", []);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.rust_flags);
+
+ if runner.host_triple == runner.target_triple {
- "--tests",
- "--",
- "--exclude-should-panic",
- "--test-threads",
- "1",
- "-Zunstable-options",
- "-q",
- ]);
++ let mut run_cmd = runner.cargo_command(
+ "test",
- runner.cargo_command(["build", "--tests", "--target", &runner.target_triple]);
++ [
++ "--tests",
++ "--",
++ "--exclude-should-panic",
++ "--test-threads",
++ "1",
++ "-Zunstable-options",
++ "-q",
++ ],
++ );
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+ spawn_and_wait(run_cmd);
+ } else {
+ eprintln!("Cross-Compiling: Not running tests");
+ let mut build_cmd =
- runner.in_dir(["portable-simd"], |runner| {
- runner.run_cargo(["clean"]);
- runner.run_cargo(["build", "--all-targets", "--target", &runner.target_triple]);
++ runner.cargo_command("build", ["--tests", "--target", &runner.target_triple]);
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+ }
+ });
+ }),
+ TestCase::new("test.portable-simd", &|runner| {
- runner.run_cargo(["test", "-q"]);
++ runner.in_dir(prepare::PORTABLE_SIMD.source_dir(), |runner| {
++ runner.run_cargo("clean", []);
++ runner.run_cargo("build", ["--all-targets", "--target", &runner.target_triple]);
+
+ if runner.host_triple == runner.target_triple {
- cg_clif_build_dir: &Path,
++ runner.run_cargo("test", ["-q"]);
+ }
+ });
+ }),
+];
+
+pub(crate) fn run_tests(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
- cg_clif_build_dir,
++ cg_clif_dylib: &Path,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ let runner = TestRunner::new(host_triple.to_string(), target_triple.to_string());
+
+ if config::get_bool("testsuite.no_sysroot") {
+ build_sysroot::build_sysroot(
+ channel,
+ SysrootKind::None,
+ &target_dir,
- cg_clif_build_dir,
++ cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+
+ let _ = fs::remove_dir_all(Path::new("target").join("out"));
+ runner.run_testsuite(NO_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] no_sysroot tests");
+ }
+
+ let run_base_sysroot = config::get_bool("testsuite.base_sysroot");
+ let run_extended_sysroot = config::get_bool("testsuite.extended_sysroot");
+
+ if run_base_sysroot || run_extended_sysroot {
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ &target_dir,
- fn in_dir<'a, I, F>(&self, dir: I, callback: F)
- where
- I: IntoIterator<Item = &'a str>,
- F: FnOnce(&TestRunner),
- {
++ cg_clif_dylib,
+ &host_triple,
+ &target_triple,
+ );
+ }
+
+ if run_base_sysroot {
+ runner.run_testsuite(BASE_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] base_sysroot tests");
+ }
+
+ if run_extended_sysroot {
+ runner.run_testsuite(EXTENDED_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] extended_sysroot tests");
+ }
+}
+
+struct TestRunner {
+ root_dir: PathBuf,
+ out_dir: PathBuf,
+ jit_supported: bool,
+ rust_flags: String,
+ run_wrapper: Vec<String>,
+ host_triple: String,
+ target_triple: String,
+}
+
+impl TestRunner {
+ pub fn new(host_triple: String, target_triple: String) -> Self {
+ let root_dir = env::current_dir().unwrap();
+
+ let mut out_dir = root_dir.clone();
+ out_dir.push("target");
+ out_dir.push("out");
+
+ let is_native = host_triple == target_triple;
+ let jit_supported =
+ target_triple.contains("x86_64") && is_native && !host_triple.contains("windows");
+
+ let mut rust_flags = env::var("RUSTFLAGS").ok().unwrap_or("".to_string());
+ let mut run_wrapper = Vec::new();
+
+ if !is_native {
+ match target_triple.as_str() {
+ "aarch64-unknown-linux-gnu" => {
+ // We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ rust_flags = format!("-Clinker=aarch64-linux-gnu-gcc{}", rust_flags);
+ run_wrapper = vec!["qemu-aarch64", "-L", "/usr/aarch64-linux-gnu"];
+ }
+ "x86_64-pc-windows-gnu" => {
+ // We are cross-compiling for Windows. Run tests in wine.
+ run_wrapper = vec!["wine"];
+ }
+ _ => {
+ println!("Unknown non-native platform");
+ }
+ }
+ }
+
+ // FIXME fix `#[linkage = "extern_weak"]` without this
+ if host_triple.contains("darwin") {
+ rust_flags = format!("{} -Clink-arg=-undefined -Clink-arg=dynamic_lookup", rust_flags);
+ }
+
+ Self {
+ root_dir,
+ out_dir,
+ jit_supported,
+ rust_flags,
+ run_wrapper: run_wrapper.iter().map(|s| s.to_string()).collect(),
+ host_triple,
+ target_triple,
+ }
+ }
+
+ pub fn run_testsuite(&self, tests: &[TestCase]) {
+ for &TestCase { config, func } in tests {
+ let (tag, testname) = config.split_once('.').unwrap();
+ let tag = tag.to_uppercase();
+ let is_jit_test = tag == "JIT";
+
+ if !config::get_bool(config) || (is_jit_test && !self.jit_supported) {
+ eprintln!("[{tag}] {testname} (skipped)");
+ continue;
+ } else {
+ eprintln!("[{tag}] {testname}");
+ }
+
+ func(self);
+ }
+ }
+
- let mut new = current.clone();
- for d in dir {
- new.push(d);
- }
++ fn in_dir(&self, new: impl AsRef<Path>, callback: impl FnOnce(&TestRunner)) {
+ let current = env::current_dir().unwrap();
- fn cargo_command<I, S>(&self, args: I) -> Command
+
+ env::set_current_dir(new).unwrap();
+ callback(self);
+ env::set_current_dir(current).unwrap();
+ }
+
+ fn rustc_command<I, S>(&self, args: I) -> Command
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ let mut rustc_clif = self.root_dir.clone();
+ rustc_clif.push("build");
+ rustc_clif.push(get_wrapper_file_name("rustc-clif", "bin"));
+
+ let mut cmd = Command::new(rustc_clif);
+ cmd.args(self.rust_flags.split_whitespace());
+ cmd.arg("-L");
+ cmd.arg(format!("crate={}", self.out_dir.display()));
+ cmd.arg("--out-dir");
+ cmd.arg(format!("{}", self.out_dir.display()));
+ cmd.arg("-Cdebuginfo=2");
+ cmd.args(args);
+ cmd
+ }
+
+ fn run_rustc<I, S>(&self, args: I)
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ spawn_and_wait(self.rustc_command(args));
+ }
+
+ fn run_out_command<'a, I>(&self, name: &str, args: I)
+ where
+ I: IntoIterator<Item = &'a str>,
+ {
+ let mut full_cmd = vec![];
+
+ // Prepend the run-wrapper arguments (e.g. qemu or wine), if any
+ if !self.run_wrapper.is_empty() {
+ full_cmd.extend(self.run_wrapper.iter().cloned());
+ }
+
+ full_cmd.push({
+ let mut out_path = self.out_dir.clone();
+ out_path.push(name);
+ out_path.to_str().unwrap().to_string()
+ });
+
+ for arg in args.into_iter() {
+ full_cmd.push(arg.to_string());
+ }
+
+ let mut cmd_iter = full_cmd.into_iter();
+ let first = cmd_iter.next().unwrap();
+
+ let mut cmd = Command::new(first);
+ cmd.args(cmd_iter);
+
+ spawn_and_wait(cmd);
+ }
+
- I: IntoIterator<Item = S>,
- S: AsRef<OsStr>,
++ fn cargo_command<'a, I>(&self, subcommand: &str, args: I) -> Command
+ where
- let mut cmd = Command::new(cargo_clif);
++ I: IntoIterator<Item = &'a str>,
+ {
+ let mut cargo_clif = self.root_dir.clone();
+ cargo_clif.push("build");
+ cargo_clif.push(get_wrapper_file_name("cargo-clif", "bin"));
+
- fn run_cargo<'a, I>(&self, args: I)
++ let mut cmd = cargo_command(
++ cargo_clif,
++ subcommand,
++ if subcommand == "clean" { None } else { Some(&self.target_triple) },
++ Path::new("."),
++ );
+ cmd.args(args);
+ cmd.env("RUSTFLAGS", &self.rust_flags);
+ cmd
+ }
+
- spawn_and_wait(self.cargo_command(args));
++ fn run_cargo<'a, I>(&self, subcommand: &str, args: I)
+ where
+ I: IntoIterator<Item = &'a str>,
+ {
++ spawn_and_wait(self.cargo_command(subcommand, args));
+ }
+}
--- /dev/null
+use std::env;
+use std::fs;
+use std::io::Write;
+use std::path::Path;
+use std::process::{self, Command, Stdio};
+
++pub(crate) fn cargo_command(
++ cargo: impl AsRef<Path>,
++ subcommand: &str,
++ triple: Option<&str>,
++ source_dir: &Path,
++) -> Command {
++ let mut cmd = Command::new(cargo.as_ref());
++ cmd.arg(subcommand)
++ .arg("--manifest-path")
++ .arg(source_dir.join("Cargo.toml"))
++ .arg("--target-dir")
++ .arg(source_dir.join("target"));
++
++ if let Some(triple) = triple {
++ cmd.arg("--target").arg(triple);
++ }
++
++ cmd
++}
++
++pub(crate) fn hyperfine_command(
++ warmup: u64,
++ runs: u64,
++ prepare: Option<Command>,
++ a: Command,
++ b: Command,
++) -> Command {
++ let mut bench = Command::new("hyperfine");
++
++ if warmup != 0 {
++ bench.arg("--warmup").arg(warmup.to_string());
++ }
++
++ if runs != 0 {
++ bench.arg("--runs").arg(runs.to_string());
++ }
++
++ if let Some(prepare) = prepare {
++ bench.arg("--prepare").arg(format!("{:?}", prepare));
++ }
++
++ bench.arg(format!("{:?}", a)).arg(format!("{:?}", b));
++
++ bench
++}
++
+#[track_caller]
+pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
+ let src = src.as_ref();
+ let dst = dst.as_ref();
+ if let Err(_) = fs::hard_link(src, dst) {
+ fs::copy(src, dst).unwrap(); // Fallback to copying if hardlinking failed
+ }
+}
+
+#[track_caller]
+pub(crate) fn spawn_and_wait(mut cmd: Command) {
+ if !cmd.spawn().unwrap().wait().unwrap().success() {
+ process::exit(1);
+ }
+}
+
+#[track_caller]
+pub(crate) fn spawn_and_wait_with_input(mut cmd: Command, input: String) -> String {
+ let mut child = cmd
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn child process");
+
+ let mut stdin = child.stdin.take().expect("Failed to open stdin");
+ std::thread::spawn(move || {
+ stdin.write_all(input.as_bytes()).expect("Failed to write to stdin");
+ });
+
+ let output = child.wait_with_output().expect("Failed to read stdout");
+ if !output.status.success() {
+ process::exit(1);
+ }
+
+ String::from_utf8(output.stdout).unwrap()
+}
+
+pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
+ for entry in fs::read_dir(from).unwrap() {
+ let entry = entry.unwrap();
+ let filename = entry.file_name();
+ if filename == "." || filename == ".." {
+ continue;
+ }
+ if entry.metadata().unwrap().is_dir() {
+ fs::create_dir(to.join(&filename)).unwrap();
+ copy_dir_recursively(&from.join(&filename), &to.join(&filename));
+ } else {
+ fs::copy(from.join(&filename), to.join(&filename)).unwrap();
+ }
+ }
+}
+
+pub(crate) fn is_ci() -> bool {
+ env::var("CI").as_ref().map(|val| &**val) == Ok("true")
+}
--- /dev/null
- rm -rf rand/ regex/ simple-raytracer/ portable-simd/ abi-checker/
+#!/usr/bin/env bash
+set -e
+
+rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
+rm -rf target/ build/ perf.data{,.old} y.bin
++rm -rf download/
++
++# Kept for now in case someone updates their checkout of cg_clif before running clean_all.sh
++# FIXME remove at some point in the future
++rm -rf rand/ regex/ simple-raytracer/ portable-simd/ abi-checker/ abi-cafe/
--- /dev/null
- testsuite.abi-checker
+# This file allows configuring the build system.
+
+# Which triple to produce a compiler toolchain for.
+#
+# Defaults to the default triple of rustc on the host system.
+#host = x86_64-unknown-linux-gnu
+
+# Which triple to build libraries (core/alloc/std/test/proc_macro) for.
+#
+# Defaults to `host`.
+#target = x86_64-unknown-linux-gnu
+
+# Disables cleaning of the sysroot dir. This will cause old compiled artifacts to be re-used when
+# the sysroot source hasn't changed. This is useful when the codegen backend hasn't been modified.
+# This option can be changed while the build system is already running for as long as sysroot
+# building hasn't started yet.
+#keep_sysroot
+
+
+# Testsuite
+#
+# Each test suite item has a corresponding key here. The default is to run all tests.
+# Comment any of these lines to skip individual tests.
+
+testsuite.no_sysroot
+build.mini_core
+build.example
+jit.mini_core_hello_world
+aot.mini_core_hello_world
+
+testsuite.base_sysroot
+aot.arbitrary_self_types_pointers_and_wrappers
+aot.issue_91827_extern_types
+build.alloc_system
+aot.alloc_example
+jit.std_example
+aot.std_example
+aot.dst_field_align
+aot.subslice-patterns-const-eval
+aot.track-caller-attribute
+aot.float-minmax-pass
+aot.mod_bench
+
+testsuite.extended_sysroot
+test.rust-random/rand
+bench.simple-raytracer
+test.libcore
+test.regex-shootout-regex-dna
+test.regex
+test.portable-simd
+
++testsuite.abi-cafe
--- /dev/null
- #![feature(const_ptr_offset_from)]
+// Copied from rustc ui test suite
+
+// run-pass
+//
+// Test that we can handle unsized types with an extern type tail part.
+// Regression test for issue #91827.
+
+#![feature(extern_types)]
+
+use std::ptr::addr_of;
+
+extern "C" {
+ type Opaque;
+}
+
+unsafe impl Sync for Opaque {}
+
+#[repr(C)]
+pub struct List<T> {
+ len: usize,
+ data: [T; 0],
+ tail: Opaque,
+}
+
+#[repr(C)]
+pub struct ListImpl<T, const N: usize> {
+ len: usize,
+ data: [T; N],
+}
+
+impl<T> List<T> {
+ const fn as_slice(&self) -> &[T] {
+ unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len) }
+ }
+}
+
+impl<T, const N: usize> ListImpl<T, N> {
+ const fn as_list(&self) -> &List<T> {
+ unsafe { std::mem::transmute(self) }
+ }
+}
+
+pub static A: ListImpl<u128, 3> = ListImpl {
+ len: 3,
+ data: [5, 6, 7],
+};
+pub static A_REF: &'static List<u128> = A.as_list();
+pub static A_TAIL_OFFSET: isize = tail_offset(A.as_list());
+
+const fn tail_offset<T>(list: &List<T>) -> isize {
+ unsafe { (addr_of!(list.tail) as *const u8).offset_from(list as *const List<T> as *const u8) }
+}
+
+fn main() {
+ assert_eq!(A_REF.as_slice(), &[5, 6, 7]);
+ // Check that interpreter and code generation agree about the position of the tail field.
+ assert_eq!(A_TAIL_OFFSET, tail_offset(A_REF));
+}
--- /dev/null
+#![feature(
+ no_core,
+ lang_items,
+ intrinsics,
+ unboxed_closures,
+ extern_types,
+ decl_macro,
+ rustc_attrs,
+ transparent_unions,
+ auto_traits,
+ thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u128 {
+ fn eq(&self, other: &u128) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u128) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+impl <T: PartialEq> PartialEq for Option<T> {
+ fn eq(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs == *rhs,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs != *rhs,
+ (None, None) => false,
+ _ => true,
+ }
+ }
+}
+
+#[lang = "shl"]
+pub trait Shl<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn shl(self, rhs: RHS) -> Self::Output;
+}
+
+impl Shl for u128 {
+ type Output = u128;
+
+ fn shl(self, rhs: u128) -> u128 {
+ self << rhs
+ }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+ self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &'static str) -> ! {
+ unsafe {
+ libc::puts("Panicking\n\0" as *const str as *const i8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(1)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct NonNull<T: ?Sized>(pub *const T);
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+pub struct Unique<T: ?Sized> {
+ pub pointer: NonNull<T>,
+ pub _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(Unique<T>, ());
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+ fn drop(&mut self) {
+ // drop is currently performed by compiler.
+ }
+}
+
+impl<T: ?Sized> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, _alloc: ()) {
+ libc::free(ptr.pointer.0 as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+ extern "rust-intrinsic" {
++ #[rustc_safe_intrinsic]
+ pub fn abort() -> !;
++ #[rustc_safe_intrinsic]
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
++ #[rustc_safe_intrinsic]
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
++ #[rustc_safe_intrinsic]
+ pub fn needs_drop<T: ?::Sized>() -> bool;
++ #[rustc_safe_intrinsic]
+ pub fn bitreverse<T>(x: T) -> T;
++ #[rustc_safe_intrinsic]
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+}
+
+pub mod libc {
+ // With the new Universal CRT, msvc has made all the printf functions inline wrapper
+ // functions. Link against legacy_stdio_definitions.lib, which provides the printf
+ // wrapper functions as normal symbols to link against.
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env="msvc", link(name="legacy_stdio_definitions"))]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+ extern "C" {
+ pub fn puts(s: *const i8) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+#[cfg(not(windows))]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
--- /dev/null
+#![feature(no_core, lang_items, never_type, linkage, extern_types, thread_local, box_syntax)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ assert_eq!(*NUM_REF as i32, 44);
+ }
+ 0
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const i8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropUnsized {
+ inner: NoisyDropInner,
+ text: str,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const i8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const i8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
++ _sigpipe: u8,
+) -> isize {
+ if argc == 3 {
+ unsafe { puts(*argv as *const i8); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ }
+
+ main().report() as isize
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+#[repr(C)]
+pub struct bool_11 {
+ field0: bool,
+ field1: bool,
+ field2: bool,
+ field3: bool,
+ field4: bool,
+ field5: bool,
+ field6: bool,
+ field7: bool,
+ field8: bool,
+ field9: bool,
+ field10: bool,
+}
+
+extern "C" fn bool_struct_in_11(_arg0: bool_11) {}
+
+#[allow(unreachable_code)] // FIXME false positive
+fn main() {
+ take_unique(Unique {
+ pointer: unsafe { NonNull(1 as *mut ()) },
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ call_return_u128_pair();
+
+ bool_struct_in_11(bool_11 {
+ field0: true,
+ field1: true,
+ field2: true,
+ field3: true,
+ field4: true,
+ field5: true,
+ field6: true,
+ field7: true,
+ field8: true,
+ field9: true,
+ field10: true,
+ });
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const i8 = hello as *const [u8] as *const i8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const i8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(!intrinsics::needs_drop::<[u8]>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+ assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
+
+ Unique {
+ pointer: NonNull(1 as *mut &str),
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ #[cfg(not(any(jit, windows)))]
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ unsafe { assert_eq!(ABC as usize, 0); }
+ }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ let amount = 0;
+ assert_eq!(1u128 << amount, 1);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ let repeat = [Some(42); 2];
+ assert_eq!(repeat[0], Some(42));
+ assert_eq!(repeat[1], Some(42));
+
+ from_decimal_string();
+
+ #[cfg(not(any(jit, windows)))]
+ test_tls();
+
+ #[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "macos")))]
+ unsafe {
+ global_asm_test();
+ }
+
+ // Both statics have a reference that points to the same anonymous allocation.
+ static REF1: &u8 = &42;
+ static REF2: &u8 = REF1;
+ assert_eq!(*REF1, *REF2);
+
+ extern "C" {
+ type A;
+ }
+
+ fn main() {
+ let x: &A = unsafe { &*(1usize as *const A) };
+
+ assert_eq!(unsafe { intrinsics::size_of_val(x) }, 0);
+ assert_eq!(unsafe { intrinsics::min_align_of_val(x) }, 1);
+}
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", any(target_os = "linux", target_os = "macos")))]
+extern "C" {
+ fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+global_asm! {
+ "
+ .global global_asm_test
+ global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "macos"))]
+global_asm! {
+ "
+ .global _global_asm_test
+ _global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+#[cfg(unix)]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+type DWORD = u32;
+type LPDWORD = *mut u32;
+
+type LPVOID = *mut c_void;
+type HANDLE = *mut c_void;
+
+#[link(name = "kernel32")]
+#[cfg(windows)]
+extern "C" {
+ fn WaitForSingleObject(
+ hHandle: LPVOID,
+ dwMilliseconds: DWORD
+ ) -> DWORD;
+
+ fn CreateThread(
+ lpThreadAttributes: LPVOID, // Technically LPSECURITY_ATTRIBUTES, but we don't use it anyway
+ dwStackSize: usize,
+ lpStartAddress: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ lpParameter: LPVOID,
+ dwCreationFlags: DWORD,
+ lpThreadId: LPDWORD
+ ) -> HANDLE;
+}
+
+struct Thread {
+ #[cfg(windows)]
+ handle: HANDLE,
+ #[cfg(unix)]
+ handle: pthread_t,
+}
+
+impl Thread {
+ unsafe fn create(f: extern "C" fn(_: *mut c_void) -> *mut c_void) -> Self {
+ #[cfg(unix)]
+ {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, f, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ Thread {
+ handle: thread,
+ }
+ }
+
+ #[cfg(windows)]
+ {
+ let handle = CreateThread(0 as *mut c_void, 0, f, 0 as *mut c_void, 0, 0 as *mut u32);
+
+ if (handle as u64) == 0 {
+ assert!(false);
+ }
+
+ Thread {
+ handle,
+ }
+ }
+ }
+
+
+ unsafe fn join(self) {
+ #[cfg(unix)]
+ {
+ let mut res = 0 as *mut c_void;
+ pthread_join(self.handle, &mut res);
+ }
+
+ #[cfg(windows)]
+ {
+ // The INFINITE macro is used to signal operations that do not timeout.
+ let infinite = 0xffffffff;
+ assert!(WaitForSingleObject(self.handle, infinite) == 0);
+ }
+ }
+}
+
+
+
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ assert_eq!(TLS, 42);
+
+ let thread = Thread::create(mutate_tls);
+ thread.join();
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const i8);
+ }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior () {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
+
+fn from_decimal_string() {
+ loop {
+ let multiplier = 1;
+
+ take_multiplier_ref(&multiplier);
+
+ if multiplier == 1 {
+ break;
+ }
+
+ unreachable();
+ }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+ panic("unreachable")
+}
--- /dev/null
--- /dev/null
++From 2b15fee2bb5fd14e34c7e17e44d99cb34f4c555d Mon Sep 17 00:00:00 2001
++From: Afonso Bordado <afonsobordado@az8.co>
++Date: Tue, 27 Sep 2022 07:55:17 +0100
++Subject: [PATCH] Disable some test on x86_64-pc-windows-gnu
++
++---
++ src/report.rs | 6 ++++++
++ 1 file changed, 6 insertions(+)
++
++diff --git a/src/report.rs b/src/report.rs
++index eeec614..f582867 100644
++--- a/src/report.rs
+++++ b/src/report.rs
++@@ -48,6 +48,12 @@ pub fn get_test_rules(test: &TestKey, caller: &dyn AbiImpl, callee: &dyn AbiImpl
++ //
++ // THIS AREA RESERVED FOR VENDORS TO APPLY PATCHES
++
+++ // x86_64-pc-windows-gnu has some broken i128 tests that aren't disabled by default
+++ if cfg!(all(target_os = "windows", target_env = "gnu")) && test.test_name == "ui128" {
+++ result.run = Link;
+++ result.check = Pass(Link);
+++ }
+++
++ // END OF VENDOR RESERVED AREA
++ //
++ //
++--
++2.30.1.windows.1
++
--- /dev/null
- From 97c473937382a5b5858d9cce3c947855d23b2dc5 Mon Sep 17 00:00:00 2001
++From b742f03694b920cc14400727d54424e8e1b60928 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Nov 2021 19:28:40 +0100
+Subject: [PATCH] Disable unsupported tests
+
+---
- crates/core_simd/src/math.rs | 6 ++++++
- crates/core_simd/src/vector.rs | 2 ++
- crates/core_simd/tests/masks.rs | 2 ++
- crates/core_simd/tests/ops_macros.rs | 4 ++++
- 4 files changed, 14 insertions(+)
++ crates/core_simd/src/elements/int.rs | 8 ++++++++
++ crates/core_simd/src/elements/uint.rs | 4 ++++
++ crates/core_simd/src/masks/full_masks.rs | 6 ++++++
++ crates/core_simd/src/vector.rs | 2 ++
++ crates/core_simd/tests/masks.rs | 3 ---
++ 5 files changed, 20 insertions(+), 3 deletions(-)
+
- diff --git a/crates/core_simd/src/math.rs b/crates/core_simd/src/math.rs
- index 2bae414..2f87499 100644
- --- a/crates/core_simd/src/math.rs
- +++ b/crates/core_simd/src/math.rs
- @@ -5,6 +5,7 @@ macro_rules! impl_uint_arith {
- ($($ty:ty),+) => {
- $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
-
- + /*
- /// Lanewise saturating add.
- ///
- /// # Examples
- @@ -43,6 +44,7 @@ macro_rules! impl_uint_arith {
- pub fn saturating_sub(self, second: Self) -> Self {
- unsafe { simd_saturating_sub(self, second) }
- }
- + */
- })+
- }
- }
- @@ -51,6 +53,7 @@ macro_rules! impl_int_arith {
- ($($ty:ty),+) => {
- $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
-
- + /*
- /// Lanewise saturating add.
- ///
- /// # Examples
- @@ -89,6 +92,7 @@ macro_rules! impl_int_arith {
- pub fn saturating_sub(self, second: Self) -> Self {
- unsafe { simd_saturating_sub(self, second) }
- }
- + */
-
- /// Lanewise absolute value, implemented in Rust.
- /// Every lane becomes its absolute value.
- @@ -109,6 +113,7 @@ macro_rules! impl_int_arith {
- (self^m) - m
- }
-
- + /*
- /// Lanewise saturating absolute value, implemented in Rust.
- /// As abs(), except the MIN value becomes MAX instead of itself.
- ///
- @@ -151,6 +156,7 @@ macro_rules! impl_int_arith {
- pub fn saturating_neg(self) -> Self {
- Self::splat(0).saturating_sub(self)
- }
- + */
- })+
- }
- }
+diff --git a/crates/core_simd/src/vector.rs b/crates/core_simd/src/vector.rs
- index 7c5ec2b..c8631e8 100644
++index e8e8f68..7173c24 100644
+--- a/crates/core_simd/src/vector.rs
++++ b/crates/core_simd/src/vector.rs
- @@ -75,6 +75,7 @@ where
- Self(array)
++@@ -250,6 +250,7 @@ where
++ unsafe { intrinsics::simd_cast(self) }
+ }
+
++ /*
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
+ ///
- @@ -297,6 +298,7 @@ where
++@@ -473,6 +474,7 @@ where
+ // Cleared ☢️ *mut T Zone
+ }
+ }
++ */
+ }
+
+ impl<T, const LANES: usize> Copy for Simd<T, LANES>
- diff --git a/crates/core_simd/tests/masks.rs b/crates/core_simd/tests/masks.rs
- index 6a8ecd3..68fcb49 100644
- --- a/crates/core_simd/tests/masks.rs
- +++ b/crates/core_simd/tests/masks.rs
- @@ -68,6 +68,7 @@ macro_rules! test_mask_api {
- assert_eq!(core_simd::Mask::<$type, 8>::from_int(int), mask);
- }
-
- + /*
- #[cfg(feature = "generic_const_exprs")]
- #[test]
- fn roundtrip_bitmask_conversion() {
- @@ -80,6 +81,7 @@ macro_rules! test_mask_api {
- assert_eq!(bitmask, [0b01001001, 0b10000011]);
- assert_eq!(core_simd::Mask::<$type, 16>::from_bitmask(bitmask), mask);
- }
- + */
- }
- }
- }
+--
- 2.26.2.7.g19db9cfb68
-
++2.25.1
--- /dev/null
--- /dev/null
++From eec874c889b8d24e5ad50faded24288150f057b1 Mon Sep 17 00:00:00 2001
++From: Afonso Bordado <afonsobordado@az8.co>
++Date: Tue, 27 Sep 2022 08:13:58 +0100
++Subject: [PATCH] Disable rand tests on mingw
++
++---
++ rand_distr/src/pareto.rs | 2 ++
++ rand_distr/tests/value_stability.rs | 4 ++++
++ 2 files changed, 6 insertions(+)
++
++diff --git a/rand_distr/src/pareto.rs b/rand_distr/src/pareto.rs
++index 217899e..9cedeb7 100644
++--- a/rand_distr/src/pareto.rs
+++++ b/rand_distr/src/pareto.rs
++@@ -107,6 +107,8 @@ mod tests {
++ }
++
++ #[test]
+++ // This is broken on x86_64-pc-windows-gnu presumably due to a broken powf implementation
+++ #[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
++ fn value_stability() {
++ fn test_samples<F: Float + core::fmt::Debug, D: Distribution<F>>(
++ distr: D, zero: F, expected: &[F],
++diff --git a/rand_distr/tests/value_stability.rs b/rand_distr/tests/value_stability.rs
++index 192ba74..0101ace 100644
++--- a/rand_distr/tests/value_stability.rs
+++++ b/rand_distr/tests/value_stability.rs
++@@ -72,6 +72,8 @@ fn unit_disc_stability() {
++ }
++
++ #[test]
+++// This is broken on x86_64-pc-windows-gnu
+++#[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
++ fn pareto_stability() {
++ test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
++ 1.0423688f32, 2.1235929, 4.132709, 1.4679428,
++@@ -143,6 +145,8 @@ fn inverse_gaussian_stability() {
++ }
++
++ #[test]
+++// This is broken on x86_64-pc-windows-gnu
+++#[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
++ fn gamma_stability() {
++ // Gamma has 3 cases: shape == 1, shape < 1, shape > 1
++ test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
++--
++2.25.1
--- /dev/null
- channel = "nightly-2022-08-24"
+[toolchain]
++channel = "nightly-2022-10-23"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
--- /dev/null
+#!/usr/bin/env bash
+set -e
+
+./y.rs build --no-unstable-features
+
+echo "[SETUP] Rust fork"
+git clone https://github.com/rust-lang/rust.git || true
+pushd rust
+git fetch
+git checkout -- .
+git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
+
++git am ../patches/*-sysroot-*.patch
++
+git apply - <<EOF
+diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
+index d95b5b7f17f..00b6f0e3635 100644
+--- a/library/alloc/Cargo.toml
++++ b/library/alloc/Cargo.toml
+@@ -8,7 +8,7 @@ edition = "2018"
+
+ [dependencies]
+ core = { path = "../core" }
+-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
++compiler_builtins = { version = "0.1.66", features = ['rustc-dep-of-std', 'no-asm'] }
+
+ [dev-dependencies]
+ rand = "0.7"
+ rand_xorshift = "0.2"
+diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
+index 8431aa7b818..a3ff7e68ce5 100644
+--- a/src/tools/compiletest/src/runtest.rs
++++ b/src/tools/compiletest/src/runtest.rs
+@@ -3489,12 +3489,7 @@ fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> S
+ let compiler_src_dir = base_dir.join("compiler");
+ normalize_path(&compiler_src_dir, "$(echo '$COMPILER_DIR')");
+
+- if let Some(virtual_rust_source_base_dir) =
+- option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
+- {
+- normalize_path(&virtual_rust_source_base_dir.join("library"), "$(echo '$SRC_DIR')");
+- normalize_path(&virtual_rust_source_base_dir.join("compiler"), "$(echo '$COMPILER_DIR')");
+- }
++ normalize_path(&Path::new("$(cd ../build_sysroot/sysroot_src/library; pwd)"), "$(echo '$SRC_DIR')");
+
+ // Paths into the build directory
+ let test_build_dir = &self.config.build_base;
+EOF
+
+cat > config.toml <<EOF
+changelog-seen = 2
+
+[llvm]
+ninja = false
+
+[build]
+rustc = "$(pwd)/../build/rustc-clif"
+cargo = "$(rustup which cargo)"
+full-bootstrap = true
+local-rebuild = true
+
+[rust]
+codegen-backends = ["cranelift"]
+deny-warnings = false
+verbose-tests = false
+EOF
+popd
+
+# FIXME remove once inline asm is fully supported
+export RUSTFLAGS="$RUSTFLAGS --cfg=rustix_use_libc"
++
++# Allow the testsuite to use llvm tools
++host_triple=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
++export LLVM_BIN_DIR="$(rustc --print sysroot)/lib/rustlib/$host_triple/bin"
--- /dev/null
- rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
- rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
- rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
- rm src/test/ui/generator/size-moved-locals.rs # same
+#!/usr/bin/env bash
+set -e
+
+cd $(dirname "$0")/../
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Test suite of rustc"
+pushd rust
+
+command -v rg >/dev/null 2>&1 || cargo install ripgrep
+
+rm -r src/test/ui/{extern/,unsized-locals/,lto/,linkage*} || true
+for test in $(rg --files-with-matches "lto|// needs-asm-support|// needs-unwind" src/test/{ui,incremental}); do
+ rm $test
+done
+
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~[^\|]*\s*ERR|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+done
+
+git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+# missing features
+# ================
+
+# requires stack unwinding
+rm src/test/incremental/change_crate_dep_kind.rs
+rm src/test/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
+
+# requires compiling with -Cpanic=unwind
- rm src/test/ui/empty_global_asm.rs # TODO add needs-asm-support
+rm -r src/test/ui/macros/rfc-2011-nicer-assert-messages/
+
+# vendor intrinsics
+rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # requires x86_64 vendor intrinsics
+rm src/test/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
+rm src/test/ui/simd/intrinsic/generic-bitmask-pass.rs # simd_bitmask unimplemented
+rm src/test/ui/simd/intrinsic/generic-as.rs # simd_as unimplemented
+rm src/test/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs # simd_saturating_add unimplemented
+rm src/test/ui/simd/intrinsic/float-math-pass.rs # simd_fcos unimplemented
+rm src/test/ui/simd/intrinsic/generic-gather-pass.rs # simd_gather unimplemented
+rm src/test/ui/simd/intrinsic/generic-select-pass.rs # simd_select_bitmask unimplemented
+rm src/test/ui/simd/issue-85915-simd-ptrs.rs # simd_gather unimplemented
+rm src/test/ui/simd/issue-89193.rs # simd_gather unimplemented
+rm src/test/ui/simd/simd-bitmask.rs # simd_bitmask unimplemented
+
+# exotic linkages
+rm src/test/ui/issues/issue-33992.rs # unsupported linkages
+rm src/test/incremental/hashes/function_interfaces.rs # same
+rm src/test/incremental/hashes/statics.rs # same
+
+# variadic arguments
+rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+# unsized locals
+rm -r src/test/run-pass-valgrind/unsized-locals
+
+# misc unimplemented things
+rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+rm src/test/ui/target-feature/missing-plusminus.rs # error not implemented
+rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm -r src/test/run-make/emit-named-files # requires full --emit support
+rm src/test/ui/abi/stack-probes.rs # stack probes not yet implemented
++rm src/test/ui/simd/intrinsic/ptr-cast.rs # simd_expose_addr intrinsic unimplemented
+
+# optimization tests
+# ==================
+rm src/test/ui/codegen/issue-28950.rs # depends on stack size optimizations
+rm src/test/ui/codegen/init-large-type.rs # same
+rm src/test/ui/issues/issue-40883.rs # same
+rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
+
+# backend specific tests
+# ======================
+rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+rm src/test/ui/abi/stack-protector.rs # requires stack protector support
+
+# giving different but possibly correct results
+# =============================================
+rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
+rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
+rm src/test/ui/consts/issue-33537.rs # same
+
+# doesn't work due to the way the rustc test suite is invoked.
+# should work when using ./x.py test the way it is intended
+# ============================================================
+rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
+rm -r src/test/run-make/unstable-flag-required # same
+rm -r src/test/run-make/rustdoc-* # same
+rm -r src/test/run-make/issue-88756-default-output # same
+rm -r src/test/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump
+
+# genuine bugs
+# ============
+rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
+
+rm src/test/incremental/spike-neg1.rs # errors out for some reason
+rm src/test/incremental/spike-neg2.rs # same
+rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs
+rm src/test/ui/mir/ssa-analysis-regression-50041.rs # produces ICE
+rm src/test/ui/type-alias-impl-trait/assoc-projection-ice.rs # produces ICE
+
+rm src/test/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors
+
+# bugs in the test suite
+# ======================
+rm src/test/ui/backtrace.rs # TODO warning
+rm src/test/ui/simple_global_asm.rs # TODO add needs-asm-support
+rm src/test/ui/test-attrs/test-type.rs # TODO panic message on stderr. correct stdout
+# not sure if this is actually a bug in the test suite, but the symbol list shows the function without leading _ for some reason
+rm -r src/test/run-make/native-link-modifier-bundle
+
++rm src/test/ui/stdio-is-blocking.rs # really slow with unoptimized libstd
++
+echo "[TEST] rustc test suite"
+RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
+popd
--- /dev/null
- (CallTarget::Indirect(sig, method), Some(ptr))
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+mod comments;
+mod pass_mode;
+mod returning;
+
+use cranelift_module::ModuleError;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::{AbiParam, SigRef};
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::codegen_return;
+
+fn clif_sig_from_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ default_call_conv: CallConv,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
+ let call_conv = match fn_abi.conv {
+ Conv::Rust | Conv::C => default_call_conv,
+ Conv::RustCold => CallConv::Cold,
+ Conv::X86_64SysV => CallConv::SystemV,
+ Conv::X86_64Win64 => CallConv::WindowsFastcall,
+ Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
+ | Conv::Msp430Intr
+ | Conv::PtxKernel
+ | Conv::X86Fastcall
+ | Conv::X86Intr
+ | Conv::X86Stdcall
+ | Conv::X86ThisCall
+ | Conv::X86VectorCall
+ | Conv::AmdGpuKernel
+ | Conv::AvrInterrupt
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+ };
+ let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
+
+ let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+ // Sometimes the first param is an pointer to the place where the return value needs to be stored.
+ let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
+ Signature { params, returns, call_conv }
+}
+
+pub(crate) fn get_function_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ inst: Instance<'tcx>,
+) -> Signature {
+ assert!(!inst.substs.needs_infer());
+ clif_sig_from_fn_abi(
+ tcx,
+ CallConv::triple_default(triple),
+ &RevealAllLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
+ )
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ inst: Instance<'tcx>,
+) -> FuncId {
+ let name = tcx.symbol_name(inst).name;
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
+ match module.declare_function(name, Linkage::Import, &sig) {
+ Ok(func_id) => func_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` as function, but it was already declared as static"
+ )),
+ Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` with signature {new_sig:?}, \
+ but it was already declared with signature {prev_sig:?}"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ /// Instance must be monomorphized
+ pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
+ let func_id = import_function(self.tcx, self.module, inst);
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", inst));
+ }
+
+ func_ref
+ }
+
+ /// Declare and call the runtime/libcall function `name` with an ad-hoc
+ /// signature built from `params`/`returns` (using the default call
+ /// convention) and return the raw result values of the call.
+ pub(crate) fn lib_call(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> &[Value] {
+ let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
+ let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", name));
+ }
+ let call_inst = self.bcx.ins().call(func_ref, args);
+ if self.clif_comments.enabled() {
+ self.add_comment(call_inst, format!("easy_call {}", name));
+ }
+ let results = self.bcx.inst_results(call_inst);
+ // At most two result values are supported (scalar or scalar pair).
+ assert!(results.len() <= 2, "{}", results.len());
+ results
+ }
+
+ /// Convenience wrapper around `lib_call`: derives the Cranelift signature
+ /// from the `CValue` arguments and `return_ty`, then repackages the raw
+ /// result values into a `CValue` of `return_ty`'s layout.
+ pub(crate) fn easy_call(
+ &mut self,
+ name: &str,
+ args: &[CValue<'tcx>],
+ return_ty: Ty<'tcx>,
+ ) -> CValue<'tcx> {
+ let (input_tys, args): (Vec<_>, Vec<_>) = args
+ .iter()
+ .map(|arg| {
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
+ })
+ .unzip();
+ let return_layout = self.layout_of(return_ty);
+ let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
+ tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
+ } else {
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
+ };
+ let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+ match *ret_vals {
+ [] => CValue::by_ref(
+ Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+ return_layout,
+ ),
+ [val] => CValue::by_val(val, return_layout),
+ [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+ _ => unreachable!(),
+ }
+ }
+}
+
+/// Make a [`CPlace`] capable of holding value of the specified type.
+///
+/// SSA-eligible locals become Cranelift variables (a variable pair for
+/// `ScalarPair` layouts); all other locals get a stack slot.
+fn make_local_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ is_ssa: bool,
+) -> CPlace<'tcx> {
+ let place = if is_ssa {
+ if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ CPlace::new_var_pair(fx, local, layout)
+ } else {
+ CPlace::new_var(fx, local, layout)
+ }
+ } else {
+ CPlace::new_stack_slot(fx, layout)
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ place
+}
+
+/// Codegen the function prologue: bind the return place, all MIR arguments
+/// (including the spread tuple of the "rust-call" ABI and the implicit
+/// `#[track_caller]` caller location), allocate places for every local, and
+/// finally jump to the MIR start block.
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
+ let ssa_analyzed = crate::analyze::analyze(fx);
+
+ self::comments::add_args_header_comment(fx);
+
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
+ assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+ // None means pass_mode == NoPass
+ enum ArgKind<'tcx> {
+ Normal(Option<CValue<'tcx>>),
+ Spread(Vec<Option<CValue<'tcx>>>),
+ }
+
+ // Temporarily take the fn_abi out of fx so the iterator below doesn't
+ // conflict with the mutable borrows of fx; it is restored further down.
+ let fn_abi = fx.fn_abi.take().unwrap();
+
+ // FIXME implement variadics in cranelift
+ if fn_abi.c_variadic {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ let mut arg_abis_iter = fn_abi.args.iter();
+
+ let func_params = fx
+ .mir
+ .args_iter()
+ .map(|local| {
+ let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
+
+ let tupled_arg_tys = match arg_ty.kind() {
+ ty::Tuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+ };
+
+ let mut params = Vec::new();
+ for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
+ params.push(param);
+ }
+
+ (local, ArgKind::Spread(params), arg_ty)
+ } else {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
+ (local, ArgKind::Normal(param), arg_ty)
+ }
+ })
+ .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+ assert!(fx.caller_location.is_none());
+ if fx.instance.def.requires_caller_location(fx.tcx) {
+ // Store caller location for `#[track_caller]`.
+ let arg_abi = arg_abis_iter.next().unwrap();
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
+ }
+
+ // Sanity check: every ArgAbi and every incoming block param must have been consumed.
+ assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
+ fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
+
+ self::comments::add_locals_header_comment(fx);
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ // While this is normally an optimization to prevent an unnecessary copy when an argument is
+ // not mutated by the current function, this is necessary to support unsized arguments.
+ if let ArgKind::Normal(Some(val)) = arg_kind {
+ if let Some((addr, meta)) = val.try_to_ptr() {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so it is fine to borrow the backing storage of this argument
+ // to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
+ }
+ }
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+
+ match arg_kind {
+ ArgKind::Normal(param) => {
+ if let Some(param) = param {
+ place.write_cvalue(fx, param);
+ }
+ }
+ ArgKind::Spread(params) => {
+ for (i, param) in params.into_iter().enumerate() {
+ if let Some(param) = param {
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ }
+ }
+ }
+ }
+ }
+
+ // Allocate places for all non-argument locals as well.
+ for local in fx.mir.vars_and_temps_iter() {
+ let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+ }
+
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
+
+/// A codegened call argument together with ownership information derived from
+/// the MIR operand (see `codegen_call_argument_operand`).
+struct CallArgument<'tcx> {
+ value: CValue<'tcx>,
+ // True when the argument operand was an `Operand::Move`.
+ is_owned: bool,
+}
+
+// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
+/// Codegen one call argument operand, recording whether it was an
+/// `Operand::Move` in `is_owned`.
+fn codegen_call_argument_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CallArgument<'tcx> {
+ CallArgument {
+ value: codegen_operand(fx, operand),
+ is_owned: matches!(operand, Operand::Move(_)),
+ }
+}
+
+/// Codegen a MIR `Call` terminator. Handles LLVM intrinsics, Rust intrinsics
+/// and empty drop glue specially, unpacks the "rust-call" argument tuple,
+/// passes the caller location for `#[track_caller]`, and emits either a direct
+/// call, a virtual (vtable) call or an indirect call through a function
+/// pointer, then jumps to `target` (or traps if the call diverges).
+pub(crate) fn codegen_terminator_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ func: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ destination: Place<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ let ret_place = codegen_place(fx, destination);
+
+ // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ crate::intrinsics::codegen_llvm_intrinsic_call(
+ fx,
+ &fx.tcx.symbol_name(instance).name,
+ substs,
+ args,
+ ret_place,
+ target,
+ );
+ return;
+ }
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) => {
+ crate::intrinsics::codegen_intrinsic_call(
+ fx,
+ instance,
+ args,
+ ret_place,
+ target,
+ source_info,
+ );
+ return;
+ }
+ InstanceDef::DropGlue(_, None) => {
+ // empty drop glue - a nop.
+ let dest = target.expect("Non terminating drop_in_place_real???");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
+ // Extra arguments beyond the declared inputs only occur for C-variadic calls.
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = fx
+ .tcx
+ .mk_type_list(extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx))));
+ let fn_abi = if let Some(instance) = instance {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
+ } else {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
+ };
+
+ let is_cold = if fn_sig.abi == Abi::RustCold {
+ true
+ } else {
+ instance
+ .map(|inst| {
+ fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
+ })
+ .unwrap_or(false)
+ };
+ if is_cold {
+ fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
+ if let Some(destination_block) = target {
+ fx.bcx.set_cold_block(fx.get_block(destination_block));
+ }
+ }
+
+ // Unpack arguments tuple for closures
+ let mut args = if fn_sig.abi == Abi::RustCall {
+ assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+ let self_arg = codegen_call_argument_operand(fx, &args[0]);
+ let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+
+ let tupled_arguments = match pack_arg.value.layout().ty.kind() {
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
+ _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+ };
+
+ let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+ args.push(self_arg);
+ for i in 0..tupled_arguments.len() {
+ args.push(CallArgument {
+ value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ is_owned: pack_arg.is_owned,
+ });
+ }
+ args
+ } else {
+ args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
+ };
+
+ // Pass the caller location for `#[track_caller]`.
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ let caller_location = fx.get_caller_location(source_info);
+ args.push(CallArgument { value: caller_location, is_owned: false });
+ }
+
+ let args = args;
+ assert_eq!(fn_abi.args.len(), args.len());
+
+ enum CallTarget {
+ Direct(FuncRef),
+ Indirect(SigRef, Value),
+ }
+
+ let (func_ref, first_arg_override) = match instance {
+ // Trait object call
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(
+ nop_inst,
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
+ );
+ }
+
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
- ty::Dynamic(..) => {
++ (CallTarget::Indirect(sig, method), Some(ptr.get_addr(fx)))
+ }
+
+ // Normal call
+ Some(instance) => {
+ let func_ref = fx.get_function_ref(instance);
+ (CallTarget::Direct(func_ref), None)
+ }
+
+ // Indirect call
+ None => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(nop_inst, "indirect call");
+ }
+
+ let func = codegen_operand(fx, func).load_scalar(fx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, func), None)
+ }
+ };
+
+ self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
+ // For virtual calls the self pointer extracted from the fat pointer replaces
+ // the first argument (`first_arg_override`), so that argument is skipped below.
+ let call_args = return_ptr
+ .into_iter()
+ .chain(first_arg_override.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(if first_arg_override.is_some() { 1 } else { 0 })
+ .map(|(i, arg)| {
+ adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
+ })
+ .flatten(),
+ )
+ .collect::<Vec<Value>>();
+
+ let call_inst = match func_ref {
+ CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
+ CallTarget::Indirect(sig, func_ptr) => {
+ fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
+ }
+ };
+
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+ );
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to upperbound on float args once floats are supported
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Non int ty {:?} for variadic call", ty),
+ );
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+ }
+
+ call_inst
+ });
+
+ if let Some(dest) = target {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+}
+
+/// Codegen dropping the value at `drop_place`: a nop for empty drop glue, a
+/// virtual call through the vtable's drop slot for `dyn`/`dyn*` trait objects,
+/// and otherwise a direct call to the monomorphized drop-in-place instance.
+pub(crate) fn codegen_drop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ drop_place: CPlace<'tcx>,
+) {
+ let ty = drop_place.layout().ty;
+ let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
+ // we don't actually need to drop anything
+ } else {
+ match ty.kind() {
++ ty::Dynamic(_, _, ty::Dyn) => {
++ // IN THIS ARM, WE HAVE:
++ // ty = *mut (dyn Trait)
++ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
++ // args[0] args[1]
++ //
++ // args = ( Data, Vtable )
++ // |
++ // v
++ // /-------\
++ // | ... |
++ // \-------/
++ //
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+ let ptr = ptr.get_addr(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+ }
++ ty::Dynamic(_, _, ty::DynStar) => {
++ // IN THIS ARM, WE HAVE:
++ // ty = *mut (dyn* Trait)
++ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
++ //
++ // args = [ * ]
++ // |
++ // v
++ // ( Data, Vtable )
++ // |
++ // v
++ // /-------\
++ // | ... |
++ // \-------/
++ //
++ //
++ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
++ //
++ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
++ // vtable = (*args[0]).1 // loads the vtable out
++ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
++ //
++ // SO THEN WE CAN USE THE ABOVE CODE.
++ let (data, vtable) = drop_place.to_cvalue(fx).dyn_star_force_data_on_stack(fx);
++ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
++
++ let virtual_drop = Instance {
++ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
++ substs: drop_instance.substs,
++ };
++ let fn_abi =
++ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
++
++ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
++ let sig = fx.bcx.import_signature(sig);
++ fx.bcx.ins().call_indirect(sig, drop_fn, &[data]);
++ }
+ _ => {
+ // Sized value: direct call to the drop-in-place instance, passing
+ // `&mut ty` per the drop glue's ABI.
+ assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
+
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(drop_instance, ty::List::empty());
+
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ fx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
+ )),
+ );
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
+
+ let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+ if drop_instance.def.requires_caller_location(fx.tcx) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(source_info);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
+ );
+ }
+
+ let func_ref = fx.get_function_ref(drop_instance);
+ fx.bcx.ins().call(func_ref, &call_args);
+ }
+ }
+ }
+}
--- /dev/null
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_session::config::OomStrategy;
+
+/// Returns whether an allocator shim was created
+///
+/// No shim is needed when any dependency is linked dynamically (the allocator
+/// symbols then come from that dynamic crate) or when no allocator kind is
+/// configured for this crate graph.
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.lang_items().oom().is_some(),
+ tcx.sess.opts.unstable_opts.oom,
+ );
+ true
+ } else {
+ false
+ }
+}
+
+/// Emit the allocator shim: for every allocator method a `__rust_*` wrapper
+/// that forwards to the configured allocator's implementation, plus the
+/// `__rust_alloc_error_handler` wrapper and the OOM-strategy marker byte.
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ oom_strategy: OomStrategy,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
+ for method in ALLOCATOR_METHODS {
+ // Lower the method's declared argument types to pointer-sized ints;
+ // a Layout argument expands into (size, align).
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
+
+ let caller_name = format!("__rust_{}", method.name);
+ let callee_name = kind.fn_name(method.name);
+
+ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
++ ctx.func.signature = sig.clone();
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ // Body: tail-style forward of all params to the callee, returning its results.
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = arg_tys
+ .into_iter()
+ .map(|ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(callee_func_ref, &args);
+ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+ bcx.ins().return_(&results);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+ }
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
+
+ let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
+
+ let func_id =
+ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
++ ctx.func.signature = sig;
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = (&[usize_ty, usize_ty])
+ .iter()
+ .map(|&ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ bcx.ins().call(callee_func_ref, &args);
+
+ // The error handler diverges; mark the fallthrough unreachable.
+ bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+
+ // Export the OOM strategy marker byte read by the standard library.
+ let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(1);
+ let val = oom_strategy.should_panic();
+ data_ctx.define(Box::new([val]));
+ module.define_data(data_id, &data_ctx).unwrap();
+}
--- /dev/null
+//! Creation of ar archives like for the lib and staticlib crate type
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::{self, Read, Seek};
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::Session;
+
+use object::read::archive::ArchiveFile;
+use object::{Object, ObjectSymbol, ReadCache};
+
+/// An entry destined for the output archive: either a member copied out of a
+/// previously added source archive (identified by index and byte range) or a
+/// file on disk.
+#[derive(Debug)]
+enum ArchiveEntry {
+ FromArchive { archive_index: usize, file_range: (u64, u64) },
+ File(PathBuf),
+}
+
+/// Factory for [`ArArchiveBuilder`] instances, as required by the
+/// `ArchiveBuilderBuilder` trait of rustc_codegen_ssa.
+pub(crate) struct ArArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(ArArchiveBuilder {
+ sess,
+ // GNU-style vs BSD-style archive layout is chosen from the target spec.
+ use_gnu_style_archive: sess.target.archive_format == "gnu",
+ // FIXME fix builtin ranlib on macOS
+ no_builtin_ranlib: sess.target.is_like_osx,
+
+ src_archives: vec![],
+ entries: vec![],
+ })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ _sess: &Session,
+ _lib_name: &str,
+ _dll_imports: &[rustc_session::cstore::DllImport],
+ _tmpdir: &Path,
+ ) -> PathBuf {
+ bug!("creating dll imports is not supported");
+ }
+}
+
+/// Builder for `ar` archives (rlib/staticlib) backed by the `ar` crate.
+pub(crate) struct ArArchiveBuilder<'a> {
+ sess: &'a Session,
+ use_gnu_style_archive: bool,
+ no_builtin_ranlib: bool,
+
+ // Opened source archives backing `ArchiveEntry::FromArchive` entries.
+ src_archives: Vec<File>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive for linkers to not get confused.
+ entries: Vec<(Vec<u8>, ArchiveEntry)>,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ /// Queue a single file on disk for inclusion in the archive.
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string().into_bytes(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ /// Queue all members of an existing archive, except those for which `skip`
+ /// returns true, recording only their byte ranges for lazy extraction.
+ fn add_archive(
+ &mut self,
+ archive_path: &Path,
+ mut skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> std::io::Result<()> {
+ let read_cache = ReadCache::new(std::fs::File::open(&archive_path)?);
+ let archive = ArchiveFile::parse(&read_cache).unwrap();
+ let archive_index = self.src_archives.len();
+
+ for entry in archive.members() {
+ let entry = entry.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ let file_name = String::from_utf8(entry.name().to_vec())
+ .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
+ self.entries.push((
+ file_name.into_bytes(),
+ ArchiveEntry::FromArchive { archive_index, file_range: entry.file_range() },
+ ));
+ }
+ }
+
+ self.src_archives.push(read_cache.into_inner());
+ Ok(())
+ }
+
+ /// Write the archive to `output`, building a symbol table unless an
+ /// external ranlib is used. Returns whether any members were written.
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ enum BuilderKind {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ }
+
+ let sess = self.sess;
+
+ let mut symbol_table = BTreeMap::new();
+
+ let mut entries = Vec::new();
+
+ for (mut entry_name, entry) in self.entries {
+ // FIXME only read the symbol table of the object files to avoid having to keep all
+ // object files in memory at once, or read them twice.
+ let data = match entry {
+ ArchiveEntry::FromArchive { archive_index, file_range } => {
+ // FIXME read symbols from symtab
+ let src_read_cache = &mut self.src_archives[archive_index];
+
+ src_read_cache.seek(io::SeekFrom::Start(file_range.0)).unwrap();
+ let mut data = std::vec::from_elem(0, usize::try_from(file_range.1).unwrap());
+ src_read_cache.read_exact(&mut data).unwrap();
+
+ data
+ }
+ ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error while reading object file during archive building: {}",
+ err
+ ));
+ }),
+ };
+
+ if !self.no_builtin_ranlib {
+ if symbol_table.contains_key(&entry_name) {
+ // The ar crate can't handle creating a symbol table in case of multiple archive
+ // members with the same name. Work around this by prepending a number until we
+ // get a unique name.
+ for i in 1.. {
+ let new_name = format!("{}_", i)
+ .into_bytes()
+ .into_iter()
+ .chain(entry_name.iter().copied())
+ .collect::<Vec<_>>();
+ if !symbol_table.contains_key(&new_name) {
+ entry_name = new_name;
+ break;
+ }
+ }
+ }
+
+ match object::File::parse(&*data) {
+ Ok(object) => {
+ // Collect all defined, non-local symbols for the symbol table.
+ symbol_table.insert(
+ entry_name.to_vec(),
+ object
+ .symbols()
+ .filter_map(|symbol| {
+ if symbol.is_undefined() || symbol.is_local() {
+ None
+ } else {
+ symbol.name().map(|name| name.as_bytes().to_vec()).ok()
+ }
+ })
+ .collect::<Vec<_>>(),
+ );
+ }
+ Err(err) => {
+ let err = err.to_string();
+ if err == "Unknown file magic" {
+ // Not an object file; skip it.
++ } else if object::read::archive::ArchiveFile::parse(&*data).is_ok() {
++ // Nested archive file; skip it.
+ } else {
+ sess.fatal(&format!(
+ "error parsing `{}` during archive creation: {}",
+ String::from_utf8_lossy(&entry_name),
+ err
+ ));
+ }
+ }
+ }
+ }
+
+ entries.push((entry_name, data));
+ }
+
+ let mut builder = if self.use_gnu_style_archive {
+ BuilderKind::Gnu(
+ ar::GnuBuilder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ entries.iter().map(|(name, _)| name.clone()).collect(),
+ ar::GnuSymbolTableFormat::Size32,
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ } else {
+ BuilderKind::Bsd(
+ ar::Builder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ };
+
+ let any_members = !entries.is_empty();
+
+ // Add all files
+ for (entry_name, data) in entries.into_iter() {
+ let header = ar::Header::new(entry_name, data.len() as u64);
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ if self.no_builtin_ranlib {
+ let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
+
+ // Run ranlib to be able to link the archive
+ let status = std::process::Command::new(ranlib)
+ .arg(output)
+ .status()
+ .expect("Couldn't run ranlib");
+
+ if !status.success() {
+ self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+ }
+
+ any_members
+ }
+}
--- /dev/null
- func.name = ExternalName::user(0, func_id.as_u32());
+//! Codegen of a single function
+
+use rustc_ast::InlineAsmOptions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
++use cranelift_codegen::ir::UserFuncName;
++
+use crate::constant::ConstantCx;
+use crate::debuginfo::FunctionDebugContext;
+use crate::prelude::*;
+use crate::pretty_clif::CommentWriter;
+
+/// Everything produced by `codegen_fn` that `compile_fn` needs to compile and
+/// define the function in the module.
+pub(crate) struct CodegenedFunction {
+ symbol_name: String,
+ func_id: FuncId,
+ func: Function,
+ clif_comments: CommentWriter,
+ func_debug_cx: Option<FunctionDebugContext>,
+}
+
+#[cfg_attr(not(feature = "jit"), allow(dead_code))]
+/// Codegen and immediately compile a single monomorphized function instance,
+/// reusing the cached `Function` allocation from `cached_context`.
+pub(crate) fn codegen_and_compile_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut crate::CodegenCx,
+ cached_context: &mut Context,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) {
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+
+ // Take the cached Function out of the context to reuse its allocations.
+ let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
+ let codegened_func = codegen_fn(tcx, cx, cached_func, module, instance);
+
+ compile_fn(cx, cached_context, module, codegened_func);
+}
+
+/// Translate the MIR of a single monomorphized instance into (unoptimized,
+/// verified) Cranelift IR, returning all state needed by `compile_fn`.
+pub(crate) fn codegen_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut crate::CodegenCx,
+ cached_func: Function,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) -> CodegenedFunction {
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+ let _mir_guard = crate::PrintOnPanic(|| {
+ let mut buf = Vec::new();
+ with_no_trimmed_paths!({
+ rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
+ .unwrap();
+ });
+ String::from_utf8_lossy(&buf).into_owned()
+ });
+
+ // Declare function
+ let symbol_name = tcx.symbol_name(instance).name.to_string();
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let func_id = module.declare_function(&symbol_name, Linkage::Local, &sig).unwrap();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = cached_func;
+ func.clear();
- Rvalue::Cast(CastKind::DynStar, _, _) => {
- // FIXME(dyn-star)
- unimplemented!()
++ func.name = UserFuncName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks.len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let target_config = module.target_config();
+ let pointer_type = target_config.pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let func_debug_cx = if let Some(debug_context) = &mut cx.debug_context {
+ Some(debug_context.define_function(tcx, &symbol_name, mir.span))
+ } else {
+ None
+ };
+
+ let mut fx = FunctionCx {
+ cx,
+ module,
+ tcx,
+ target_config,
+ pointer_type,
+ constants_cx: ConstantCx::new(),
+ func_debug_cx,
+
+ instance,
+ symbol_name,
+ mir,
+ fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+
+ clif_comments,
+ last_source_file: None,
+ next_ssa_var: 0,
+ };
+
+ tcx.sess.time("codegen clif ir", || codegen_fn_body(&mut fx, start_block));
+
+ // Recover all necessary data from fx, before accessing func will prevent future access to it.
+ let symbol_name = fx.symbol_name;
+ let clif_comments = fx.clif_comments;
+ let func_debug_cx = fx.func_debug_cx;
+
+ fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
+ if cx.should_write_ir {
+ crate::pretty_clif::write_clif_file(
+ tcx.output_filenames(()),
+ &symbol_name,
+ "unopt",
+ module.isa(),
+ &func,
+ &clif_comments,
+ );
+ }
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &func);
+
+ CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx }
+}
+
+/// Compile a previously codegened function: run Cranelift cleanup passes,
+/// define the machine code in the module, optionally dump IR/vcode, and emit
+/// debug and unwind info.
+pub(crate) fn compile_fn(
+ cx: &mut crate::CodegenCx,
+ cached_context: &mut Context,
+ module: &mut dyn Module,
+ codegened_func: CodegenedFunction,
+) {
+ let clif_comments = codegened_func.clif_comments;
+
+ // Store function in context
+ let context = cached_context;
+ context.clear();
+ context.func = codegened_func.func;
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(module.isa()).unwrap();
+ context.dce(module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ // Disabled panic-time IR dump; kept for manual debugging.
+ #[cfg(any())] // This is never true
+ let _clif_guard = {
+ use std::fmt::Write;
+
+ let func_clone = context.func.clone();
+ let clif_comments_clone = clif_comments.clone();
+ let mut clif = String::new();
+ for flag in module.isa().flags().iter() {
+ writeln!(clif, "set {}", flag).unwrap();
+ }
+ write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
+ for isa_flag in module.isa().isa_flags().iter() {
+ write!(clif, " {}", isa_flag).unwrap();
+ }
+ writeln!(clif, "\n").unwrap();
+ crate::PrintOnPanic(move || {
+ let mut clif = clif.clone();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &clif_comments_clone,
+ &mut clif,
+ &func_clone,
+ )
+ .unwrap();
+ clif
+ })
+ };
+
+ // Define function
+ cx.profiler.verbose_generic_activity("define function").run(|| {
+ context.want_disasm = cx.should_write_ir;
+ module.define_function(codegened_func.func_id, context).unwrap();
+ });
+
+ if cx.should_write_ir {
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ &cx.output_filenames,
+ &codegened_func.symbol_name,
+ "opt",
+ module.isa(),
+ &context.func,
+ &clif_comments,
+ );
+
+ if let Some(disasm) = &context.compiled_code().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ &cx.output_filenames,
+ &format!("{}.vcode", codegened_func.symbol_name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
+ }
+
+ // Define debuginfo for function
+ let isa = module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ cx.profiler.verbose_generic_activity("generate debug info").run(|| {
+ if let Some(debug_context) = debug_context {
+ codegened_func.func_debug_cx.unwrap().finalize(
+ debug_context,
+ codegened_func.func_id,
+ context,
+ );
+ }
+ unwind_context.add_function(codegened_func.func_id, &context, isa);
+ });
+}
+
+/// Run the Cranelift IR verifier on `func`; on failure, report the error and
+/// abort compilation with a pretty-printed rendering of the offending IR.
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ // Render the error alongside the annotated IR for easier debugging.
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+/// Translate the MIR body of the current function into Cranelift IR,
+/// visiting every basic block and lowering its statements and terminator.
+fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
+ if !crate::constant::check_constants(fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ // compilation should have been aborted
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+
+ // A function with an uninhabited argument can never actually be called,
+ // so its body is just a trap.
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+ if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+ fx.tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(fx, start_block));
+
+ for (bb, bb_data) in fx.mir.basic_blocks.iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+ // so for cleanup blocks.
+ }
+
+ // NOTE(review): presumably guarantees the block contains at least one
+ // instruction for comments/debug locations to attach to — confirm.
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ // When IR commenting is enabled, annotate the last instruction with the
+ // terminator kind so CLIF dumps are readable.
+ if fx.clif_comments.enabled() {
+ let mut terminator_head = "\n".to_string();
+ with_no_trimmed_paths!({
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ });
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ let source_info = bb_data.terminator().source_info;
+ fx.set_debug_loc(source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ // If the target block just returns (modulo StorageDead), return
+ // here directly instead of jumping to it.
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ // Without overflow checks, negation-overflow asserts degrade to a plain jump.
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.bcx.set_cold_block(failure);
+
+ // Branch to the cold failure block when the condition differs from `expected`.
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, source_info);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+ // Bool-like switches (two-way on 0/1) use direct conditional
+ // branches instead of a generic switch.
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ // Peephole: strip bint/bnot wrappers and fold statically-known branches.
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ // General case: let cranelift-frontend lower a multi-way switch.
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ mir::SourceInfo { span: *fn_span, ..source_info },
+ func,
+ args,
+ *destination,
+ *target,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ cleanup: _,
+ } => {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "cranelift doesn't support unwinding from inline assembly.",
+ );
+ }
+
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ source_info.span,
+ template,
+ operands,
+ *options,
+ *destination,
+ );
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ // FIXME implement unwinding
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Unreachable => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, source_info, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
+
+/// Translate a single MIR statement into Cranelift IR.
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ // Debugging aid: when manually enabled (the cfg is always false), annotates
+ // the emitted IR with the source MIR statement.
+ #[cfg(any())] // This is never true
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ if fx.clif_comments.enabled() {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::CopyForDeref(place) => {
+ let cplace = codegen_place(fx, place);
+ let val = cplace.to_cvalue(fx);
+ lval.write_cvalue(fx, val)
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ // Without overflow checks the overflow flag is a constant false,
+ // so emit a plain binop plus a zero flag.
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ // Logical not for bool: compare against zero.
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(
+ CastKind::IntToInt
+ | CastKind::FloatToFloat
+ | CastKind::FloatToInt
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
+ | CastKind::PointerExposeAddress
+ | CastKind::PointerFromExposedAddress,
+ ref operand,
+ to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ // A pointer is "fat" when its pointee needs metadata (slice len / vtable).
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
- Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
++ Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
++ let operand = codegen_operand(fx, operand);
++ operand.coerce_dyn_star(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ crate::discriminant::codegen_get_discriminant(fx, lval, value, dest_layout);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .kind()
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if operand.layout().size.bytes() == 0 {
+ // Do nothing for ZST's
+ } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx.call_memset(fx.target_config, addr, val, times);
+ } else {
+ // General case: write the element once per iteration in a counted loop.
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let content_ty = fx.monomorphize(content_ty);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+ let operand = codegen_operand(fx, operand);
+ let operand = operand.load_scalar(fx);
+ lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
+ }
+ Rvalue::NullaryOp(null_op, ty) => {
+ assert!(
+ lval.layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+ );
+ let layout = fx.layout_of(fx.monomorphize(ty));
+ let val = match null_op {
+ NullOp::SizeOf => layout.size.bytes(),
+ NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::Intrinsic(ref intrinsic) => match &**intrinsic {
+ // We ignore `assume` intrinsics, they are only useful for optimizations
+ NonDivergingIntrinsic::Assume(_) => {}
+ NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+ src,
+ dst,
+ count,
+ }) => {
+ let dst = codegen_operand(fx, dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, src).load_scalar(fx);
+ let count = codegen_operand(fx, count).load_scalar(fx);
+ // `count` is in elements; scale to a byte count unless elements are 1 byte.
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ },
+ }
+}
+
+/// Return the length of an array or slice place as an SSA value: a constant
+/// for arrays, the length metadata for unsized slice places.
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
+
+/// Lower a MIR `Place` to a `CPlace` by starting from its local and applying
+/// each projection element (deref, field, index, subslice, downcast) in turn.
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ // `from_end` counts the offset backwards from the length.
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ // Arrays have a statically known length, so the subslice is
+ // just a pointer offset with a shorter array layout.
+ let elem_layout = fx.layout_of(*elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ // Slices carry their length as metadata; shrink it by the
+ // elements trimmed from both ends.
+ let elem_layout = fx.layout_of(*elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
+
+/// Lower a MIR `Operand` (a place copy/move or a constant) to a `CValue`.
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
++ Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
+ }
+}
+
+/// Emit a call to the `panic` lang item with the given message and the
+/// caller location derived from `source_info`.
+pub(crate) fn codegen_panic<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ msg_str: &str,
+ source_info: mir::SourceInfo,
+) {
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ // The panic message is passed as a (ptr, len) pair.
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
+}
+
+/// Emit a call to a panic-related lang item (e.g. `panic`,
+/// `panic_bounds_check`) followed by a trap, since the callee never returns.
+pub(crate) fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id = fx
+ .tcx
+ .lang_items()
+ .require(lang_item)
+ .unwrap_or_else(|e| fx.tcx.sess.span_fatal(span, e.to_string()));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ // All panic lang items used here take three pointer-sized arguments.
+ fx.lib_call(
+ &*symbol_name,
+ vec![
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ args,
+ );
+
+ // The panic function diverges; mark everything after it unreachable.
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
--- /dev/null
- }
+use std::sync::{Arc, Condvar, Mutex};
+
+use rustc_session::Session;
+
+use jobserver::HelperThread;
+
+// FIXME don't panic when a worker thread panics
+
+/// Limits how many codegen jobs run concurrently using the Make jobserver.
+pub(super) struct ConcurrencyLimiter {
+ // Jobserver helper thread that acquires tokens; `Some` until `finished()` is called.
+ helper_thread: Option<HelperThread>,
+ // Shared job/token bookkeeping, also mutated by the helper thread.
+ state: Arc<Mutex<state::ConcurrencyLimiterState>>,
+ // Signalled when a new token becomes available.
+ available_token_condvar: Arc<Condvar>,
++ // Set by `finished()`; checked in `Drop` to catch forgotten shutdown.
++ finished: bool,
+}
+
+impl ConcurrencyLimiter {
+ /// Create a limiter for `pending_jobs` jobs, spawning a jobserver helper
+ /// thread that feeds newly acquired tokens into the shared state.
+ pub(super) fn new(sess: &Session, pending_jobs: usize) -> Self {
+ let state = Arc::new(Mutex::new(state::ConcurrencyLimiterState::new(pending_jobs)));
+ let available_token_condvar = Arc::new(Condvar::new());
+
+ let state_helper = state.clone();
+ let available_token_condvar_helper = available_token_condvar.clone();
+ let helper_thread = sess
+ .jobserver
+ .clone()
+ .into_helper_thread(move |token| {
+ let mut state = state_helper.lock().unwrap();
+ state.add_new_token(token.unwrap());
+ available_token_condvar_helper.notify_one();
+ })
+ .unwrap();
+ ConcurrencyLimiter {
+ helper_thread: Some(helper_thread),
+ state,
+ // Reuse the condvar the helper thread notifies. Storing a fresh
+ // `Condvar` here would make `acquire()` wait on a condvar that is
+ // never signalled, deadlocking once all tokens are in use.
+ available_token_condvar,
++ finished: false,
+ }
+ }
+
+ /// Block until a token is available and return an RAII token that releases
+ /// the job slot on drop.
+ pub(super) fn acquire(&mut self) -> ConcurrencyLimiterToken {
+ let mut state = self.state.lock().unwrap();
+ loop {
+ state.assert_invariants();
+
+ if state.try_start_job() {
+ return ConcurrencyLimiterToken {
+ state: self.state.clone(),
+ available_token_condvar: self.available_token_condvar.clone(),
+ };
+ }
+
+ // Ask the jobserver for another token, then wait until the helper
+ // thread signals that one arrived.
+ self.helper_thread.as_mut().unwrap().request_token();
+ state = self.available_token_condvar.wait(state).unwrap();
+ }
+ }
+
+ /// Record that a pending job was satisfied without running (e.g. cache hit).
+ pub(super) fn job_already_done(&mut self) {
+ let mut state = self.state.lock().unwrap();
+ state.job_already_done();
+ }
- impl Drop for ConcurrencyLimiter {
- fn drop(&mut self) {
- //
+
++ /// Shut down the helper thread and assert that every job completed.
++ pub(crate) fn finished(mut self) {
+ self.helper_thread.take();
+
+ // Assert that all jobs have finished
+ let state = Mutex::get_mut(Arc::get_mut(&mut self.state).unwrap()).unwrap();
+ state.assert_done();
++
++ self.finished = true;
++ }
++}
++
++// Safety net: detect limiters that were dropped without calling `finished()`.
++impl Drop for ConcurrencyLimiter {
++ fn drop(&mut self) {
++ // Don't double-panic if we are already unwinding from another panic.
++ if !self.finished && !std::thread::panicking() {
++ panic!("Forgot to call finished() on ConcurrencyLimiter");
++ }
+ }
+}
+
+/// RAII guard for one running job; releases the job slot and wakes a waiter on drop.
+#[derive(Debug)]
+pub(super) struct ConcurrencyLimiterToken {
+ state: Arc<Mutex<state::ConcurrencyLimiterState>>,
+ available_token_condvar: Arc<Condvar>,
+}
+
+impl Drop for ConcurrencyLimiterToken {
+ fn drop(&mut self) {
+ // Mark the job finished and wake one thread waiting in `acquire()`,
+ // since a token may now be free for reuse.
+ let mut state = self.state.lock().unwrap();
+ state.job_finished();
+ self.available_token_condvar.notify_one();
+ }
+}
+
+/// Internal bookkeeping for the limiter: counts pending/active jobs and owns
+/// the jobserver tokens currently held.
+mod state {
+ use jobserver::Acquired;
+
+ #[derive(Debug)]
+ pub(super) struct ConcurrencyLimiterState {
+ // Jobs not yet finished (running or still queued).
+ pending_jobs: usize,
+ // Jobs currently running, each backed by one held token.
+ active_jobs: usize,
+
+ // None is used to represent the implicit token, Some to represent explicit tokens
+ tokens: Vec<Option<Acquired>>,
+ }
+
+ impl ConcurrencyLimiterState {
+ pub(super) fn new(pending_jobs: usize) -> Self {
+ // Start with just the implicit token.
+ ConcurrencyLimiterState { pending_jobs, active_jobs: 0, tokens: vec![None] }
+ }
+
+ pub(super) fn assert_invariants(&self) {
+ // There must be no excess active jobs
+ assert!(self.active_jobs <= self.pending_jobs);
+
+ // There may not be more active jobs than there are tokens
+ assert!(self.active_jobs <= self.tokens.len());
+ }
+
+ pub(super) fn assert_done(&self) {
+ assert_eq!(self.pending_jobs, 0);
+ assert_eq!(self.active_jobs, 0);
+ }
+
+ pub(super) fn add_new_token(&mut self, token: Acquired) {
+ self.tokens.push(Some(token));
+ self.drop_excess_capacity();
+ }
+
+ /// Try to claim a token for a new job; returns false when all tokens are busy.
+ pub(super) fn try_start_job(&mut self) -> bool {
+ if self.active_jobs < self.tokens.len() {
+ // Using existing token
+ self.job_started();
+ return true;
+ }
+
+ false
+ }
+
+ pub(super) fn job_started(&mut self) {
+ self.assert_invariants();
+ self.active_jobs += 1;
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ pub(super) fn job_finished(&mut self) {
+ self.assert_invariants();
+ self.pending_jobs -= 1;
+ self.active_jobs -= 1;
+ self.assert_invariants();
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ /// A pending job completed without ever starting (no token was used).
+ pub(super) fn job_already_done(&mut self) {
+ self.assert_invariants();
+ self.pending_jobs -= 1;
+ self.assert_invariants();
+ self.drop_excess_capacity();
+ self.assert_invariants();
+ }
+
+ fn drop_excess_capacity(&mut self) {
+ self.assert_invariants();
+
+ // Drop all tokens that can never be used anymore
+ self.tokens.truncate(std::cmp::max(self.pending_jobs, 1));
+
+ // Keep some excess tokens to satisfy requests faster
+ const MAX_EXTRA_CAPACITY: usize = 2;
+ self.tokens.truncate(std::cmp::max(self.active_jobs + MAX_EXTRA_CAPACITY, 1));
+
+ self.assert_invariants();
+ }
+ }
+}
--- /dev/null
- use cranelift_codegen::ir::GlobalValueData;
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
+};
+use rustc_span::DUMMY_SP;
+
- fn codegen_static_ref<'tcx>(
- fx: &mut FunctionCx<'_, '_, 'tcx>,
- def_id: DefId,
- layout: TyAndLayout<'tcx>,
- ) -> CPlace<'tcx> {
- let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
- let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
- if fx.clif_comments.enabled() {
- fx.add_comment(local_data_id, format!("{:?}", def_id));
- }
- let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
- assert!(!layout.is_unsized(), "unsized statics aren't supported");
- assert!(
- matches!(
- fx.bcx.func.global_values[local_data_id],
- GlobalValueData::Symbol { tls: false, .. }
- ),
- "tls static referenced without Rvalue::ThreadLocalRef"
- );
- CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
- }
-
- pub(crate) fn codegen_constant<'tcx>(
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+/// Per-module context tracking allocations and statics that still need to be
+/// defined as data objects in the module.
+pub(crate) struct ConstantCx {
+ // Work queue of allocations/statics still to be defined.
+ todo: Vec<TodoItem>,
+ // Data objects already defined, to avoid defining them twice.
+ done: FxHashSet<DataId>,
+ // Maps anonymous (non-static) allocations to their assigned DataId.
+ anon_allocs: FxHashMap<AllocId, DataId>,
+}
+
+/// A pending item in the `ConstantCx` work queue: either an anonymous
+/// allocation or a `static` identified by its `DefId`.
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+ Alloc(AllocId),
+ Static(DefId),
+}
+
+impl ConstantCx {
+ pub(crate) fn new() -> Self {
+ ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
+ }
+
+ /// Define every queued allocation/static in `module`, draining the queue.
+ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
+ //println!("todo {:?}", self.todo);
+ define_all_allocs(tcx, module, &mut self);
+ //println!("done {:?}", self.done);
+ self.done.clear();
+ }
+}
+
+/// Evaluate all constants required by the function body up front, reporting
+/// errors for any that fail. Returns false when codegen must be aborted.
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
+ let mut all_constants_ok = true;
+ for constant in &fx.mir.required_consts {
+ let unevaluated = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(_) => unreachable!(),
+ ConstantKind::Unevaluated(uv, _) => uv,
+ // Already-evaluated constants can't fail.
+ ConstantKind::Val(..) => continue,
+ };
+
+ if let Err(err) = fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
+ all_constants_ok = false;
+ match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ // Monomorphization should have removed all generic parameters.
+ span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
+ }
+ }
+ }
+ }
+ all_constants_ok
+}
+
+/// Define the data object for a single `static` item in `module`.
+pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
+ let mut constants_cx = ConstantCx::new();
+ constants_cx.todo.push(TodoItem::Static(def_id));
+ constants_cx.finalize(tcx, module);
+}
+
+/// Produce a pointer to a thread-local static via the TLS access instruction.
+pub(crate) fn codegen_tls_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ }
+ let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ CValue::by_val(tls_ptr, layout)
+}
+
- ) -> CValue<'tcx> {
- let (const_val, ty) = match fx.monomorphize(constant.literal) {
- ConstantKind::Ty(const_) => unreachable!("{:?}", const_),
- ConstantKind::Unevaluated(mir::UnevaluatedConst { def, substs, promoted }, ty)
++/// Evaluate a MIR constant to a `ConstValue` plus its monomorphized type.
++/// References to statics are a bug here; erroneous constants are caught
++/// earlier by `check_constants`, so evaluation failure is also a bug.
++pub(crate) fn eval_mir_constant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ constant: &Constant<'tcx>,
- assert!(substs.is_empty());
- assert!(promoted.is_none());
-
- return codegen_static_ref(fx, def.did, fx.layout_of(ty)).to_cvalue(fx);
- }
- ConstantKind::Unevaluated(unevaluated, ty) => {
- match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
- Ok(const_val) => (const_val, ty),
- Err(_) => {
- span_bug!(constant.span, "erroneous constant not captured by required_consts");
- }
- }
++) -> (ConstValue<'tcx>, Ty<'tcx>) {
++ let constant_kind = fx.monomorphize(constant.literal);
++ let uv = match constant_kind {
++ ConstantKind::Ty(const_) => match const_.kind() {
++ ty::ConstKind::Unevaluated(uv) => uv.expand(),
++ ty::ConstKind::Value(val) => {
++ return (fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty());
++ }
++ err => span_bug!(
++ constant.span,
++ "encountered bad ConstKind after monomorphizing: {:?}",
++ err
++ ),
++ },
++ ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
+ if fx.tcx.is_static(def.did) =>
+ {
- ConstantKind::Val(val, ty) => (val, ty),
++ span_bug!(constant.span, "MIR constant refers to static");
+ }
- pub(crate) fn pointer_for_allocation<'tcx>(
++ ConstantKind::Unevaluated(uv, _) => uv,
++ ConstantKind::Val(val, _) => return (val, constant_kind.ty()),
+ };
+
++ (
++ fx.tcx.const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).unwrap_or_else(|_err| {
++ span_bug!(constant.span, "erroneous constant not captured by required_consts");
++ }),
++ constant_kind.ty(),
++ )
++}
++
++pub(crate) fn codegen_constant_operand<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ constant: &Constant<'tcx>,
++) -> CValue<'tcx> {
++ let (const_val, ty) = eval_mir_constant(fx, constant);
++
+ codegen_const_value(fx, const_val, ty)
+}
+
+pub(crate) fn codegen_const_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ const_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+ let layout = fx.layout_of(ty);
+ assert!(!layout.is_unsized(), "sized const value");
+
+ if layout.is_zst() {
+ return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+ }
+
+ match const_val {
+ ConstValue::ZeroSized => unreachable!(), // we already handled ZSTs above
+ ConstValue::Scalar(x) => match x {
+ Scalar::Int(int) => {
+ if fx.clif_type(layout.ty).is_some() {
+ return CValue::const_val(fx, layout, int);
+ } else {
+ let raw_val = int.to_bits(int.size()).unwrap();
+ let val = match int.size().bytes() {
+ 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
+ 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
+ 4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
+ 8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
+ 16 => {
+ let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
+ let msb =
+ fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ _ => unreachable!(),
+ };
+
+ let place = CPlace::new_stack_slot(fx, layout);
+ place.to_ptr().store(fx, val, MemFlags::trusted());
+ place.to_cvalue(fx)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
+ let base_addr = match fx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Function(instance) => {
+ let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
+ let local_func_id =
+ fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+ let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
+ // FIXME: factor this common code with the `Memory` arm into a function?
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(fx.tcx.is_static(def_id));
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ };
+ let val = if offset.bytes() != 0 {
+ fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
+ } else {
+ base_addr
+ };
+ CValue::by_val(val, layout)
+ }
+ },
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
+ ConstValue::Slice { data, start, end } => {
+ let ptr = pointer_for_allocation(fx, data)
+ .offset_i64(fx, i64::try_from(start).unwrap())
+ .get_addr(fx);
+ let len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ CValue::by_val_pair(ptr, len, layout)
+ }
+ }
+}
+
- Operand::Constant(const_) => match const_.literal {
- ConstantKind::Ty(const_) => fx
- .monomorphize(const_)
- .eval_for_mir(fx.tcx, ParamEnv::reveal_all())
- .try_to_value(fx.tcx),
++fn pointer_for_allocation<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ alloc: ConstAllocation<'tcx>,
+) -> crate::pointer::Pointer {
+ let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ &mut *fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ crate::pointer::Pointer::new(global_ptr)
+}
+
+pub(crate) fn data_id_for_alloc_id(
+ cx: &mut ConstantCx,
+ module: &mut dyn Module,
+ alloc_id: AllocId,
+ mutability: rustc_hir::Mutability,
+) -> DataId {
+ cx.todo.push(TodoItem::Alloc(alloc_id));
+ *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
+ })
+}
+
+fn data_id_for_static(
+ tcx: TyCtxt<'_>,
+ module: &mut dyn Module,
+ def_id: DefId,
+ definition: bool,
+) -> DataId {
+ let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+ let linkage = if definition {
+ crate::linkage::get_static_linkage(tcx, def_id)
+ } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
+ Linkage::Preemptible
+ } else {
+ Linkage::Import
+ };
+
+ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+ let symbol_name = tcx.symbol_name(instance).name;
+ let ty = instance.ty(tcx, ParamEnv::reveal_all());
+ let is_mutable = if tcx.is_mutable_static(def_id) {
+ true
+ } else {
+ !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ };
+ let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
+
+ let attrs = tcx.codegen_fn_attrs(def_id);
+
+ let data_id = match module.declare_data(
+ &*symbol_name,
+ linkage,
+ is_mutable,
+ attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+ ) {
+ Ok(data_id) => data_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{symbol_name}` as static, but it was already declared as function"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ };
+
+ if rlinkage.is_some() {
+ // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+
+ let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+ let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(align);
+ let data = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+ data_ctx.write_data_addr(0, data, 0);
+ match module.define_data(ref_data_id, &data_ctx) {
+ // Every time the static is referenced there will be another definition of this global,
+ // so duplicate definitions are expected and allowed.
+ Err(ModuleError::DuplicateDefinition(_)) => {}
+ res => res.unwrap(),
+ }
+ ref_data_id
+ } else {
+ data_id
+ }
+}
+
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
+ while let Some(todo_item) = cx.todo.pop() {
+ let (data_id, alloc, section_name) = match todo_item {
+ TodoItem::Alloc(alloc_id) => {
+ let alloc = match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => alloc,
+ GlobalAlloc::Function(_) | GlobalAlloc::Static(_) | GlobalAlloc::VTable(..) => {
+ unreachable!()
+ }
+ };
+ let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module
+ .declare_anonymous_data(
+ alloc.inner().mutability == rustc_hir::Mutability::Mut,
+ false,
+ )
+ .unwrap()
+ });
+ (data_id, alloc, None)
+ }
+ TodoItem::Static(def_id) => {
+ //println!("static {:?}", def_id);
+
+ let section_name = tcx.codegen_fn_attrs(def_id).link_section;
+
+ let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+ let data_id = data_id_for_static(tcx, module, def_id, true);
+ (data_id, alloc, section_name)
+ }
+ };
+
+ //("data_id {}", data_id);
+ if cx.done.contains(&data_id) {
+ continue;
+ }
+
+ let mut data_ctx = DataContext::new();
+ let alloc = alloc.inner();
+ data_ctx.set_align(alloc.align.bytes());
+
+ if let Some(section_name) = section_name {
+ let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
+ let section_name = section_name.as_str();
+ if let Some(names) = section_name.split_once(',') {
+ names
+ } else {
+ tcx.sess.fatal(&format!(
+ "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
+ section_name
+ ));
+ }
+ } else {
+ ("", section_name.as_str())
+ };
+ data_ctx.set_segment_section(segment_name, section_name);
+ }
+
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+ data_ctx.define(bytes.into_boxed_slice());
+
+ for &(offset, alloc_id) in alloc.provenance().iter() {
+ let addend = {
+ let endianness = tcx.data_layout.endian;
+ let offset = offset.bytes() as usize;
+ let ptr_size = tcx.data_layout.pointer_size;
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
+ read_target_uint(endianness, bytes).unwrap()
+ };
+
+ let reloc_target_alloc = tcx.global_alloc(alloc_id);
+ let data_id = match reloc_target_alloc {
+ GlobalAlloc::Function(instance) => {
+ assert_eq!(addend, 0);
+ let func_id =
+ crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
+ let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+ continue;
+ }
+ GlobalAlloc::Memory(target_alloc) => {
+ data_id_for_alloc_id(cx, module, alloc_id, target_alloc.inner().mutability)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ data_id_for_alloc_id(cx, module, alloc_id, Mutability::Not)
+ }
+ GlobalAlloc::Static(def_id) => {
+ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
+ }
+
+ // Don't push a `TodoItem::Static` here, as it will cause statics used by
+ // multiple crates to be duplicated between them. It isn't necessary anyway,
+ // as it will get pushed by `codegen_static` when necessary.
+ data_id_for_static(tcx, module, def_id, false)
+ }
+ };
+
+ let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+ }
+
+ module.define_data(data_id, &data_ctx).unwrap();
+ cx.done.insert(data_id);
+ }
+
+ assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> Option<ConstValue<'tcx>> {
+ match operand {
- fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), uv, None).ok()
++ Operand::Constant(const_) => match fx.monomorphize(const_.literal) {
++ ConstantKind::Ty(const_) => Some(
++ const_.eval_for_mir(fx.tcx, ParamEnv::reveal_all()).try_to_value(fx.tcx).unwrap(),
++ ),
+ ConstantKind::Val(val, _) => Some(val),
+ ConstantKind::Unevaluated(uv, _) => {
++ Some(fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), uv, None).unwrap())
+ }
+ },
+ // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
+ // inside a temporary before being passed to the intrinsic requiring the const argument.
+ // This code tries to find a single constant defining definition of the referenced local.
+ Operand::Copy(place) | Operand::Move(place) => {
+ if !place.projection.is_empty() {
+ return None;
+ }
+ let mut computed_const_val = None;
+ for bb_data in fx.mir.basic_blocks.iter() {
+ for stmt in &bb_data.statements {
+ match &stmt.kind {
+ StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
+ match &local_and_rvalue.1 {
+ Rvalue::Cast(
+ CastKind::IntToInt
+ | CastKind::FloatToFloat
+ | CastKind::FloatToInt
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr,
+ operand,
+ ty,
+ ) => {
+ if computed_const_val.is_some() {
+ return None; // local assigned twice
+ }
+ if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
+ return None;
+ }
+ let const_val = mir_operand_get_const_val(fx, operand)?;
+ if fx.layout_of(*ty).size
+ != const_val.try_to_scalar_int()?.size()
+ {
+ return None;
+ }
+ computed_const_val = Some(const_val);
+ }
+ Rvalue::Use(operand) => {
+ computed_const_val = mir_operand_get_const_val(fx, operand)
+ }
+ _ => return None,
+ }
+ }
+ StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
+ if &**stmt_place == place =>
+ {
+ return None;
+ }
+ StatementKind::Intrinsic(ref intrinsic) => match **intrinsic {
+ NonDivergingIntrinsic::CopyNonOverlapping(..) => return None,
+ NonDivergingIntrinsic::Assume(..) => {}
+ },
+ // conservative handling
+ StatementKind::Assign(_)
+ | StatementKind::FakeRead(_)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
+ }
+ }
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Assert { .. } => {}
+ TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => unreachable!(),
+ TerminatorKind::InlineAsm { .. } => return None,
+ TerminatorKind::Call { destination, target: Some(_), .. }
+ if destination == place =>
+ {
+ return None;
+ }
+ TerminatorKind::Call { .. } => {}
+ }
+ }
+ computed_const_val
+ }
+ }
+}
--- /dev/null
- drop(self.concurrency_limiter);
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::fs::File;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::thread::JoinHandle;
+
+use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputFilenames, OutputType};
+use rustc_session::Session;
+
+use cranelift_object::{ObjectBuilder, ObjectModule};
+
+use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken};
+use crate::global_asm::GlobalAsmConfig;
+use crate::{prelude::*, BackendConfig};
+
+struct ModuleCodegenResult {
+ module_regular: CompiledModule,
+ module_global_asm: Option<CompiledModule>,
+ existing_work_product: Option<(WorkProductId, WorkProduct)>,
+}
+
+enum OngoingModuleCodegen {
+ Sync(Result<ModuleCodegenResult, String>),
+ Async(JoinHandle<Result<ModuleCodegenResult, String>>),
+}
+
+impl<HCX> HashStable<HCX> for OngoingModuleCodegen {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+pub(crate) struct OngoingCodegen {
+ modules: Vec<OngoingModuleCodegen>,
+ allocator_module: Option<CompiledModule>,
+ metadata_module: Option<CompiledModule>,
+ metadata: EncodedMetadata,
+ crate_info: CrateInfo,
+ concurrency_limiter: ConcurrencyLimiter,
+}
+
+impl OngoingCodegen {
+ pub(crate) fn join(
+ self,
+ sess: &Session,
+ backend_config: &BackendConfig,
+ ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+ let mut work_products = FxHashMap::default();
+ let mut modules = vec![];
+
+ for module_codegen in self.modules {
+ let module_codegen_result = match module_codegen {
+ OngoingModuleCodegen::Sync(module_codegen_result) => module_codegen_result,
+ OngoingModuleCodegen::Async(join_handle) => match join_handle.join() {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(panic) => std::panic::resume_unwind(panic),
+ },
+ };
+
+ let module_codegen_result = match module_codegen_result {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(err) => sess.fatal(&err),
+ };
+ let ModuleCodegenResult { module_regular, module_global_asm, existing_work_product } =
+ module_codegen_result;
+
+ if let Some((work_product_id, work_product)) = existing_work_product {
+ work_products.insert(work_product_id, work_product);
+ } else {
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else if let Some(module_global_asm) = &module_global_asm {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[
+ ("o", &module_regular.object.as_ref().unwrap()),
+ ("asm.o", &module_global_asm.object.as_ref().unwrap()),
+ ],
+ )
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[("o", &module_regular.object.as_ref().unwrap())],
+ )
+ };
+ if let Some((work_product_id, work_product)) = work_product {
+ work_products.insert(work_product_id, work_product);
+ }
+ }
+
+ modules.push(module_regular);
+ if let Some(module_global_asm) = module_global_asm {
+ modules.push(module_global_asm);
+ }
+ }
+
++ self.concurrency_limiter.finished();
+
+ (
+ CodegenResults {
+ modules,
+ allocator_module: self.allocator_module,
+ metadata_module: self.metadata_module,
+ metadata: self.metadata,
+ crate_info: self.crate_info,
+ },
+ work_products,
+ )
+ }
+}
+
+fn make_module(sess: &Session, backend_config: &BackendConfig, name: String) -> ObjectModule {
+ let isa = crate::build_isa(sess, backend_config);
+
+ let mut builder =
+ ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.unstable_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
+
+fn emit_cgu(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
+ name: String,
+ module: ObjectModule,
+ debug: Option<DebugContext>,
+ unwind_context: UnwindContext,
+ global_asm_object_file: Option<PathBuf>,
+) -> Result<ModuleCodegenResult, String> {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let module_regular =
+ emit_module(output_filenames, prof, product.object, ModuleKind::Regular, name.clone())?;
+
+ Ok(ModuleCodegenResult {
+ module_regular,
+ module_global_asm: global_asm_object_file.map(|global_asm_object_file| CompiledModule {
+ name: format!("{name}.asm"),
+ kind: ModuleKind::Regular,
+ object: Some(global_asm_object_file),
+ dwarf_object: None,
+ bytecode: None,
+ }),
+ existing_work_product: None,
+ })
+}
+
+fn emit_module(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
+ object: cranelift_object::object::write::Object<'_>,
+ kind: ModuleKind,
+ name: String,
+) -> Result<CompiledModule, String> {
+ let tmp_file = output_filenames.temp_path(OutputType::Object, Some(&name));
+ let mut file = match File::create(&tmp_file) {
+ Ok(file) => file,
+ Err(err) => return Err(format!("error creating object file: {}", err)),
+ };
+
+ if let Err(err) = object.write_stream(&mut file) {
+ return Err(format!("error writing object file: {}", err));
+ }
+
+ prof.artifact_size("object_file", &*name, file.metadata().unwrap().len());
+
+ Ok(CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None })
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+) -> Result<ModuleCodegenResult, String> {
+ let work_product = cgu.previous_work_product(tcx);
+ let obj_out_regular =
+ tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ let source_file_regular = rustc_incremental::in_incr_comp_dir_sess(
+ &tcx.sess,
+ &work_product.saved_files.get("o").expect("no saved object file in work product"),
+ );
+
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_regular, &obj_out_regular) {
+ return Err(format!(
+ "unable to copy {} to {}: {}",
+ source_file_regular.display(),
+ obj_out_regular.display(),
+ err
+ ));
+ }
+ let obj_out_global_asm =
+ crate::global_asm::add_file_stem_postfix(obj_out_regular.clone(), ".asm");
+ let has_global_asm = if let Some(asm_o) = work_product.saved_files.get("asm.o") {
+ let source_file_global_asm = rustc_incremental::in_incr_comp_dir_sess(&tcx.sess, asm_o);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_global_asm, &obj_out_global_asm)
+ {
+ return Err(format!(
+ "unable to copy {} to {}: {}",
+ source_file_regular.display(),
+ obj_out_regular.display(),
+ err
+ ));
+ }
+ true
+ } else {
+ false
+ };
+
+ Ok(ModuleCodegenResult {
+ module_regular: CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_regular),
+ dwarf_object: None,
+ bytecode: None,
+ },
+ module_global_asm: if has_global_asm {
+ Some(CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_global_asm),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ },
+ existing_work_product: Some((cgu.work_product_id(), work_product)),
+ })
+}
+
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, global_asm_config, cgu_name, token): (
+ BackendConfig,
+ Arc<GlobalAsmConfig>,
+ rustc_span::Symbol,
+ ConcurrencyLimiterToken,
+ ),
+) -> OngoingModuleCodegen {
+ let (cgu_name, mut cx, mut module, codegened_functions) = tcx.sess.time("codegen cgu", || {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let mut module = make_module(tcx.sess, &backend_config, cgu_name.as_str().to_string());
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ module.isa(),
+ tcx.sess.opts.debuginfo != DebugInfo::None,
+ cgu_name,
+ );
+ super::predefine_mono_items(tcx, &mut module, &mono_items);
+ let mut codegened_functions = vec![];
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => {
+ tcx.sess.time("codegen fn", || {
+ let codegened_function = crate::base::codegen_fn(
+ tcx,
+ &mut cx,
+ Function::new(),
+ &mut module,
+ inst,
+ );
+ codegened_functions.push(codegened_function);
+ });
+ }
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut module, def_id)
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ crate::global_asm::codegen_global_asm_item(tcx, &mut cx.global_asm, item_id);
+ }
+ }
+ }
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut module,
+ &mut cx.unwind_context,
+ false,
+ cgu.is_primary(),
+ );
+
+ let cgu_name = cgu.name().as_str().to_owned();
+
+ (cgu_name, cx, module, codegened_functions)
+ });
+
+ OngoingModuleCodegen::Async(std::thread::spawn(move || {
+ cx.profiler.clone().verbose_generic_activity("compile functions").run(|| {
+ let mut cached_context = Context::new();
+ for codegened_func in codegened_functions {
+ crate::base::compile_fn(&mut cx, &mut cached_context, &mut module, codegened_func);
+ }
+ });
+
+ let global_asm_object_file =
+ cx.profiler.verbose_generic_activity("compile assembly").run(|| {
+ crate::global_asm::compile_global_asm(&global_asm_config, &cgu_name, &cx.global_asm)
+ })?;
+
+ let codegen_result = cx.profiler.verbose_generic_activity("write object file").run(|| {
+ emit_cgu(
+ &global_asm_config.output_filenames,
+ &cx.profiler,
+ cgu_name,
+ module,
+ cx.debug_context,
+ cx.unwind_context,
+ global_asm_object_file,
+ )
+ });
+ std::mem::drop(token);
+ codegen_result
+ }))
+}
+
+pub(crate) fn run_aot(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<OngoingCodegen> {
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(()).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let global_asm_config = Arc::new(crate::global_asm::GlobalAsmConfig::new(tcx));
+
+ let mut concurrency_limiter = ConcurrencyLimiter::new(tcx.sess, cgus.len());
+
+ let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = if backend_config.disable_incr_cache {
+ CguReuse::No
+ } else {
+ determine_cgu_reuse(tcx, cgu)
+ };
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ CguReuse::No => {
+ let dep_node = cgu.codegen_dep_node(tcx);
+ tcx.dep_graph
+ .with_task(
+ dep_node,
+ tcx,
+ (
+ backend_config.clone(),
+ global_asm_config.clone(),
+ cgu.name(),
+ concurrency_limiter.acquire(),
+ ),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ )
+ .0
+ }
+ CguReuse::PreLto => unreachable!(),
+ CguReuse::PostLto => {
+ concurrency_limiter.job_already_done();
+ OngoingModuleCodegen::Sync(reuse_workproduct_for_cgu(tcx, &*cgu))
+ }
+ }
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let mut allocator_module = make_module(tcx.sess, &backend_config, "allocator_shim".to_string());
+ let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let mut product = allocator_module.finish();
+ allocator_unwind_context.emit(&mut product);
+
+ match emit_module(
+ tcx.output_filenames(()),
+ &tcx.sess.prof,
+ product.object,
+ ModuleKind::Allocator,
+ "allocator_shim".to_owned(),
+ ) {
+ Ok(allocator_module) => Some(allocator_module),
+ Err(err) => tcx.sess.fatal(err),
+ }
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let symbol_name = rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx);
+ let obj = create_compressed_metadata_file(tcx.sess, &metadata, &symbol_name);
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ // FIXME handle `-Ctarget-cpu=native`
+ let target_cpu = match tcx.sess.opts.cg.target_cpu {
+ Some(ref name) => name,
+ None => tcx.sess.target.cpu.as_ref(),
+ }
+ .to_owned();
+
+ Box::new(OngoingCodegen {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ concurrency_limiter,
+ })
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+ // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PostLto } else { CguReuse::No }
+}
--- /dev/null
- let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+// FIXME use std::sync::OnceLock once it stabilizes
+use once_cell::sync::OnceCell;
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+struct JitState {
+ backend_config: BackendConfig,
+ jit_module: JITModule,
+}
+
+thread_local! {
+ static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+ /// Request that the specified `Instance` be lazily jitted.
+ ///
+ /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+ /// this message is sent.
+ JitFn {
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+ tx: mpsc::Sender<*const u8>,
+ },
+}
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+ /// Send the message.
+ fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+ thread_local! {
+ /// The Sender owned by the local thread
+ static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
+ GLOBAL_MESSAGE_SENDER
+ .get().unwrap()
+ .lock().unwrap()
+ .clone();
+ }
+ LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+ }
+}
+
+fn create_jit_module(
+ tcx: TyCtxt<'_>,
+ backend_config: &BackendConfig,
+ hotswap: bool,
+) -> (JITModule, CodegenCx) {
+ let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
- jit_builder.symbols(imported_symbols);
+
+ let isa = crate::build_isa(tcx.sess, backend_config);
+ let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+ jit_builder.hotswap(hotswap);
+ crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
- fn load_imported_symbols_for_jit(
++ jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
+ jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
+ let mut jit_module = JITModule::new(jit_builder);
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+
+ crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut jit_module,
+ &mut cx.unwind_context,
+ true,
+ true,
+ );
+
+ (jit_module, cx)
+}
+
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+ }
+
+ if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ let (mut jit_module, mut cx) = create_jit_module(
+ tcx,
+ &backend_config,
+ matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+ );
+ let mut cached_context = Context::new();
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
+ let mono_items = cgus
+ .iter()
+ .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+ .flatten()
+ .collect::<FxHashMap<_, (_, _)>>()
+ .into_iter()
+ .collect::<Vec<(_, (_, _))>>();
+
+ super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
+ tcx.sess.time("codegen fn", || {
+ crate::base::codegen_and_compile_fn(
+ tcx,
+ &mut cx,
+ &mut cached_context,
+ &mut jit_module,
+ inst,
+ )
+ });
+ }
+ CodegenMode::JitLazy => {
+ codegen_shim(tcx, &mut cx, &mut cached_context, &mut jit_module, inst)
+ }
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
+ if !cx.global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
+
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+
+ let start_sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: jit_module.target_config().default_call_conv,
+ };
+ let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+ let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ assert!(lazy_jit_state.is_none());
+ *lazy_jit_state = Some(JitState { backend_config, jit_module });
+ });
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_start) };
+
+ let (tx, rx) = mpsc::channel();
+ GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+ // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+ // (e.g. to lazily JIT further functions as required)
+ std::thread::spawn(move || {
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+ std::process::exit(ret);
+ });
+
+ // Handle messages
+ loop {
+ match rx.recv().unwrap() {
+ // lazy JIT compilation request - compile requested instance and return pointer to result
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+ tx.send(jit_fn(instance_ptr, trampoline_ptr))
+ .expect("jitted runtime hung up before response to lazy JIT request was sent");
+ }
+ }
+ }
+}
+
+extern "C" fn clif_jit_fn(
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+) -> *const u8 {
+ // send the JIT request to the rustc thread, with a channel for the response
+ let (tx, rx) = mpsc::channel();
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+ .send()
+ .expect("rustc thread hung up before lazy JIT request was sent");
+
+ // block on JIT compilation result
+ rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+ // lift is used to ensure the correct lifetime for instance.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+ let jit_module = &mut lazy_jit_state.jit_module;
+ let backend_config = lazy_jit_state.backend_config.clone();
+
+ let name = tcx.symbol_name(instance).name;
+ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+ let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let current_ptr = jit_module.read_got_entry(func_id);
+
+ // If the function's GOT entry has already been updated to point at something other
+ // than the shim trampoline, don't re-jit but just return the new pointer instead.
+ // This does not need synchronization as this code is executed only by a sole rustc
+ // thread.
+ if current_ptr != trampoline_ptr {
+ return current_ptr;
+ }
+
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config,
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+ tcx.sess.time("codegen fn", || {
+ crate::base::codegen_and_compile_fn(
+ tcx,
+ &mut cx,
+ &mut Context::new(),
+ jit_module,
+ instance,
+ )
+ });
+
+ assert!(cx.global_asm.is_empty());
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
- ) -> Vec<(String, *const u8)> {
++fn dep_symbol_lookup_fn(
+ sess: &Session,
+ crate_info: CrateInfo,
- let mut imported_symbols = Vec::new();
- for path in dylib_paths {
- use object::{Object, ObjectSymbol};
- let lib = libloading::Library::new(&path).unwrap();
- let obj = std::fs::read(path).unwrap();
- let obj = object::File::parse(&*obj).unwrap();
- imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
- let name = symbol.name().unwrap().to_string();
- if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
- return None;
- }
- if name.starts_with("rust_metadata_") {
- // The metadata is part of a section that is not loaded by the dynamic linker in
- // case of cg_llvm.
- return None;
- }
- let dlsym_name = if cfg!(target_os = "macos") {
- // On macOS `dlsym` expects the name without leading `_`.
- assert!(name.starts_with('_'), "{:?}", name);
- &name[1..]
- } else {
- &name
- };
- let symbol: libloading::Symbol<'_, *const u8> =
- unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
- Some((name, *symbol))
- }));
- std::mem::forget(lib)
- }
++) -> Box<dyn Fn(&str) -> Option<*const u8>> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let data = &crate_info
+ .dependency_formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &cnum in &crate_info.used_crates {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = crate_info.crate_name[&cnum];
+ let mut err = sess.struct_err(&format!("Can't load static lib {}", name));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
- imported_symbols
++ let imported_dylibs = Box::leak(
++ dylib_paths
++ .into_iter()
++ .map(|path| unsafe { libloading::Library::new(&path).unwrap() })
++ .collect::<Box<[_]>>(),
++ );
+
+ sess.abort_if_errors();
+
++ Box::new(move |sym_name| {
++ for dylib in &*imported_dylibs {
++ if let Ok(sym) = unsafe { dylib.get::<*const u8>(sym_name.as_bytes()) } {
++ return Some(*sym);
++ }
++ }
++ None
++ })
+}
+
+fn codegen_shim<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut CodegenCx,
+ cached_context: &mut Context,
+ module: &mut JITModule,
+ inst: Instance<'tcx>,
+) {
+ let pointer_type = module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name;
+ let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
+ let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ let context = cached_context;
+ context.clear();
+ let trampoline = &mut context.func;
+ trampoline.signature = sig.clone();
+
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+ let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+ let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+ trampoline_builder.switch_to_block(entry_block);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
+ module.define_function(func_id, context).unwrap();
+ cx.unwind_context.add_function(func_id, context, module.isa());
+}
--- /dev/null
- if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+//! Codegen of `asm!` invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_span::sym;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ _span: Span,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ destination: Option<mir::BasicBlock>,
+) {
+ // FIXME add .eh_frame unwind info directives
+
+ if !template.is_empty() {
+ // Used by panic_abort
+ if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
+ fx.bcx.ins().trap(TrapCode::User(1));
+ return;
+ }
+
+ // Used by stdarch
- && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
- && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
- && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
- && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
++ if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
- template[6],
++ && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
++ && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
++ && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
++ && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
++ && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
+ && matches!(
- late: true,
++ template[7],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
++ && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
+ {
+ assert_eq!(operands.len(), 4);
+ let (leaf, eax_place) = match operands[1] {
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
- late: true,
++ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
+ _ => unreachable!(),
+ };
+ let ebx_place = match operands[0] {
+ InlineAsmOperand::Out {
+ reg:
+ InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::reg,
+ )),
- late: true,
++ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+ let (sub_leaf, ecx_place) = match operands[2] {
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
- late: true,
++ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
+ _ => unreachable!(),
+ };
+ let edx_place = match operands[3] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
++ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+
+ let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
+
+ eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ }
+
+ // Used by compiler-builtins
+ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ return;
+ } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ return;
+ }
+
+ // Used by measureme
+ if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
+ && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
+ && matches!(
+ template[8],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
+ // Return zero dummy values for all performance counters
+ match operands[0] {
+ InlineAsmOperand::In {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
+ value: _,
+ } => {}
+ _ => unreachable!(),
+ };
+ let lo = match operands[1] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+ let hi = match operands[2] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+
+ let u32_layout = fx.layout_of(fx.tcx.types.u32);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+ hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String(")".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ }
+ }
+
+ let mut inputs = Vec::new();
+ let mut outputs = Vec::new();
+
+ let mut asm_gen = InlineAssemblyGenerator {
+ tcx: fx.tcx,
+ arch: fx.tcx.sess.asm_arch.unwrap(),
+ enclosing_def_id: fx.instance.def_id(),
+ template,
+ operands,
+ options,
+ registers: Vec::new(),
+ stack_slots_clobber: Vec::new(),
+ stack_slots_input: Vec::new(),
+ stack_slots_output: Vec::new(),
+ stack_slot_size: Size::from_bytes(0),
+ };
+ asm_gen.allocate_registers();
+ asm_gen.allocate_stack_slots();
+
+ let inline_asm_index = fx.cx.inline_asm_index.get();
+ fx.cx.inline_asm_index.set(inline_asm_index + 1);
+ let asm_name = format!(
+ "__inline_asm_{}_n{}",
+ fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
+ inline_asm_index
+ );
+
+ let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
+ fx.cx.global_asm.push_str(&generated_asm);
+
+ for (i, operand) in operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: _, ref value } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, value).load_scalar(fx),
+ ));
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place } => {
+ if let Some(place) = place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, place),
+ ));
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ ));
+ if let Some(out_place) = out_place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, out_place),
+ ));
+ }
+ }
+ InlineAsmOperand::Const { value: _ } => todo!(),
+ InlineAsmOperand::SymFn { value: _ } => todo!(),
+ InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+ }
+ }
+
+ call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
+
+ match destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ }
+}
+
+struct InlineAssemblyGenerator<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ arch: InlineAsmArch,
+ enclosing_def_id: DefId,
+ template: &'a [InlineAsmTemplatePiece],
+ operands: &'a [InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ registers: Vec<Option<InlineAsmReg>>,
+ stack_slots_clobber: Vec<Option<Size>>,
+ stack_slots_input: Vec<Option<Size>>,
+ stack_slots_output: Vec<Option<Size>>,
+ stack_slot_size: Size,
+}
+
+impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
+ fn allocate_registers(&mut self) {
+ let sess = self.tcx.sess;
+ let map = allocatable_registers(
+ self.arch,
+ sess.relocation_model(),
+ self.tcx.asm_target_features(self.enclosing_def_id),
+ &sess.target,
+ );
+ let mut allocated = FxHashMap::<_, (bool, bool)>::default();
+ let mut regs = vec![None; self.operands.len()];
+
+ // Add explicit registers to the allocated set.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
+ } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
+ | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate out/inout/inlateout registers first because they are more constrained.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: false,
+ ..
+ }
+ | InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::RegClass(class), ..
+ } => {
+ let mut alloc_reg = None;
+ for ® in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.contains_key(&r) {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate in/lateout.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
+ let mut alloc_reg = None;
+ for ® in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().0 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: true,
+ ..
+ } => {
+ let mut alloc_reg = None;
+ for ® in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().1 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ _ => (),
+ }
+ }
+
+ self.registers = regs;
+ }
+
+ fn allocate_stack_slots(&mut self) {
+ let mut slot_size = Size::from_bytes(0);
+ let mut slots_clobber = vec![None; self.operands.len()];
+ let mut slots_input = vec![None; self.operands.len()];
+ let mut slots_output = vec![None; self.operands.len()];
+
+ let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
+ let reg_size =
+ reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
+ let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+ let offset = slot_size.align_to(align);
+ *slot_size = offset + reg_size;
+ offset
+ };
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for saving clobbered registers
+ let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
+ .unwrap()
+ .clobbered_regs();
+ for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
+ let mut need_save = true;
+ // If the register overlaps with a register clobbered by a function call, then
+ // we don't need to save it.
+ for r in abi_clobber {
+ r.overlapping_regs(|r| {
+ if r == reg {
+ need_save = false;
+ }
+ });
+
+ if !need_save {
+ break;
+ }
+ }
+
+ if need_save {
+ slots_clobber[i] = Some(new_slot(reg.reg_class()));
+ }
+ }
+
+ // Allocate stack slots for inout
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
+ let slot = new_slot(reg.reg_class());
+ slots_input[i] = Some(slot);
+ slots_output[i] = Some(slot);
+ }
+ _ => (),
+ }
+ }
+
+ let slot_size_before_input = slot_size;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for input
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg, .. }
+ | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
+ slots_input[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ // Reset slot size to before input so that input and output operands can overlap
+ // and save some memory.
+ let slot_size_after_input = slot_size;
+ slot_size = slot_size_before_input;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for output
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out { reg, place: Some(_), .. } => {
+ slots_output[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ slot_size = slot_size.max(slot_size_after_input);
+
+ self.stack_slots_clobber = slots_clobber;
+ self.stack_slots_input = slots_input;
+ self.stack_slots_output = slots_output;
+ self.stack_slot_size = slot_size;
+ }
+
+ fn generate_asm_wrapper(&self, asm_name: &str) -> String {
+ let mut generated_asm = String::new();
+ writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+ writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+ let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+
+ if is_x86 {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+ Self::prologue(&mut generated_asm, self.arch);
+
+ // Save clobbered registers
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+ }
+
+ // Write input registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_input.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".att_syntax\n");
+ }
+
+ // The actual inline asm
+ for piece in self.template {
+ match piece {
+ InlineAsmTemplatePiece::String(s) => {
+ generated_asm.push_str(s);
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push('%');
+ }
+ self.registers[*operand_idx]
+ .unwrap()
+ .emit(&mut generated_asm, self.arch, *modifier)
+ .unwrap();
+ }
+ }
+ }
+ generated_asm.push('\n');
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ // Read output registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_output.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ // Restore clobbered registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ Self::epilogue(&mut generated_asm, self.arch);
+ } else {
+ Self::epilogue_noreturn(&mut generated_asm, self.arch);
+ }
+
+ if is_x86 {
+ generated_asm.push_str(".att_syntax\n");
+ }
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+ generated_asm.push_str(".text\n");
+ generated_asm.push_str("\n\n");
+
+ generated_asm
+ }
+
+ fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" push ebp\n");
+ generated_asm.push_str(" mov ebp,[esp+8]\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" push rbp\n");
+ generated_asm.push_str(" mov rbp,rdi\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" addi sp, sp, -8\n");
+ generated_asm.push_str(" sw ra, 4(sp)\n");
+ generated_asm.push_str(" sw s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" addi sp, sp, -16\n");
+ generated_asm.push_str(" sd ra, 8(sp)\n");
+ generated_asm.push_str(" sd s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ _ => unimplemented!("prologue for {:?}", arch),
+ }
+ }
+
+ /// Emits the epilogue for the normal (returning) path: undoes the
+ /// corresponding `prologue` and returns to the caller.
+ fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" pop ebp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" pop rbp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ // Restore s0 and ra from the offsets used by the prologue.
+ generated_asm.push_str(" lw s0, 0(sp)\n");
+ generated_asm.push_str(" lw ra, 4(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 8\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld s0, 0(sp)\n");
+ generated_asm.push_str(" ld ra, 8(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 16\n");
+ generated_asm.push_str(" ret\n");
+ }
+ _ => unimplemented!("epilogue for {:?}", arch),
+ }
+ }
+
+ /// Epilogue for noreturn inline asm: instead of restoring state and
+ /// returning, emits a trapping instruction (ud2 / ebreak) since control
+ /// must never reach past the asm block.
+ fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" ud2\n");
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ebreak\n");
+ }
+ _ => unimplemented!("epilogue_noreturn for {:?}", arch),
+ }
+ }
+
+ /// Spills `reg` into the scratch area at `offset` bytes from the base
+ /// register established by `prologue` (ebp/rbp on x86, s0 on RISC-V).
+ fn save_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ // Intel syntax: destination (memory) first, then the register.
+ write!(generated_asm, " mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::X86_64 => {
+ write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" sw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" sd ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("save_register for {:?}", arch),
+ }
+ }
+
+ /// Reloads `reg` from the scratch area at `offset` bytes from the base
+ /// register; the exact inverse of `save_register`.
+ fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("restore_register for {:?}", arch),
+ }
+ }
+}
+
+/// Emits a call to the out-of-line wrapper function generated for an inline
+/// asm block. Inputs are passed and outputs received through a single stack
+/// slot of `slot_size` bytes whose address is the wrapper's only argument.
+fn call_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ asm_name: &str,
+ slot_size: Size,
+ inputs: Vec<(Size, Value)>,
+ outputs: Vec<(Size, CPlace<'tcx>)>,
+) {
+ let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: u32::try_from(slot_size.bytes()).unwrap(),
+ });
+ if fx.clif_comments.enabled() {
+ fx.add_comment(stack_slot, "inline asm scratch slot");
+ }
+
+ // Import the wrapper by name; its signature is `fn(ptr)` with no returns,
+ // matching the prologue emitted by the asm wrapper generator.
+ let inline_asm_func = fx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
+ let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(inline_asm_func, asm_name);
+ }
+
+ // Store all inputs into the slot at their assigned offsets.
+ for (offset, value) in inputs {
+ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ }
+
+ let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+ // Read the outputs back out of the slot into their destination places.
+ for (offset, place) in outputs {
+ let ty = fx.clif_type(place.layout().ty).unwrap();
+ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+ }
+}
--- /dev/null
- let a_lane =
- a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+/// Codegen for calls to LLVM intrinsics (`llvm.*`) used by the standard
+/// library. Intrinsics without a lowering here emit a warning and are
+/// replaced with a trap so compilation can continue.
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: &str,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ match intrinsic {
++ "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
++ // Spin loop hint
++ }
++
+ // Used by `_mm_movemask_epi8`, `_mm256_movemask_epi8` and `_mm_movemask_pd`
+ "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+ // Walk lanes high-to-low so lane 0 ends up in bit 0 after shifting.
+ for lane in (0..lane_count).rev() {
++ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ }
+ // The packed-single compare is an SSE1 intrinsic: LLVM names it
+ // `llvm.x86.sse.cmp.ps` (not `sse2`); the old spelling never matched.
+ "llvm.x86.sse.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ let (x, y, kind) = match args {
+ [x, y, kind] => (x, y, kind),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+ let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+ .expect("llvm.x86.sse2.cmp.* kind not const");
+
+ // The immediate selects the comparison predicate (see CMPPS/CMPPD).
+ let flt_cc = match kind
+ .try_to_bits(Size::from_bytes(1))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => FloatCC::Ordered,
+ 3 => FloatCC::Unordered,
+ 4 => FloatCC::NotEqual,
+ 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
+ 6 => FloatCC::UnorderedOrGreaterThan,
+ kind => unreachable!("kind {:?}", kind),
+ };
+
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match lane_ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+ });
+ }
+ "llvm.x86.sse2.psrli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+ // Shifts >= the lane width yield 0, matching hardware behavior.
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ // Fixed copy-pasted expect message that referred to psrli.d.
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.pslli.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.storeu.dq" => {
+ intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+ let mem_addr = mem_addr.load_scalar(fx);
+
+ // FIXME correctly handle the unalignment
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ }
+ "llvm.x86.addcarry.64" => {
+ intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ }
+ "llvm.x86.subborrow.64" => {
+ intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+ let b_in = b_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ }
+ _ => {
+ fx.tcx
+ .sess
+ .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ return;
+ }
+ }
+
+ let dest = target.expect("all llvm intrinsics used by stdlib should return");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+}
+
+// LLVM intrinsics not yet implemented here (they currently hit the
+// warn-and-trap fallback above):
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+/// Shared lowering for `llvm.x86.addcarry.64` and `llvm.x86.subborrow.64`:
+/// performs `a op b op cb_in` and writes the `(u8 carry/borrow, u64 result)`
+/// tuple into `ret`.
+fn llvm_add_sub<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ ret: CPlace<'tcx>,
+ cb_in: Value,
+ a: CValue<'tcx>,
+ b: CValue<'tcx>,
+) {
+ assert_eq!(
+ a.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+ );
+ assert_eq!(
+ b.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+ );
+
+ // c + carry -> c + first intermediate carry or borrow respectively
+ let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+ let c = int0.value_field(fx, mir::Field::new(0));
+ let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+ // c + carry -> c + second intermediate carry or borrow respectively
+ let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+ let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let (c, cb1) = int1.load_scalar_pair(fx);
+
+ // carry0 | carry1 -> carry or borrow respectively
+ let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+ // Result layout matches the intrinsic's (u8, u64) return tuple.
+ let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
+}
--- /dev/null
- let signed = type_sign(lhs.layout().ty);
-
- let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
-
- let (val, has_overflow) = checked_res.load_scalar_pair(fx);
- let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
-
- let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
-
- let val = match (intrinsic, signed) {
- (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
- (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
- (sym::saturating_add, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- (sym::saturating_sub, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- _ => unreachable!(),
- };
-
- let res = CValue::by_val(val, lhs.layout());
-
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+// Binds each intrinsic argument operand to a named `CValue` via
+// `codegen_operand`, bailing out through `bug_on_incorrect_arg_count` when
+// the number of operands doesn't match the pattern.
+macro_rules! intrinsic_args {
+ ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+ #[allow(unused_parens)]
+ let ($($arg),*) = if let [$($arg),*] = $args {
+ ($(codegen_operand($fx, $arg)),*)
+ } else {
+ $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
+ };
+ }
+}
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+/// ICEs with a uniform message; called by `intrinsic_args!` on arity mismatch.
+fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
+ bug!("wrong number of args for intrinsic {}", intrinsic);
+}
+
+/// Emits a compile error for an atomic intrinsic instantiated with an
+/// unsupported type, then traps so the partially-built function still
+/// passes the Cranelift verifier.
+fn report_atomic_type_validation_error<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'tcx>,
+) {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ intrinsic, ty
+ ),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+/// Maps a SIMD layout to a native Cranelift vector type, or `None` when the
+/// lanes must instead be processed one at a time. Panics if `layout` is not
+/// a vector ABI.
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+/// Applies `f` to every scalar lane of `val`, writing each result lane into
+/// `ret`. Lane counts of `val` and `ret` must match; lane types may differ.
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
++/// Lane-wise combination of `x` and `y` where the closure works on whole
++/// `CValue` lanes rather than raw scalars, so lanes may themselves be
++/// aggregates (cf. `simd_pair_for_each_lane`, which passes loaded scalars).
++fn simd_pair_for_each_lane_typed<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ x: CValue<'tcx>,
++ y: CValue<'tcx>,
++ ret: CPlace<'tcx>,
++ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
++) {
++ assert_eq!(x.layout(), y.layout());
++ let layout = x.layout();
++
++ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++ let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
++ assert_eq!(lane_count, ret_lane_count);
++
++ for lane_idx in 0..lane_count {
++ let x_lane = x.value_lane(fx, lane_idx);
++ let y_lane = y.value_lane(fx, lane_idx);
++
++ let res_lane = f(fx, x_lane, y_lane);
++
++ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
++ }
++}
++
+/// Applies `f` lane-wise to the loaded scalar lanes of `x` and `y` (which
+/// must have identical layouts), writing the result lanes into `ret`.
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+/// Folds all lanes of `val` into a single scalar with `f`, seeding the
+/// accumulator with `acc` when provided, otherwise with lane 0. The scalar
+/// result is written to `ret`, whose layout must equal the lane layout.
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ // Without an explicit accumulator, lane 0 seeds the fold and iteration
+ // starts at lane 1.
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+/// Boolean reduction: masks every lane to its low bit, folds the lanes with
+/// `f`, and narrows the result to `i8` for the `bool` return place.
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ // bool is represented as i8; narrow wider lane results.
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
+/// Expands a boolean `val` into all-zeros or all-ones of (the Cranelift
+/// version of) `ty`. Float types are produced by negating in an integer of
+/// equal width and bitcasting back.
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ val: Value,
+) -> Value {
+ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ // bint gives 0/1; negating 1 yields the all-ones pattern.
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ res
+}
+
+/// Main entry point for codegenning a call to a Rust intrinsic.
+///
+/// Dispatches, in order: diverging intrinsics (no return target), `simd_*`
+/// intrinsics, float intrinsics lowered natively or to libcalls, and finally
+/// the regular intrinsics handled by `codegen_regular_intrinsic_call`.
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
+ let target = if let Some(target) = target {
+ target
+ } else {
+ // Insert non returning intrinsics here
+ match intrinsic {
+ sym::abort => {
+ fx.bcx.ins().trap(TrapCode::User(0));
+ }
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
+ }
+ _ => unimplemented!("unsupported intrinsic {}", intrinsic),
+ }
+ return;
+ };
+
+ if intrinsic.as_str().starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(
+ fx,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ source_info.span,
+ );
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ // codegen_regular_intrinsic_call handles the jump to the return block
+ // itself, hence `Some(target)` instead of jumping here.
+ codegen_regular_intrinsic_call(
+ fx,
+ instance,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ Some(target),
+ source_info,
+ );
+ }
+}
+
+/// Tries to codegen `intrinsic` as a float operation, either natively with a
+/// Cranelift instruction (fma, copysign, fabs, floor, ceil, trunc) or as a
+/// libm/compiler-builtins libcall. Returns `false` when `intrinsic` is not a
+/// float intrinsic, leaving it for other handlers.
+fn codegen_float_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+) -> bool {
+ // (libcall name, expected arg count, operand/return type)
+ let (name, arg_count, ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ _ => return false,
+ };
+
+ if args.len() != arg_count {
+ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
+ // Pack the operands into a uniform slice without heap allocation.
+ let (a, b, c);
+ let args = match args {
+ [x] => {
+ a = [codegen_operand(fx, x)];
+ &a as &[_]
+ }
+ [x, y] => {
+ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ &b
+ }
+ [x, y, z] => {
+ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ &c
+ }
+ _ => unreachable!(),
+ };
+
+ let layout = fx.layout_of(ty);
+ let res = match intrinsic {
+ sym::fmaf32 | sym::fmaf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ let c = args[2].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+ }
+ sym::copysignf32 | sym::copysignf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+ }
+ sym::fabsf32
+ | sym::fabsf64
+ | sym::floorf32
+ | sym::floorf64
+ | sym::ceilf32
+ | sym::ceilf64
+ | sym::truncf32
+ | sym::truncf64 => {
+ let a = args[0].load_scalar(fx);
+
+ let val = match intrinsic {
+ sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
+ sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
+ sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
+ sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+ _ => unreachable!(),
+ };
+
+ CValue::by_val(val, layout)
+ }
+ // These intrinsics aren't supported natively by Cranelift.
+ // Lower them to a libcall.
+ _ => fx.easy_call(name, &args, ty),
+ };
+
+ ret.write_cvalue(fx, res);
+
+ true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ intrinsic: Symbol,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ destination: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ match intrinsic {
+ sym::likely | sym::unlikely => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ ret.write_cvalue(fx, a);
+ }
+ sym::breakpoint => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().debugtrap();
+ }
+ sym::copy | sym::copy_nonoverlapping => {
+ intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+ let src = src.load_scalar(fx);
+ let dst = dst.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+ // NOTE: the volatile variants have src and dst swapped
+ intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+ let dst = dst.load_scalar(fx);
+ let src = src.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::size_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+ sym::min_align_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::vtable_size => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let size = crate::vtable::size_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+
+ sym::vtable_align => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let align = crate::vtable::min_align_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::unchecked_div
+ | sym::exact_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
- let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
- ret.write_cvalue(fx, val);
++ let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
+ ret.write_cvalue(fx, res);
+ }
+ sym::rotate_left => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+ sym::rotate_right => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+
+ // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+ // doesn't have UB both are codegen'ed the same way
+ sym::offset | sym::arith_offset => {
+ intrinsic_args!(fx, args => (base, offset); intrinsic);
+ let offset = offset.load_scalar(fx);
+
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ }
+
+ sym::ptr_mask => {
+ intrinsic_args!(fx, args => (ptr, mask); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let mask = mask.load_scalar(fx);
+ fx.bcx.ins().band(ptr, mask);
+ }
+
+ sym::transmute => {
+ intrinsic_args!(fx, args => (from); intrinsic);
+
+ ret.write_cvalue_transmute(fx, from);
+ }
+ sym::write_bytes | sym::volatile_set_memory => {
+ intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+ let val = val.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+ }
+ sym::ctlz | sym::ctlz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = fx.bcx.ins().clz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::cttz | sym::cttz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = fx.bcx.ins().ctz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::ctpop => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().popcnt(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bitreverse => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().bitrev(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bswap => {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ // Reverse the byte order of `v` with shift/mask/or sequences, since
+ // Cranelift lacks a native bswap instruction here (see the FIXME
+ // referencing CraneStation/cranelift#794 above). Each width builds
+ // every byte lane at its mirrored position and ORs them together.
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ // A single byte is its own byte-swap.
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ // Masks are written as u64 literals and cast, because the
+ // high-bit patterns don't fit in a plain i64 literal.
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ // OR pairwise in a balanced tree rather than a chain.
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ // Split into 64-bit halves, byte-swap each recursively, then
+ // reassemble with the halves exchanged (hi becomes lo).
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ // bswap is only called on integer scalars; vectors/floats never reach here.
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ source_info,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to zero-initialize type `{}`, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ )
+ });
+ return;
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ }
+
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ intrinsic_args!(fx, args => (ptr, base); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let base = base.load_scalar(fx);
+ let ty = substs.type_at(0);
+
+ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff_bytes = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = if intrinsic == sym::ptr_offset_from_unsigned {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+ // but unsigned is slightly easier to codegen, so might as well.
+ CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+ } else {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+ CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+ };
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_cmp => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
- params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
++ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
++ ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
+ }
+
+ sym::caller_location => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let caller_location = fx.get_caller_location(source_info);
+ ret.write_cvalue(fx, caller_location);
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_fence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_load") => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+ let clif_ty = fx.clif_type(ty).unwrap();
+
+ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_store") => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+ intrinsic_args!(fx, args => (ptr, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+ // both atomic_cxchg_* and atomic_cxchgweak_*
+ intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+ let ret_val =
+ CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val)
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_and") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_or") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xor") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_nand") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_max") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umax") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_min") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umin") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+
+ sym::minnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::minnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+
+ kw::Try => {
+ intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+ let f = f.load_scalar(fx);
+ let data = data.load_scalar(fx);
+ let _catch_fn = catch_fn.load_scalar(fx);
+
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
++ params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ }
+
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ let res = crate::num::codegen_float_binop(
+ fx,
+ match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ },
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ }
+ sym::float_to_int_unchecked => {
+ intrinsic_args!(fx, args => (f); intrinsic);
+ let f = f.load_scalar(fx);
+
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ }
+
+ sym::raw_eq => {
+ intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+ let lhs_ref = lhs_ref.load_scalar(fx);
+ let rhs_ref = rhs_ref.load_scalar(fx);
+
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
+ // FIXME add and use emit_small_memcmp
+ let is_eq_value = if size == Size::ZERO {
+ // No bytes means they're trivially equal
+ fx.bcx.ins().iconst(types::I8, 1)
+ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+ // Can't use `trusted` for these loads; they could be unaligned.
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+ let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+ fx.bcx.ins().bint(types::I8, eq)
+ } else {
+ // Just call `memcmp` (like slices do in core) when the
+ // size is too large or it's not a power-of-two.
+ let signed_bytes = i64::try_from(size.bytes()).unwrap();
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ref, rhs_ref, bytes_val];
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+ fx.bcx.ins().bint(types::I8, eq)
+ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+ }
+
+ sym::const_allocate => {
+ intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
+ // returns a null pointer at runtime.
+ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
+ }
+
+ sym::const_deallocate => {
+ intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+ // nop at runtime.
+ }
+
+ sym::black_box => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
+ }
+
+ // FIXME implement variadics in cranelift
+ sym::va_copy | sym::va_arg | sym::va_end => {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ _ => {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+ }
+ }
+
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+}
--- /dev/null
- sym::simd_cast => {
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::Symbol;
++use rustc_target::abi::Endian;
+
+use super::*;
+use crate::prelude::*;
+
+/// Report an invalid-monomorphization error for a SIMD intrinsic that was
+/// instantiated with a non-SIMD type `ty`, then emit a trap instruction so
+/// the partially-built function still passes the Cranelift verifier.
+fn report_simd_type_validation_error(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'_>,
+) {
+ fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
+ match intrinsic {
- // simd_saturating_*
- // simd_bitmask
++ sym::simd_as | sym::simd_cast => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
+ let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
+
+ let from_signed = type_sign(lane_ty);
+ let to_signed = type_sign(ret_lane_ty);
+
+ clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
+ });
+ }
+
+ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Uint(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Int(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Float(_), sym::simd_eq) => {
+ fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ne) => {
+ fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_lt) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_le) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_gt) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ge) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ _ => unreachable!(),
+ };
+
+ let ty = fx.clif_type(res_lane_ty).unwrap();
+
+ let res_lane = fx.bcx.ins().bint(ty, res_lane);
+ fx.bcx.ins().ineg(res_lane)
+ });
+ }
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ let (x, y, idx) = match args {
+ [x, y, idx] => (x, y, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
+ // If there is no suffix, use the index array length.
+ let n: u16 = if intrinsic == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ idx_ty,
+ ),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+ }
+ } else {
++ // FIXME remove this case
+ intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
+ };
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
+ .expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
+ let size = Size::from_bytes(
+ 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
+ );
+ alloc
+ .inner()
+ .get_bytes_strip_provenance(fx, alloc_range(offset, size))
+ .unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count)
+ .map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4 * i..4 * i + 4],
+ )
+ .expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ })
+ .collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ }
+
+ sym::simd_insert => {
+ let (base, idx, val) = match args {
+ [base, idx, val] => (base, idx, val),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let base = codegen_operand(fx, base);
+ let val = codegen_operand(fx, val);
+
+ // FIXME validate
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
+ };
+
+ let idx = idx_const
+ .try_to_bits(Size::from_bytes(4 /* u32*/))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ }
+
+ sym::simd_extract => {
+ let (v, idx) = match args {
+ [v, idx] => (v, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let v = codegen_operand(fx, v);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
+ let trap_block = fx.bcx.create_block();
+ let dummy_block = fx.bcx.create_block();
+ let true_ = fx.bcx.ins().iconst(types::I8, 1);
+ fx.bcx.ins().brnz(true_, trap_block, &[]);
+ fx.bcx.ins().jump(dummy_block, &[]);
+ fx.bcx.switch_to_block(trap_block);
+ crate::trap::trap_unimplemented(
+ fx,
+ "Index argument for `simd_extract` is not a constant",
+ );
+ fx.bcx.switch_to_block(dummy_block);
+ return;
+ };
+
+ let idx = idx_const
+ .try_to_bits(Size::from_bytes(4 /* u32*/))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
+ ret.write_cvalue(fx, ret_lane);
+ }
+
+ sym::simd_neg => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
+ },
+ );
+ }
+
+ sym::simd_add
+ | sym::simd_sub
+ | sym::simd_mul
+ | sym::simd_div
+ | sym::simd_rem
+ | sym::simd_shl
+ | sym::simd_shr
+ | sym::simd_and
+ | sym::simd_or
+ | sym::simd_xor => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
+ (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
+
+ (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
+ (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
+ (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
+ (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
+ (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
+ "fmodf",
+ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[x_lane, y_lane],
+ )[0],
+ (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
+ "fmod",
+ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[x_lane, y_lane],
+ )[0],
+
+ (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
+ (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_fma => {
+ intrinsic_args!(fx, args => (a, b, c); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ assert_eq!(a.layout(), ret.layout());
+
+ let layout = a.layout();
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let res_lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+ let c_lane = c.value_lane(fx, lane).load_scalar(fx);
+
+ let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
+ let res_lane = CValue::by_val(res_lane, res_lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+ sym::simd_fmin | sym::simd_fmax => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
+ sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_round => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_ty),
+ },
+ );
+ }
+
+ sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fabs => fx.bcx.ins().fabs(lane),
+ sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
+ sym::simd_ceil => fx.bcx.ins().ceil(lane),
+ sym::simd_floor => fx.bcx.ins().floor(lane),
+ sym::simd_trunc => fx.bcx.ins().trunc(lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_all => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_any => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_and => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_or => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_xor => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
+ }
+
+ sym::simd_reduce_min => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let lt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(lt, a, b)
+ });
+ }
+
+ sym::simd_reduce_max => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let gt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(gt, a, b)
+ });
+ }
+
+ sym::simd_select => {
+ intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
+ if !m.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
+ return;
+ }
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+ let res_lane =
+ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
++ sym::simd_select_bitmask => {
++ intrinsic_args!(fx, args => (m, a, b); intrinsic);
++
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
++ assert_eq!(a.layout(), b.layout());
++
++ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
++ let lane_layout = fx.layout_of(lane_ty);
++
++ let m = m.load_scalar(fx);
++
++ for lane in 0..lane_count {
++ let m_lane = fx.bcx.ins().ushr_imm(m, u64::from(lane) as i64);
++ let m_lane = fx.bcx.ins().band_imm(m_lane, 1);
++ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
++ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
++
++ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
++ let res_lane =
++ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
++
++ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
++ }
++ }
++
++ sym::simd_bitmask => {
++ intrinsic_args!(fx, args => (a); intrinsic);
++
++ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
++ let lane_clif_ty = fx.clif_type(lane_ty).unwrap();
++
++ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
++ // vector mask and returns the most significant bit (MSB) of each lane in the form
++ // of either:
++ // * an unsigned integer
++ // * an array of `u8`
++ // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
++ //
++ // The bit order of the result depends on the byte endianness, LSB-first for little
++ // endian and MSB-first for big endian.
++ let expected_int_bits = lane_count.max(8);
++ let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
++
++ match lane_ty.kind() {
++ ty::Int(_) | ty::Uint(_) => {}
++ _ => {
++ fx.tcx.sess.span_fatal(
++ span,
++ &format!(
++ "invalid monomorphization of `simd_bitmask` intrinsic: \
++ vector argument `{}`'s element type `{}`, expected integer element \
++ type",
++ a.layout().ty,
++ lane_ty
++ ),
++ );
++ }
++ }
++
++ let res_type =
++ Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
++ let mut res = fx.bcx.ins().iconst(res_type, 0);
++
++ let lanes = match fx.tcx.sess.target.endian {
++ Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,
++ Endian::Little => Box::new((0..lane_count).rev()) as Box<dyn Iterator<Item = u64>>,
++ };
++ for lane in lanes {
++ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
++
++ // extract sign bit of an int
++ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_clif_ty.bits() - 1));
++
++ // shift sign bit into result
++ let a_lane_sign = clif_intcast(fx, a_lane_sign, res_type, false);
++ res = fx.bcx.ins().ishl_imm(res, 1);
++ res = fx.bcx.ins().bor(res, a_lane_sign);
++ }
++
++ match ret.layout().ty.kind() {
++ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
++ ty::Array(elem, len)
++ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
++ && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
++ == Some(expected_bytes) => {}
++ _ => {
++ fx.tcx.sess.span_fatal(
++ span,
++ &format!(
++ "invalid monomorphization of `simd_bitmask` intrinsic: \
++ cannot return `{}`, expected `u{}` or `[u8; {}]`",
++ ret.layout().ty,
++ expected_int_bits,
++ expected_bytes
++ ),
++ );
++ }
++ }
++
++ let res = CValue::by_val(res, ret.layout());
++ ret.write_cvalue(fx, res);
++ }
++
++ sym::simd_saturating_add | sym::simd_saturating_sub => {
++ intrinsic_args!(fx, args => (x, y); intrinsic);
++
++ let bin_op = match intrinsic {
++ sym::simd_saturating_add => BinOp::Add,
++ sym::simd_saturating_sub => BinOp::Sub,
++ _ => unreachable!(),
++ };
++
++ // FIXME use vector instructions when possible
++ simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
++ crate::num::codegen_saturating_int_binop(fx, bin_op, x_lane, y_lane)
++ });
++ }
++
++ // simd_arith_offset
+ // simd_scatter
+ // simd_gather
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ }
+ }
+}
--- /dev/null
- AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
- StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+#![feature(rustc_private)]
+// Note: please avoid adding other feature gates where possible
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+extern crate jobserver;
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_interface;
+extern crate rustc_metadata;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+use std::cell::{Cell, RefCell};
+use std::sync::Arc;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::settings::{self, Configurable};
+
+pub use crate::config::*;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod compiler_builtins;
+mod concurrency_limiter;
+mod config;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod global_asm;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+ pub(crate) use rustc_span::{FileNameDisplayPreference, Span};
+
+ pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
+ pub(crate) use rustc_middle::mir::{self, *};
+ pub(crate) use rustc_middle::ty::layout::{self, LayoutOf, TyAndLayout};
+ pub(crate) use rustc_middle::ty::{
+ self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
+ TypeFoldable, TypeVisitable, UintTy,
+ };
+ pub(crate) use rustc_target::abi::{Abi, Scalar, Size, VariantIdx};
+
+ pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+ pub(crate) use rustc_index::vec::Idx;
+
+ pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+ pub(crate) use cranelift_codegen::ir::function::Function;
+ pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
- flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
++ AbiParam, Block, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc, StackSlot,
++ StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
+ pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
+ pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+ pub(crate) use cranelift_module::{self, DataContext, FuncId, Linkage, Module};
+
+ pub(crate) use crate::abi::*;
+ pub(crate) use crate::base::{codegen_operand, codegen_place};
+ pub(crate) use crate::cast::*;
+ pub(crate) use crate::common::*;
+ pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+ pub(crate) use crate::pointer::Pointer;
+ pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
+/// The codegen context holds any information shared between the codegen of individual functions
+/// inside a single codegen unit with the exception of the Cranelift [`Module`](cranelift_module::Module).
+struct CodegenCx {
+ profiler: SelfProfilerRef,
+ output_filenames: Arc<OutputFilenames>,
+ should_write_ir: bool,
+ global_asm: String,
+ inline_asm_index: Cell<usize>,
+ debug_context: Option<DebugContext>,
+ unwind_context: UnwindContext,
+ cgu_name: Symbol,
+}
+
+impl CodegenCx {
+ fn new(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ isa: &dyn TargetIsa,
+ debug_info: bool,
+ cgu_name: Symbol,
+ ) -> Self {
+ assert_eq!(pointer_ty(tcx), isa.pointer_type());
+
+ let unwind_context =
+ UnwindContext::new(isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
+ let debug_context = if debug_info && !tcx.sess.target.options.is_like_windows {
+ Some(DebugContext::new(tcx, isa))
+ } else {
+ None
+ };
+ CodegenCx {
+ profiler: tcx.prof.clone(),
+ output_filenames: tcx.output_filenames(()).clone(),
+ should_write_ir: crate::pretty_clif::should_write_ir(tcx),
+ global_asm: String::new(),
+ inline_asm_index: Cell::new(0),
+ debug_context,
+ unwind_context,
+ cgu_name,
+ }
+ }
+}
+
+pub struct CraneliftCodegenBackend {
+ pub config: RefCell<Option<BackendConfig>>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+ fn init(&self, sess: &Session) {
+ use rustc_session::config::Lto;
+ match sess.lto() {
+ Lto::No | Lto::ThinLocal => {}
+ Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
+ }
+
+ let mut config = self.config.borrow_mut();
+ if config.is_none() {
+ let new_config = BackendConfig::from_opts(&sess.opts.cg.llvm_args)
+ .unwrap_or_else(|err| sess.fatal(&err));
+ *config = Some(new_config);
+ }
+ }
+
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<rustc_span::Symbol> {
+ vec![]
+ }
+
+ fn print_version(&self) {
+ println!("Cranelift version: {}", cranelift_codegen::VERSION);
+ }
+
+ fn codegen_crate(
+ &self,
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ tcx.sess.abort_if_errors();
+ let config = self.config.borrow().clone().unwrap();
+ match config.codegen_mode {
+ CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
+ CodegenMode::Jit | CodegenMode::JitLazy => {
+ #[cfg(feature = "jit")]
+ driver::jit::run_jit(tcx, config);
+
+ #[cfg(not(feature = "jit"))]
+ tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ }
+ }
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ _outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ Ok(ongoing_codegen
+ .downcast::<driver::aot::OngoingCodegen>()
+ .unwrap()
+ .join(sess, self.config.borrow().as_ref().unwrap()))
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ link_binary(sess, &crate::archive::ArArchiveBuilderBuilder, &codegen_results, outputs)
+ }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+ match sess.target.llvm_target.parse() {
+ Ok(triple) => triple,
+ Err(err) => sess.fatal(&format!("target not recognized: {}", err)),
+ }
+}
+
+fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
+ use target_lexicon::BinaryFormat;
+
+ let target_triple = crate::target_triple(sess);
+
+ let mut flags_builder = settings::builder();
+ flags_builder.enable("is_pic").unwrap();
+ let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
+ flags_builder.set("enable_verifier", enable_verifier).unwrap();
+ flags_builder.set("regalloc_checker", enable_verifier).unwrap();
+
+ let tls_model = match target_triple.binary_format {
+ BinaryFormat::Elf => "elf_gd",
+ BinaryFormat::Macho => "macho",
+ BinaryFormat::Coff => "coff",
+ _ => "none",
+ };
+ flags_builder.set("tls_model", tls_model).unwrap();
+
+ flags_builder.set("enable_simd", "true").unwrap();
+
+ flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
+
+ use rustc_session::config::OptLevel;
+ match sess.opts.optimize {
+ OptLevel::No => {
+ flags_builder.set("opt_level", "none").unwrap();
+ }
+ OptLevel::Less | OptLevel::Default => {}
+ OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
+ flags_builder.set("opt_level", "speed_and_size").unwrap();
+ }
+ }
+
++ if target_triple.architecture == target_lexicon::Architecture::X86_64 {
++ // Windows depends on stack probes to grow the committed part of the stack
++ flags_builder.enable("enable_probestack").unwrap();
++ flags_builder.set("probestack_strategy", "inline").unwrap();
++ } else {
++ // __cranelift_probestack is not provided and inline stack probes are only supported on x86_64
++ flags_builder.set("enable_probestack", "false").unwrap();
++ }
++
+ let flags = settings::Flags::new(flags_builder);
+
+ let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
+ Some("native") => {
+ let builder = cranelift_native::builder_with_options(true).unwrap();
+ builder
+ }
+ Some(value) => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if let Err(_) = builder.enable(value) {
+ sess.fatal("the specified target cpu isn't currently supported by Cranelift.");
+ }
+ builder
+ }
+ None => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+ // Don't use "haswell" as the default, as it implies `has_lzcnt`.
+ // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
+ builder.enable("nehalem").unwrap();
+ }
+ builder
+ }
+ };
+
+ match isa_builder.finish(flags) {
+ Ok(target_isa) => target_isa,
+ Err(err) => sess.fatal(&format!("failed to build TargetIsa: {}", err)),
+ }
+}
+
+/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(CraneliftCodegenBackend { config: RefCell::new(None) })
+}
--- /dev/null
- ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+use rustc_hir::LangItem;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::AssocKind;
+use rustc_session::config::{sigpipe, EntryFnType};
+use rustc_span::symbol::Ident;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the rust runtime and call
+/// the user's main function.
+pub(crate) fn maybe_create_entry_wrapper(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ is_jit: bool,
+ is_primary_cgu: bool,
+) {
+ let (main_def_id, (is_main_fn, sigpipe)) = match tcx.entry_fn(()) {
+ Some((def_id, entry_ty)) => (
+ def_id,
+ match entry_ty {
+ EntryFnType::Main { sigpipe } => (true, sigpipe),
+ EntryFnType::Start => (false, sigpipe::DEFAULT),
+ },
+ ),
+ None => return,
+ };
+
+ if main_def_id.is_local() {
+ let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+ if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ return;
+ }
+ } else if !is_primary_cgu {
+ return;
+ }
+
+ create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn, sigpipe);
+
+ fn create_entry_fn(
+ tcx: TyCtxt<'_>,
+ m: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ rust_main_def_id: DefId,
+ ignore_lang_start_wrapper: bool,
+ is_main_fn: bool,
+ sigpipe: u8,
+ ) {
+ let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+ // Given that `main()` has no arguments,
+ // then its return type cannot have
+ // late-bound regions, since late-bound
+ // regions must appear in the argument
+ // listing.
+ let main_ret_ty = tcx.normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let cmain_sig = Signature {
+ params: vec![
+ AbiParam::new(m.target_config().pointer_type()),
+ AbiParam::new(m.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(m.isa().triple()),
+ };
+
+ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+ let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+ let main_name = tcx.symbol_name(instance).name;
+ let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
+ let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
+
+ let mut ctx = Context::new();
++ ctx.func.signature = cmain_sig;
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_sigpipe = bcx.ins().iconst(types::I8, sigpipe as i64);
+
+ let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+ let result = if is_main_fn && ignore_lang_start_wrapper {
+ // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
+ // FIXME set program arguments somehow
+ let call_inst = bcx.ins().call(main_func_ref, &[]);
+ let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
+
+ let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
+ let report = tcx
+ .associated_items(termination_trait)
+ .find_by_name_and_kind(
+ tcx,
+ Ident::from_str("report"),
+ AssocKind::Fn,
+ termination_trait,
+ )
+ .unwrap();
+ let report = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ report.def_id,
+ tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+
+ let report_name = tcx.symbol_name(report).name;
+ let report_sig = get_function_sig(tcx, m.isa().triple(), report);
+ let report_func_id =
+ m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
+ let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
+
+ // FIXME do proper abi handling instead of expecting the pass mode to be identical
+ // for returns and arguments.
+ let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
+ let res = bcx.func.dfg.inst_results(report_call_inst)[0];
+ match m.target_config().pointer_type() {
+ types::I32 => res,
+ types::I64 => bcx.ins().sextend(types::I64, res),
+ _ => unimplemented!("16bit systems are not yet supported"),
+ }
+ } else if is_main_fn {
+ let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+ let start_instance = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ start_def_id,
+ tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+ let start_func_id = import_function(tcx, m, start_instance);
+
+ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+ let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+ let call_inst =
+ bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv, arg_sigpipe]);
+ bcx.inst_results(call_inst)[0]
+ } else {
+ // using user-defined start fn
+ let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ };
+
+ bcx.ins().return_(&[result]);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ m.define_function(cmain_func_id, &mut ctx).unwrap();
+ unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+ }
+}
--- /dev/null
- BinOp::Shl => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- fx.bcx.ins().ishl(lhs, actual_shift)
- }
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+ use BinOp::*;
+ use IntCC::*;
+ Some(match bin_op {
+ Eq => Equal,
+ Lt => {
+ if signed {
+ SignedLessThan
+ } else {
+ UnsignedLessThan
+ }
+ }
+ Le => {
+ if signed {
+ SignedLessThanOrEqual
+ } else {
+ UnsignedLessThanOrEqual
+ }
+ }
+ Ne => NotEqual,
+ Ge => {
+ if signed {
+ SignedGreaterThanOrEqual
+ } else {
+ UnsignedGreaterThanOrEqual
+ }
+ }
+ Gt => {
+ if signed {
+ SignedGreaterThan
+ } else {
+ UnsignedGreaterThan
+ }
+ }
+ _ => return None,
+ })
+}
+
+fn codegen_compare_bin_op<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ signed: bool,
+ lhs: Value,
+ rhs: Value,
+) -> CValue<'tcx> {
+ let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+ let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ match in_lhs.layout().ty.kind() {
+ ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+ let signed = type_sign(in_lhs.layout().ty);
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ match in_lhs.layout().ty.kind() {
+ ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+ // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "int binop requires lhs and rhs of same type"
+ );
+ }
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let val = match bin_op {
+ BinOp::Add => b.iadd(lhs, rhs),
+ BinOp::Sub => b.isub(lhs, rhs),
+ BinOp::Mul => b.imul(lhs, rhs),
+ BinOp::Div => {
+ if signed {
+ b.sdiv(lhs, rhs)
+ } else {
+ b.udiv(lhs, rhs)
+ }
+ }
+ BinOp::Rem => {
+ if signed {
+ b.srem(lhs, rhs)
+ } else {
+ b.urem(lhs, rhs)
+ }
+ }
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
++ BinOp::Shl => b.ishl(lhs, rhs),
+ BinOp::Shr => {
- fx.bcx.ins().sshr(lhs, actual_shift)
+ if signed {
- fx.bcx.ins().ushr(lhs, actual_shift)
++ b.sshr(lhs, rhs)
+ } else {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let val = fx.bcx.ins().ishl(lhs, masked_shift);
++ b.ushr(lhs, rhs)
+ }
+ }
+ // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ };
+
+ CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "checked int binop requires lhs and rhs of same type"
+ );
+ }
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let (res, has_overflow) = match bin_op {
+ BinOp::Add => {
+ /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+ (val, c_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
+ let val = fx.bcx.ins().iadd(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+ } else {
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, slt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Sub => {
+ /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+ (val, b_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+ let val = fx.bcx.ins().isub(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+ } else {
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, sgt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Mul => {
+ let ty = fx.bcx.func.dfg.value_type(lhs);
+ match ty {
+ types::I8 | types::I16 | types::I32 if !signed => {
+ let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, has_overflow)
+ }
+ types::I8 | types::I16 | types::I32 if signed => {
+ let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_underflow =
+ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+ }
+ types::I64 => {
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = if !signed {
+ let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+ } else {
+ // Based on LLVM's instruction sequence for compiling
+ // a.checked_mul(b).is_some() to riscv64gc:
+ // mulh a2, a0, a1
+ // mul a0, a0, a1
+ // srai a0, a0, 63
+ // xor a0, a0, a2
+ // snez a0, a0
+ let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+ let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
+ let xor = fx.bcx.ins().bxor(val_hi, val_sign);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
+ };
+ (val, has_overflow)
+ }
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
+ _ => unreachable!("invalid non-integer type {}", ty),
+ }
+ }
+ BinOp::Shl => {
- let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
- let val = if !signed {
- fx.bcx.ins().ushr(lhs, masked_shift)
- } else {
- fx.bcx.ins().sshr(lhs, masked_shift)
- };
++ let val = fx.bcx.ins().ishl(lhs, rhs);
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ BinOp::Shr => {
++ let val =
++ if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
+ };
+
+ let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
+ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+ CValue::by_val_pair(res, has_overflow, out_layout)
+}
+
++pub(crate) fn codegen_saturating_int_binop<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ bin_op: BinOp,
++ lhs: CValue<'tcx>,
++ rhs: CValue<'tcx>,
++) -> CValue<'tcx> {
++ assert_eq!(lhs.layout().ty, rhs.layout().ty);
++
++ let signed = type_sign(lhs.layout().ty);
++ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
++ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
++
++ let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
++ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
++
++ let val = match (bin_op, signed) {
++ (BinOp::Add, false) => fx.bcx.ins().select(has_overflow, max, val),
++ (BinOp::Sub, false) => fx.bcx.ins().select(has_overflow, min, val),
++ (BinOp::Add, true) => {
++ let rhs = rhs.load_scalar(fx);
++ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
++ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
++ fx.bcx.ins().select(has_overflow, sat_val, val)
++ }
++ (BinOp::Sub, true) => {
++ let rhs = rhs.load_scalar(fx);
++ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
++ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
++ fx.bcx.ins().select(has_overflow, sat_val, val)
++ }
++ _ => unreachable!(),
++ };
++
++ CValue::by_val(val, lhs.layout())
++}
++
+pub(crate) fn codegen_float_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::Add => b.fadd(lhs, rhs),
+ BinOp::Sub => b.fsub(lhs, rhs),
+ BinOp::Mul => b.fmul(lhs, rhs),
+ BinOp::Div => b.fdiv(lhs, rhs),
+ BinOp::Rem => {
+ let name = match in_lhs.layout().ty.kind() {
+ ty::Float(FloatTy::F32) => "fmodf",
+ ty::Float(FloatTy::F64) => "fmod",
+ _ => bug!(),
+ };
+ return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+ }
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let fltcc = match bin_op {
+ BinOp::Eq => FloatCC::Equal,
+ BinOp::Lt => FloatCC::LessThan,
+ BinOp::Le => FloatCC::LessThanOrEqual,
+ BinOp::Ne => FloatCC::NotEqual,
+ BinOp::Ge => FloatCC::GreaterThanOrEqual,
+ BinOp::Gt => FloatCC::GreaterThan,
+ _ => unreachable!(),
+ };
+ let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
+
+ if is_thin_ptr {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
+ }
+ BinOp::Offset => {
+ let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+ let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ CValue::by_val(res, base.layout())
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ }
+ } else {
+ let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+ let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+ let res = match bin_op {
+ BinOp::Eq => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+ let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+ fx.bcx.ins().band(ptr_eq, extra_eq)
+ }
+ BinOp::Ne => {
+ let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+ let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+ fx.bcx.ins().bor(ptr_ne, extra_ne)
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
+ let ptr_cmp =
+ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ let extra_cmp = fx.bcx.ins().icmp(
+ bin_op_to_intcc(bin_op, false).unwrap(),
+ lhs_extra,
+ rhs_extra,
+ );
+
+ fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+ }
+ _ => panic!("bin_op {:?} on ptr", bin_op),
+ };
+
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
+ }
+}
+
+// In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
+// For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+// and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+// a float against itself. Only in case of NaN is it not equal to itself.
+pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
+
+pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
--- /dev/null
- (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
+pub(crate) fn unsized_info<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Value>,
+) -> Value {
+ let (source, target) =
+ fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+ match (&source.kind(), &target.kind()) {
+ (&ty::Array(_, len), &ty::Slice(_)) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
++ (
++ &ty::Dynamic(ref data_a, _, src_dyn_kind),
++ &ty::Dynamic(ref data_b, _, target_dyn_kind),
++ ) => {
++ assert_eq!(src_dyn_kind, target_dyn_kind);
++
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+ // A NOP cast that doesn't actually change anything, should be allowed even with invalid vtables.
+ return old_info;
+ }
+
+ // trait upcasting coercion
+ let vptr_entry_idx =
+ fx.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let entry_idx = u32::try_from(entry_idx).unwrap();
+ let entry_offset = entry_idx * fx.pointer_type.bytes();
+ let vptr_ptr = Pointer::new(old_info).offset_i64(fx, entry_offset.into()).load(
+ fx,
+ fx.pointer_type,
+ crate::vtable::vtable_memflags(),
+ );
+ vptr_ptr
+ } else {
+ old_info
+ }
+ }
+ (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerce `src` to `dst_ty`.
+fn unsize_ptr<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: Value,
+ src_layout: TyAndLayout<'tcx>,
+ dst_layout: TyAndLayout<'tcx>,
+ old_info: Option<Value>,
+) -> (Value, Value) {
+ match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+ | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (src, unsized_info(fx, *a, *b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+ (src, unsized_info(fx, a, b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ if src_layout == dst_layout {
+ return (src, old_info.unwrap());
+ }
+
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(fx, i);
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ if src_f.is_zst() {
+ continue;
+ }
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(fx, i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_ptr(fx, src, src_f, dst_f, old_info));
+ }
+ result.unwrap()
+ }
+ _ => bug!("unsize_ptr: called on bad types"),
+ }
+}
+
++/// Coerces `src` to `dst_ty` which is guaranteed to be a `dyn*` type.
++pub(crate) fn cast_to_dyn_star<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ src: Value,
++ src_ty_and_layout: TyAndLayout<'tcx>,
++ dst_ty: Ty<'tcx>,
++ old_info: Option<Value>,
++) -> (Value, Value) {
++ assert!(
++ matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
++ "destination type must be a dyn*"
++ );
++ (src, unsized_info(fx, src_ty_and_layout.ty, dst_ty, old_info))
++}
++
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty` and store the result in `dst`
+pub(crate) fn coerce_unsized_into<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: CValue<'tcx>,
+ dst: CPlace<'tcx>,
+) {
+ let src_ty = src.layout().ty;
+ let dst_ty = dst.layout().ty;
+ let mut coerce_ptr = || {
+ let (base, info) =
+ if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+ let (old_base, old_info) = src.load_scalar_pair(fx);
+ unsize_ptr(fx, old_base, src.layout(), dst.layout(), Some(old_info))
+ } else {
+ let base = src.load_scalar(fx);
+ unsize_ptr(fx, base, src.layout(), dst.layout(), None)
+ };
+ dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+ };
+ match (&src_ty.kind(), &dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..))
+ | (&ty::Ref(..), &ty::RawPtr(..))
+ | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
+ let src_f = src.value_field(fx, mir::Field::new(i));
+ let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+ if dst_f.layout().is_zst() {
+ continue;
+ }
+
+ if src_f.layout().ty == dst_f.layout().ty {
+ dst_f.write_cvalue(fx, src_f);
+ } else {
+ coerce_unsized_into(fx, src_f, dst_f);
+ }
+ }
+ }
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
+ }
+}
+
++pub(crate) fn coerce_dyn_star<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ src: CValue<'tcx>,
++ dst: CPlace<'tcx>,
++) {
++ let (data, extra) = if let ty::Dynamic(_, _, ty::DynStar) = src.layout().ty.kind() {
++ let (data, vtable) = src.load_scalar_pair(fx);
++ (data, Some(vtable))
++ } else {
++ let data = src.load_scalar(fx);
++ (data, None)
++ };
++
++ let (data, vtable) = cast_to_dyn_star(fx, data, src.layout(), dst.layout().ty, extra);
++
++ dst.write_cvalue(fx, CValue::by_val_pair(data, vtable, dst.layout()));
++}
++
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ info: Value,
+) -> (Value, Value) {
+ assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
+ match layout.ty.kind() {
+ ty::Dynamic(..) => {
+ // load size/align from vtable
+ (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(fx, 0);
+ // The info in this case is the length of the str, so the size is that
+ // times the unit size.
+ (
+ fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+ fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!layout.ty.is_simd());
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_layout = layout.field(fx, i);
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = layout.ty.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+ let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align : 0)`
+ //
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
+ let addend = fx.bcx.ins().iadd_imm(align, -1);
+ let add = fx.bcx.ins().iadd(size, addend);
+ let neg = fx.bcx.ins().ineg(align);
+ let size = fx.bcx.ins().band(add, neg);
+
+ (size, align)
+ }
+ }
+}
--- /dev/null
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ base: Pointer,
+ extra: Option<Value>,
+ layout: TyAndLayout<'tcx>,
+ field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_layout = layout.field(&*fx, field.index());
+
+ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+ };
+
+ if let Some(extra) = extra {
+ if !field_layout.is_unsized() {
+ return simple(fx);
+ }
+ match field_layout.ty.kind() {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+ ty::Adt(def, _) if def.repr().packed() => {
+ assert_eq!(layout.align.abi.bytes(), 1);
+ simple(fx)
+ }
+ _ => {
+ // We have to align the offset for DSTs
+ let unaligned_offset = field_offset.bytes();
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
+ let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
+ let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+ let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+ let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+ (base.offset_value(fx, offset), field_layout)
+ }
+ }
+ } else {
+ simple(fx)
+ }
+}
+
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
+ let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
+ Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+ ByRef(Pointer, Option<Value>),
+ ByVal(Value),
+ ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+ pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, None), layout)
+ }
+
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+ }
+
+ pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByVal(value), layout)
+ }
+
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByValPair(value, extra), layout)
+ }
+
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.1
+ }
+
+ // FIXME remove
+ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => (ptr, meta),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+ let cplace = CPlace::new_stack_slot(fx, layout);
+ cplace.write_cvalue(fx, self);
+ (cplace.to_ptr(), None)
+ }
+ }
+ }
+
++ // FIXME remove
++ // Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
++ // vtable pointer.
++ pub(crate) fn dyn_star_force_data_on_stack(
++ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ ) -> (Value, Value) {
++ assert!(self.1.ty.is_dyn_star());
++
++ match self.0 {
++ CValueInner::ByRef(ptr, None) => {
++ let (a_scalar, b_scalar) = match self.1.abi {
++ Abi::ScalarPair(a, b) => (a, b),
++ _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
++ };
++ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
++ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
++ let mut flags = MemFlags::new();
++ flags.set_notrap();
++ let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
++ (ptr.get_addr(fx), vtable)
++ }
++ CValueInner::ByValPair(data, vtable) => {
++ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
++ kind: StackSlotKind::ExplicitSlot,
++ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
++ // specify stack slot alignment.
++ size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
++ / 16
++ * 16,
++ });
++ let data_ptr = Pointer::stack_slot(stack_slot);
++ let mut flags = MemFlags::new();
++ flags.set_notrap();
++ data_ptr.store(fx, data, flags);
++
++ (data_ptr.get_addr(fx), vtable)
++ }
++ CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
++ unreachable!("dyn_star_force_data_on_stack({:?})", self)
++ }
++ }
++ }
++
+ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+ }
+ }
+
+ /// Load a value with layout.abi of scalar
+ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let clif_ty = match layout.abi {
+ Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ .by(u32::try_from(count).unwrap())
+ .unwrap(),
+ _ => unreachable!("{:?}", layout.ty),
+ };
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, clif_ty, flags)
+ }
+ CValueInner::ByVal(value) => value,
+ CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+ CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+ }
+ }
+
+ /// Load a value pair with layout.abi of scalar pair
+ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match layout.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("load_scalar_pair({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let val1 = ptr.load(fx, clif_ty1, flags);
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (val1, val2)
+ }
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
+ CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+ CValueInner::ByValPair(val1, val2) => (val1, val2),
+ }
+ }
+
+ pub(crate) fn value_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
+ }
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
+ }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
+ CValueInner::ByRef(ptr, None) => {
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+ CValue::by_ref(field_ptr, field_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+ /// such that you can access individual lanes.
+ pub(crate) fn value_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count: _ } => {
+ assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+ let lane_idx = u8::try_from(lane_idx).unwrap();
+ let lane = fx.bcx.ins().extractlane(val, lane_idx);
+ CValue::by_val(lane, lane_layout)
+ }
+ _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(_, _) => unreachable!(),
+ CValueInner::ByRef(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CValue::by_ref(field_ptr, lane_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_unsized_into(fx, self, dest);
+ }
+
++ pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
++ crate::unsize::coerce_dyn_star(fx, self, dest);
++ }
++
+ /// If `ty` is signed, `const_val` must already be sign extended.
+ pub(crate) fn const_val(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ const_val: ty::ScalarInt,
+ ) -> CValue<'tcx> {
+ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+ use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+ let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+ if let ty::Bool = layout.ty.kind() {
+ assert!(
+ const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
+ }
+
+ let val = match layout.ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ let const_val = const_val.to_bits(layout.size).unwrap();
+ let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ }
+ ty::Float(FloatTy::F32) => {
+ fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+ }
+ _ => panic!(
+ "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+ layout.ty
+ ),
+ };
+
+ CValue::by_val(val, layout)
+ }
+
+ pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
+ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert_eq!(self.layout().abi, layout.abi);
+ CValue(self.0, layout)
+ }
+}
+
+/// A place where you can write a value to or read a value from
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+ inner: CPlaceInner,
+ layout: TyAndLayout<'tcx>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+ Var(Local, Variable),
+ VarPair(Local, Variable, Variable),
+ VarLane(Local, Variable, u8),
+ Addr(Pointer, Option<Value>),
+}
+
impl<'tcx> CPlace<'tcx> {
    /// The type and layout of the value stored in this place.
    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    /// The concrete representation (SSA variable(s) or memory address) of this place.
    pub(crate) fn inner(&self) -> &CPlaceInner {
        &self.inner
    }

    /// Allocate a fresh stack slot large enough for `layout` and return it as a place.
    ///
    /// ZSTs get a dangling (but suitably aligned) pointer instead of a real slot.
    /// Types whose size approaches the `u32` stack-slot limit abort compilation
    /// with a fatal error rather than overflowing the slot size.
    pub(crate) fn new_stack_slot(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        assert!(!layout.is_unsized());
        if layout.size.bytes() == 0 {
            return CPlace {
                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
                layout,
            };
        }

        // Leave headroom below u32::MAX for the round-up to a multiple of 16 below.
        if layout.size.bytes() >= u64::from(u32::MAX - 16) {
            fx.tcx
                .sess
                .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
        }

        let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
            // specify stack slot alignment.
            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
        });
        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
    }

    /// Create a place backed by a single new SSA variable for MIR local `local`.
    /// `layout.ty` must map to a single clif type (`fx.clif_type` must succeed).
    pub(crate) fn new_var(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }

    /// Create a place backed by two new SSA variables for MIR local `local`.
    /// `layout.ty` must map to a pair of clif types (`fx.clif_pair_type` must succeed).
    pub(crate) fn new_var_pair(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var1 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        let var2 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;

        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
        fx.bcx.declare_var(var1, ty1);
        fx.bcx.declare_var(var2, ty2);
        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
    }

    /// Wrap an existing sized pointer as a place.
    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
    }

    /// Wrap a pointer plus its unsized metadata (`extra`) as a place.
    pub(crate) fn for_ptr_with_extra(
        ptr: Pointer,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
    }

    /// Read this place as a value, loading SSA variables or wrapping the address
    /// as a by-ref value as appropriate.
    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                CValue::by_val(val, layout)
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let val1 = fx.bcx.use_var(var1);
                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                let val2 = fx.bcx.use_var(var2);
                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                CValue::by_val_pair(val1, val2, layout)
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                // Extract the single requested lane out of the whole vector variable.
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                let val = fx.bcx.ins().extractlane(val, lane);
                CValue::by_val(val, layout)
            }
            CPlaceInner::Addr(ptr, extra) => {
                if let Some(extra) = extra {
                    CValue::by_ref_unsized(ptr, extra, layout)
                } else {
                    CValue::by_ref(ptr, layout)
                }
            }
        }
    }

    /// The address of this sized, in-memory place. Panics (ICE) for SSA-backed
    /// or unsized places.
    pub(crate) fn to_ptr(self) -> Pointer {
        match self.to_ptr_maybe_unsized() {
            (ptr, None) => ptr,
            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
        }
    }

    /// The address of this in-memory place together with its optional unsized
    /// metadata. Panics (ICE) for SSA-backed places.
    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
        match self.inner {
            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
            CPlaceInner::Var(_, _)
            | CPlaceInner::VarPair(_, _, _)
            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
        }
    }

    /// Write `from` to this place, first asserting (debug aid only) that the two
    /// types are assignment-compatible.
    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
        assert_assignable(fx, from.layout().ty, self.layout().ty, 16);

        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
    }

    /// Write `from` to this place without any type-compatibility check, allowing
    /// a same-size transmute between the two layouts.
    pub(crate) fn write_cvalue_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
    ) {
        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
    }

    /// Shared implementation of [`CPlace::write_cvalue`] and
    /// [`CPlace::write_cvalue_transmute`]. `method` is only used for the
    /// clif comment emitted when comments are enabled.
    fn write_cvalue_maybe_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
        method: &'static str,
    ) {
        // Assign `data` (possibly of a different but same-sized clif type) to `var`,
        // inserting the necessary bitcast or a stack round-trip for vector<->scalar.
        fn transmute_value<'tcx>(
            fx: &mut FunctionCx<'_, '_, 'tcx>,
            var: Variable,
            data: Value,
            dst_ty: Type,
        ) {
            let src_ty = fx.bcx.func.dfg.value_type(data);
            assert_eq!(
                src_ty.bytes(),
                dst_ty.bytes(),
                "write_cvalue_transmute: {:?} -> {:?}",
                src_ty,
                dst_ty,
            );
            let data = match (src_ty, dst_ty) {
                (_, _) if src_ty == dst_ty => data,

                // This is a `write_cvalue_transmute`.
                (types::I32, types::F32)
                | (types::F32, types::I32)
                | (types::I64, types::F64)
                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
                _ if src_ty.is_vector() && dst_ty.is_vector() => {
                    fx.bcx.ins().raw_bitcast(dst_ty, data)
                }
                _ if src_ty.is_vector() || dst_ty.is_vector() => {
                    // FIXME do something more efficient for transmutes between vectors and integers.
                    let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                        // specify stack slot alignment.
                        size: (src_ty.bytes() + 15) / 16 * 16,
                    });
                    let ptr = Pointer::stack_slot(stack_slot);
                    ptr.store(fx, data, MemFlags::trusted());
                    ptr.load(fx, dst_ty, MemFlags::trusted())
                }

                // `CValue`s should never contain SSA-only types, so if you ended
                // up here having seen an error like `B1 -> I8`, then before
                // calling `write_cvalue` you need to add a `bint` instruction.
                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
            };
            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
            fx.bcx.def_var(var, data);
        }

        assert_eq!(self.layout().size, from.layout().size);

        // When clif comments are enabled, annotate the last emitted instruction
        // with a human-readable description of this write.
        if fx.clif_comments.enabled() {
            use cranelift_codegen::cursor::{Cursor, CursorPosition};
            let cur_block = match fx.bcx.cursor().position() {
                CursorPosition::After(block) => block,
                _ => unreachable!(),
            };
            fx.add_comment(
                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
                format!(
                    "{}: {:?}: {:?} <- {:?}: {:?}",
                    method,
                    self.inner(),
                    self.layout().ty,
                    from.0,
                    from.layout().ty
                ),
            );
        }

        let dst_layout = self.layout();
        // SSA-backed destinations are handled fully inside this match (and
        // `return`); only the in-memory case falls through with its pointer.
        let to_ptr = match self.inner {
            CPlaceInner::Var(_local, var) => {
                if let ty::Array(element, len) = dst_layout.ty.kind() {
                    // Can only happen for vector types
                    let len =
                        u32::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
                    let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();

                    let data = match from.0 {
                        CValueInner::ByRef(ptr, None) => {
                            let mut flags = MemFlags::new();
                            flags.set_notrap();
                            ptr.load(fx, vector_ty, flags)
                        }
                        CValueInner::ByVal(_)
                        | CValueInner::ByValPair(_, _)
                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
                    };

                    fx.bcx.def_var(var, data);
                    return;
                }
                // Reinterpret the source under the destination layout, then
                // assign (with a transmute if the clif types differ).
                let data = CValue(from.0, dst_layout).load_scalar(fx);
                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                transmute_value(fx, var, data, dst_ty);
                return;
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                transmute_value(fx, var1, data1, dst_ty1);
                transmute_value(fx, var2, data2, dst_ty2);
                return;
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let data = from.load_scalar(fx);

                // First get the old vector
                let vector = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));

                // Next insert the written lane into the vector
                let vector = fx.bcx.ins().insertlane(vector, data, lane);

                // Finally write the new vector
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
                fx.bcx.def_var(var, vector);

                return;
            }
            CPlaceInner::Addr(ptr, None) => {
                // Nothing to store for ZSTs or uninhabited values.
                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
                    return;
                }
                ptr
            }
            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
        };

        let mut flags = MemFlags::new();
        flags.set_notrap();
        // Scalar and scalar-pair ABIs get direct stores regardless of the
        // source representation.
        match from.layout().abi {
            // FIXME make Abi::Vector work too
            Abi::Scalar(_) => {
                let val = from.load_scalar(fx);
                to_ptr.store(fx, val, flags);
                return;
            }
            Abi::ScalarPair(a_scalar, b_scalar) => {
                let (value, extra) = from.load_scalar_pair(fx);
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                to_ptr.store(fx, value, flags);
                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                return;
            }
            _ => {}
        }

        // Remaining ABIs: store a single value directly, or memcpy a by-ref source.
        match from.0 {
            CValueInner::ByVal(val) => {
                to_ptr.store(fx, val, flags);
            }
            CValueInner::ByValPair(_, _) => {
                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
            }
            CValueInner::ByRef(from_ptr, None) => {
                let from_addr = from_ptr.get_addr(fx);
                let to_addr = to_ptr.get_addr(fx);
                let src_layout = from.1;
                let size = dst_layout.size.bytes();
                // NOTE(review): alignments are truncated to u8 as required by
                // `emit_small_memory_copy`; alignments above 255 bytes would wrap.
                let src_align = src_layout.align.abi.bytes() as u8;
                let dst_align = dst_layout.align.abi.bytes() as u8;
                fx.bcx.emit_small_memory_copy(
                    fx.target_config,
                    to_addr,
                    from_addr,
                    size,
                    dst_align,
                    src_align,
                    true,
                    flags,
                );
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    /// Reinterpret this place as having type `ty` without moving it (same
    /// representation, new layout).
    pub(crate) fn place_opaque_cast(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        ty: Ty<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: self.inner, layout: fx.layout_of(ty) }
    }

    /// Project to field `field` of this place. SSA-backed places project to
    /// variables/lanes where possible; everything else falls back to pointer
    /// arithmetic via `codegen_field`.
    pub(crate) fn place_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CPlace<'tcx> {
        let layout = self.layout();

        match self.inner {
            CPlaceInner::Var(local, var) => match layout.ty.kind() {
                ty::Array(_, _) => {
                    // Can only happen for vector types
                    return CPlace {
                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                    };
                }
                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);

                    match f0_ty.kind() {
                        ty::Array(_, _) => {
                            // SIMD newtype around an array: field 0 is the whole vector.
                            assert_eq!(field.as_u32(), 0);
                            return CPlace {
                                inner: CPlaceInner::Var(local, var),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                        _ => {
                            return CPlace {
                                inner: CPlaceInner::VarLane(
                                    local,
                                    var,
                                    field.as_u32().try_into().unwrap(),
                                ),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                    }
                }
                _ => {}
            },
            CPlaceInner::VarPair(local, var1, var2) => {
                let layout = layout.field(&*fx, field.index());

                match field.as_u32() {
                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                    _ => unreachable!("field should be 0 or 1"),
                }
            }
            _ => {}
        }

        let (base, extra) = self.to_ptr_maybe_unsized();

        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
        if field_layout.is_unsized() {
            if let ty::Foreign(_) = field_layout.ty.kind() {
                // Extern types carry no metadata.
                assert!(extra.is_none());
                CPlace::for_ptr(field_ptr, field_layout)
            } else {
                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
            }
        } else {
            CPlace::for_ptr(field_ptr, field_layout)
        }
    }

    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
    pub(crate) fn place_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CPlace<'tcx> {
        let layout = self.layout();
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);

        match self.inner {
            CPlaceInner::Var(local, var) => {
                assert!(matches!(layout.abi, Abi::Vector { .. }));
                CPlace {
                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
                    layout: lane_layout,
                }
            }
            CPlaceInner::VarPair(_, _, _) => unreachable!(),
            CPlaceInner::VarLane(_, _, _) => unreachable!(),
            CPlaceInner::Addr(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CPlace::for_ptr(field_ptr, lane_layout)
            }
            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
        }
    }

    /// Index into an array or slice place with a dynamic index. No bounds check
    /// is emitted here; callers are responsible for that.
    pub(crate) fn place_index(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        index: Value,
    ) -> CPlace<'tcx> {
        let (elem_layout, ptr) = match self.layout().ty.kind() {
            ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
            ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
            _ => bug!("place_index({:?})", self.layout().ty),
        };

        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);

        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
    }

    /// Dereference this place (which must hold a pointer-like type), yielding a
    /// place for the pointee; preserves fat-pointer metadata for unsized pointees.
    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
        if has_ptr_meta(fx.tcx, inner_layout.ty) {
            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
        } else {
            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
        }
    }

    /// Take the address of this place as a value of type `layout` (a reference
    /// or raw-pointer layout), including metadata for unsized places.
    pub(crate) fn place_ref(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        if has_ptr_meta(fx.tcx, self.layout().ty) {
            let (ptr, extra) = self.to_ptr_maybe_unsized();
            CValue::by_val_pair(
                ptr.get_addr(fx),
                extra.expect("unsized type without metadata"),
                layout,
            )
        } else {
            CValue::by_val(self.to_ptr().get_addr(fx), layout)
        }
    }

    /// Reinterpret this enum place as one specific variant, adjusting only the
    /// layout; the underlying representation is unchanged.
    pub(crate) fn downcast_variant(
        self,
        fx: &FunctionCx<'_, '_, 'tcx>,
        variant: VariantIdx,
    ) -> Self {
        assert!(!self.layout().is_unsized());
        let layout = self.layout().for_variant(fx, variant);
        CPlace { inner: self.inner, layout }
    }
}
+
/// Assert that a value of type `from_ty` may be written to a place of type `to_ty`.
///
/// This is purely a debugging aid to catch codegen bugs in cg_clif; it is not
/// needed for soundness. `limit` bounds the recursion depth so deeply nested
/// types don't cause exponential blowup — once it reaches 0 the check is skipped.
#[track_caller]
pub(crate) fn assert_assignable<'tcx>(
    fx: &FunctionCx<'_, '_, 'tcx>,
    from_ty: Ty<'tcx>,
    to_ty: Ty<'tcx>,
    limit: usize,
) {
    if limit == 0 {
        // assert_assignable exists solely to catch bugs in cg_clif. it isn't necessary for
        // soundness. don't attempt to check deep types to avoid exponential behavior in certain
        // cases.
        return;
    }
    match (from_ty.kind(), to_ty.kind()) {
        // Pointers/references are assignable if their pointees are (mutability
        // and regions are ignored).
        (ty::Ref(_, a, _), ty::Ref(_, b, _))
        | (
            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
        ) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        // References and raw pointers are interchangeable here in both directions.
        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::FnPtr(_), ty::FnPtr(_)) => {
            // Compare signatures after erasing late-bound regions so that
            // higher-ranked and non-higher-ranked fn pointers compare equal.
            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                ParamEnv::reveal_all(),
                from_ty.fn_sig(fx.tcx),
            );
            let to_sig = fx
                .tcx
                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
            assert_eq!(
                from_sig, to_sig,
                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                from_sig, to_sig, fx,
            );
            // fn(&T) -> for<'l> fn(&'l T) is allowed
        }
        (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
            // FIXME(dyn-star): Do the right thing with DynKinds
            for (from, to) in from_traits.iter().zip(to_traits) {
                let from =
                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
                assert_eq!(
                    from, to,
                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
                    from_traits, to_traits, fx,
                );
            }
            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
        }
        // Tuples: element-wise check; a length mismatch is an outright bug.
        (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
            let mut types_a = types_a.iter();
            let mut types_b = types_b.iter();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        // Same ADT: compare the type arguments of the substitutions pairwise.
        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
            if adt_def_a.did() == adt_def_b.did() =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
        // Same closure: compare the substitution types pairwise.
        (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
            if def_id_a == def_id_b =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
            // No way to check if it is correct or not with polymorphization enabled
        }
        _ => {
            // Fallback: require exact type equality.
            assert_eq!(
                from_ty,
                to_ty,
                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
                from_ty.kind(),
                to_ty.kind(),
                fx,
            );
        }
    }
}
--- /dev/null
- ) -> (Value, Value) {
- let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
- arg.load_scalar_pair(fx)
- } else {
- let (ptr, vtable) = arg.try_to_ptr().unwrap();
- (ptr.get_addr(fx), vtable.unwrap())
+//! Codegen vtables and vtable accesses.
+//!
+//! See `rustc_codegen_ssa/src/meth.rs` for reference.
+
+use crate::constant::data_id_for_alloc_id;
+use crate::prelude::*;
+
/// Memory flags shared by every load from a vtable.
pub(crate) fn vtable_memflags() -> MemFlags {
    let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
    flags.set_readonly(); // A vtable is always read-only.
    flags
}
+
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
+ )
+}
+
/// Split a trait-object argument into its data pointer and the method function
/// pointer at vtable slot `idx`.
pub(crate) fn get_ptr_and_method_ref<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    arg: CValue<'tcx>,
    idx: usize,
) -> (Pointer, Value) {
    let (ptr, vtable) = 'block: {
        // `&dyn* Trait` stores the (data, vtable) pair behind the reference
        // instead of as a fat pointer, so read both fields through the pointee.
        if let ty::Ref(_, ty, _) = arg.layout().ty.kind() {
            if ty.is_dyn_star() {
                let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
                let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
                let ptr = dyn_star.place_field(fx, mir::Field::new(0)).to_ptr();
                let vtable =
                    dyn_star.place_field(fx, mir::Field::new(1)).to_cvalue(fx).load_scalar(fx);
                break 'block (ptr, vtable);
            }
        }

        if let Abi::ScalarPair(_, _) = arg.layout().abi {
            // A fat pointer: (data pointer, vtable) as a scalar pair.
            let (ptr, vtable) = arg.load_scalar_pair(fx);
            (Pointer::new(ptr), vtable)
        } else {
            // Otherwise the value lives in memory with the vtable as metadata.
            let (ptr, vtable) = arg.try_to_ptr().unwrap();
            (ptr, vtable.unwrap())
        }
    };

    // Method entries are usize-sized, so slot `idx` lives at byte offset
    // `idx * size_of::<usize>()`.
    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
    let func_ref = fx.bcx.ins().load(
        fx.pointer_type,
        vtable_memflags(),
        vtable,
        (idx * usize_size as usize) as i32,
    );
    (ptr, func_ref)
}
+
/// Get a pointer to the vtable for `ty` as an implementation of `trait_ref`,
/// emitting the vtable data into the current module if it isn't there already.
pub(crate) fn get_vtable<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> Value {
    // The vtable contents are provided by rustc as a const allocation;
    // translate that allocation into a read-only data object.
    let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
    let data_id =
        data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(local_data_id, format!("vtable: {:?}", alloc_id));
    }
    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}