--- /dev/null
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ rustfmt:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install rustfmt
+ run: |
+ rustup component add rustfmt
+
+ - name: Rustfmt
+ run: |
+ cargo fmt --check
+ rustfmt --check build_system/mod.rs
+
+
+ build:
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
+ defaults:
+ run:
+ shell: bash
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-unknown-linux-gnu
+ - os: macos-latest
+ env:
+ TARGET_TRIPLE: x86_64-apple-darwin
+ # cross-compile from Linux to Windows using mingw
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: aarch64-unknown-linux-gnu
+ # s390x requires QEMU 6.1 or newer. We could build it from source, but Ubuntu 22.04 ships QEMU 6.2 by default.
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: s390x-unknown-linux-gnu
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-msvc
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v3
+ with:
+ path: build/cg_clif
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Set MinGW as the default toolchain
+ if: matrix.os == 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: rustup set default-host x86_64-pc-windows-gnu
+
+ - name: Install MinGW toolchain and wine
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
+
+ - name: Install AArch64 toolchain and qemu
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
+
+ - name: Install s390x toolchain and qemu
+ if: matrix.env.TARGET_TRIPLE == 's390x-unknown-linux-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-s390x-linux-gnu qemu-user
+
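+ # The sparse protocol downloads only the needed crates.io index entries instead of
+ # cloning the whole git index. It is still unstable on the pinned nightly, hence the
+ # [unstable] table written below.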
+ - name: Use sparse cargo registry
+ run: |
+ cat >> ~/.cargo/config.toml <<EOF
+ [unstable]
+ sparse-registry = true
+ EOF
+
+ - name: Prepare dependencies
+ run: ./y.rs prepare
+
+ - name: Build without unstable features
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ # This is the config rust-lang/rust uses for builds
+ run: ./y.rs build --no-unstable-features
+
+ - name: Build
+ run: ./y.rs build --sysroot none
+
+ - name: Test
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ run: ./y.rs test
+
+
+ abi_cafe:
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
+ defaults:
+ run:
+ shell: bash
+
+ strategy:
+ fail-fast: true
+ matrix:
+ include:
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-unknown-linux-gnu
+ - os: macos-latest
+ env:
+ TARGET_TRIPLE: x86_64-apple-darwin
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-msvc
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v3
+ with:
+ path: build/cg_clif
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Set MinGW as the default toolchain
+ if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: rustup set default-host x86_64-pc-windows-gnu
+
+ - name: Use sparse cargo registry
+ run: |
+ cat >> ~/.cargo/config.toml <<EOF
+ [unstable]
+ sparse-registry = true
+ EOF
+
+ - name: Prepare dependencies
+ run: ./y.rs prepare
+
+ - name: Build
+ run: ./y.rs build --sysroot none
+
+ - name: Test abi-cafe
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ run: ./y.rs abi-cafe
+
+
+ bench:
+ runs-on: ubuntu-latest
+ timeout-minutes: 60
+
+ defaults:
+ run:
+ shell: bash
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v3
+ with:
+ path: build/cg_clif
+ key: ${{ runner.os }}-x86_64-unknown-linux-gnu-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Cache cargo bin dir
+ uses: actions/cache@v3
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-x86_64-unknown-linux-gnu-cargo-bin-dir-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Use sparse cargo registry
+ run: |
+ cat >> ~/.cargo/config.toml <<EOF
+ [unstable]
+ sparse-registry = true
+ EOF
+
+ - name: Install hyperfine
+ run: cargo install hyperfine || true
+
+ - name: Prepare dependencies
+ run: ./y.rs prepare
+
+ - name: Build
+ run: CI_OPT=1 ./y.rs build --sysroot none
+
+ - name: Benchmark
+ run: CI_OPT=1 ./y.rs bench
+
+
+ dist:
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
+ defaults:
+ run:
+ shell: bash
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ # FIXME update at some point in the future once most distros use a newer glibc
+ - os: ubuntu-20.04
+ env:
+ TARGET_TRIPLE: x86_64-unknown-linux-gnu
+ - os: macos-latest
+ env:
+ TARGET_TRIPLE: x86_64-apple-darwin
+ # cross-compile from Linux to Windows using mingw
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-msvc
+ - os: windows-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v3
+ with:
+ path: build/cg_clif
+ key: ${{ runner.os }}-${{ matrix.env.TARGET_TRIPLE }}-dist-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Set MinGW as the default toolchain
+ if: matrix.os == 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: rustup set default-host x86_64-pc-windows-gnu
+
+ - name: Install MinGW toolchain and wine
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
+
+ - name: Use sparse cargo registry
+ run: |
+ cat >> ~/.cargo/config.toml <<EOF
+ [unstable]
+ sparse-registry = true
+ EOF
+
+ - name: Prepare dependencies
+ run: ./y.rs prepare
+
+ - name: Build backend
+ run: CI_OPT=1 ./y.rs build --sysroot none
+
+ - name: Build sysroot
+ run: CI_OPT=1 ./y.rs build
+
+ - name: Package prebuilt cg_clif
+ run: tar cvfJ cg_clif.tar.xz dist
+
+ - name: Upload prebuilt cg_clif
+ if: matrix.os == 'windows-latest' || matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v3
+ with:
+ name: cg_clif-${{ matrix.env.TARGET_TRIPLE }}
+ path: cg_clif.tar.xz
+
+ - name: Upload prebuilt cg_clif (cross compile)
+ if: matrix.os != 'windows-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v3
+ with:
+ name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
+ path: cg_clif.tar.xz
--- /dev/null
- version = "1.0.78"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "cc"
- checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"
++version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dae98c88e576098d7ab13ebcb40cc43e5114b2beafe61a87cda9200649ff205"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "203540e710bfadb90e5e29930baf5d10270cec1f43ab34f46f78b147b2de715a"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57cafc2274c10fab234f176b25903ce17e690fca7597090d50880e047a0389c5"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.139"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
+dependencies = [
+ "adler",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
+dependencies = [
+ "compiler_builtins",
+ "memchr",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "std_detect",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "std_detect"
+version = "0.1.5"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
--- /dev/null
+use std::path::Path;
+
+use super::build_sysroot;
+use super::path::Dirs;
+use super::prepare::GitRepo;
+use super::utils::{spawn_and_wait, CargoProject, Compiler};
+use super::SysrootKind;
+
+static ABI_CAFE_REPO: GitRepo =
+ GitRepo::github("Gankra", "abi-cafe", "4c6dc8c9c687e2b3a760ff2176ce236872b37212", "abi-cafe");
+
+static ABI_CAFE: CargoProject = CargoProject::new(&ABI_CAFE_REPO.source_dir(), "abi_cafe");
+
+pub(crate) fn run(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ dirs: &Dirs,
+ cg_clif_dylib: &Path,
+ bootstrap_host_compiler: &Compiler,
+) {
+ ABI_CAFE_REPO.fetch(dirs);
+ spawn_and_wait(ABI_CAFE.fetch("cargo", &bootstrap_host_compiler.rustc, dirs));
+
+ eprintln!("Building sysroot for abi-cafe");
+ build_sysroot::build_sysroot(
+ dirs,
+ channel,
+ sysroot_kind,
+ cg_clif_dylib,
+ bootstrap_host_compiler,
+ bootstrap_host_compiler.triple.clone(),
+ );
+
+ eprintln!("Running abi-cafe");
+
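+ // Test calls in both directions between cg_clif-compiled code and rustc(LLVM)- or cc-compiled code.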
+ let pairs = ["rustc_calls_cgclif", "cgclif_calls_rustc", "cgclif_calls_cc", "cc_calls_cgclif"];
+
+ let mut cmd = ABI_CAFE.run(bootstrap_host_compiler, dirs);
+ cmd.arg("--");
+ cmd.arg("--pairs");
+ cmd.args(pairs);
+ cmd.arg("--add-rustc-codegen-backend");
+ cmd.arg(format!("cgclif:{}", cg_clif_dylib.display()));
+ cmd.current_dir(ABI_CAFE.source_dir(dirs));
+
+ spawn_and_wait(cmd);
+}
--- /dev/null
+use std::env;
+use std::fs;
+use std::path::Path;
+
+use super::path::{Dirs, RelPath};
+use super::prepare::GitRepo;
+use super::rustc_info::get_file_name;
+use super::utils::{hyperfine_command, spawn_and_wait, CargoProject, Compiler};
+
+static SIMPLE_RAYTRACER_REPO: GitRepo = GitRepo::github(
+ "ebobby",
+ "simple-raytracer",
+ "804a7a21b9e673a482797aa289a18ed480e4d813",
+ "<none>",
+);
+
+// Use a separate target dir for the initial LLVM build to reduce unnecessary recompiles
+static SIMPLE_RAYTRACER_LLVM: CargoProject =
+ CargoProject::new(&SIMPLE_RAYTRACER_REPO.source_dir(), "simple_raytracer_llvm");
+
+static SIMPLE_RAYTRACER: CargoProject =
+ CargoProject::new(&SIMPLE_RAYTRACER_REPO.source_dir(), "simple_raytracer");
+
+pub(crate) fn benchmark(dirs: &Dirs, bootstrap_host_compiler: &Compiler) {
+ benchmark_simple_raytracer(dirs, bootstrap_host_compiler);
+}
+
+fn benchmark_simple_raytracer(dirs: &Dirs, bootstrap_host_compiler: &Compiler) {
+ if std::process::Command::new("hyperfine").output().is_err() {
+ eprintln!("Hyperfine not installed");
+ eprintln!("Hint: Try `cargo install hyperfine` to install hyperfine");
+ std::process::exit(1);
+ }
+
+ if !SIMPLE_RAYTRACER_REPO.source_dir().to_path(dirs).exists() {
+ SIMPLE_RAYTRACER_REPO.fetch(dirs);
+ spawn_and_wait(SIMPLE_RAYTRACER.fetch(
+ &bootstrap_host_compiler.cargo,
+ &bootstrap_host_compiler.rustc,
+ dirs,
+ ));
+ }
+
+ eprintln!("[LLVM BUILD] simple-raytracer");
+ let build_cmd = SIMPLE_RAYTRACER_LLVM.build(bootstrap_host_compiler, dirs);
+ spawn_and_wait(build_cmd);
+ fs::copy(
+ SIMPLE_RAYTRACER_LLVM
+ .target_dir(dirs)
+ .join(&bootstrap_host_compiler.triple)
+ .join("debug")
+ .join(get_file_name("main", "bin")),
+ RelPath::BUILD.to_path(dirs).join(get_file_name("raytracer_cg_llvm", "bin")),
+ )
+ .unwrap();
+
- "cargo clean --manifest-path {manifest_path} --target-dir {target_dir}",
++ let bench_runs = env::var("BENCH_RUNS").unwrap_or_else(|_| "10".to_string()).parse().unwrap();
+
+ eprintln!("[BENCH COMPILE] ebobby/simple-raytracer");
+ let cargo_clif =
+ RelPath::DIST.to_path(dirs).join(get_file_name("cargo_clif", "bin").replace('_', "-"));
+ let manifest_path = SIMPLE_RAYTRACER.manifest_path(dirs);
+ let target_dir = SIMPLE_RAYTRACER.target_dir(dirs);
+
+ let clean_cmd = format!(
- "cargo build --manifest-path {manifest_path} --target-dir {target_dir}",
++ "RUSTC=rustc cargo clean --manifest-path {manifest_path} --target-dir {target_dir}",
+ manifest_path = manifest_path.display(),
+ target_dir = target_dir.display(),
+ );
+ let llvm_build_cmd = format!(
- "{cargo_clif} build --manifest-path {manifest_path} --target-dir {target_dir}",
++ "RUSTC=rustc cargo build --manifest-path {manifest_path} --target-dir {target_dir}",
+ manifest_path = manifest_path.display(),
+ target_dir = target_dir.display(),
+ );
+ let clif_build_cmd = format!(
+ "RUSTC=rustc {cargo_clif} build --manifest-path {manifest_path} --target-dir {target_dir}",
+ cargo_clif = cargo_clif.display(),
+ manifest_path = manifest_path.display(),
+ target_dir = target_dir.display(),
+ );
+
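+ // Assuming hyperfine_command(warmup_runs, runs, prepare, cmd_a, cmd_b): one warmup, then
+ // bench_runs timed builds of each command, running the clean command before every run.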
+ let bench_compile =
+ hyperfine_command(1, bench_runs, Some(&clean_cmd), &llvm_build_cmd, &clif_build_cmd);
+
+ spawn_and_wait(bench_compile);
+
+ eprintln!("[BENCH RUN] ebobby/simple-raytracer");
+ fs::copy(
+ target_dir.join("debug").join(get_file_name("main", "bin")),
+ RelPath::BUILD.to_path(dirs).join(get_file_name("raytracer_cg_clif", "bin")),
+ )
+ .unwrap();
+
+ let mut bench_run = hyperfine_command(
+ 0,
+ bench_runs,
+ None,
+ Path::new(".").join(get_file_name("raytracer_cg_llvm", "bin")).to_str().unwrap(),
+ Path::new(".").join(get_file_name("raytracer_cg_clif", "bin")).to_str().unwrap(),
+ );
+ bench_run.current_dir(RelPath::BUILD.to_path(dirs));
+ spawn_and_wait(bench_run);
+}
--- /dev/null
+use std::env;
+use std::path::PathBuf;
+
+use super::path::{Dirs, RelPath};
+use super::rustc_info::get_file_name;
- cmd.env("CARGO_PROFILE_RELEASE_DEBUG_ASSERTIONS", "true");
++use super::utils::{is_ci, is_ci_opt, CargoProject, Compiler};
+
+pub(crate) static CG_CLIF: CargoProject = CargoProject::new(&RelPath::SOURCE, "cg_clif");
+
+pub(crate) fn build_backend(
+ dirs: &Dirs,
+ channel: &str,
+ bootstrap_host_compiler: &Compiler,
+ use_unstable_features: bool,
+) -> PathBuf {
+ let mut cmd = CG_CLIF.build(&bootstrap_host_compiler, dirs);
+
+ cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
+
+ let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
+
+ if is_ci() {
+ // Deny warnings on CI
+ rustflags += " -Dwarnings";
+
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ cmd.env("CARGO_BUILD_INCREMENTAL", "false");
+
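+ // CI_OPT=1 (set by the dist and bench jobs) requests a realistically optimized build,
+ // so the extra debug assertions are only forced for regular CI builds.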
+ if !is_ci_opt() {
+ cmd.env("CARGO_PROFILE_RELEASE_DEBUG_ASSERTIONS", "true");
+ }
+ }
+
+ if use_unstable_features {
+ cmd.arg("--features").arg("unstable-features");
+ }
+
+ match channel {
+ "debug" => {}
+ "release" => {
+ cmd.arg("--release");
+ }
+ _ => unreachable!(),
+ }
+
+ cmd.env("RUSTFLAGS", rustflags);
+
+ eprintln!("[BUILD] rustc_codegen_cranelift");
+ super::utils::spawn_and_wait(cmd);
+
+ CG_CLIF
+ .target_dir(dirs)
+ .join(&bootstrap_host_compiler.triple)
+ .join(channel)
+ .join(get_file_name("rustc_codegen_cranelift", "dylib"))
+}
--- /dev/null
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{self, Command};
+
+use super::path::{Dirs, RelPath};
+use super::rustc_info::{get_file_name, get_rustc_version, get_toolchain_name};
+use super::utils::{remove_dir_if_exists, spawn_and_wait, try_hard_link, CargoProject, Compiler};
+use super::SysrootKind;
+
+static DIST_DIR: RelPath = RelPath::DIST;
+static BIN_DIR: RelPath = RelPath::DIST.join("bin");
+static LIB_DIR: RelPath = RelPath::DIST.join("lib");
+
+pub(crate) fn build_sysroot(
+ dirs: &Dirs,
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ cg_clif_dylib_src: &Path,
+ bootstrap_host_compiler: &Compiler,
+ target_triple: String,
+) -> Compiler {
+ eprintln!("[BUILD] sysroot {:?}", sysroot_kind);
+
+ DIST_DIR.ensure_fresh(dirs);
+ BIN_DIR.ensure_exists(dirs);
+ LIB_DIR.ensure_exists(dirs);
+
+ let is_native = bootstrap_host_compiler.triple == target_triple;
+
+ // Copy the backend
+ let cg_clif_dylib_path = if cfg!(windows) {
+ // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
+ // binaries.
+ BIN_DIR
+ } else {
+ LIB_DIR
+ }
+ .to_path(dirs)
+ .join(cg_clif_dylib_src.file_name().unwrap());
+ try_hard_link(cg_clif_dylib_src, &cg_clif_dylib_path);
+
+ // Build and copy rustc and cargo wrappers
+ let wrapper_base_name = get_file_name("____", "bin");
+ let toolchain_name = get_toolchain_name();
+ for wrapper in ["rustc-clif", "rustdoc-clif", "cargo-clif"] {
+ let wrapper_name = wrapper_base_name.replace("____", wrapper);
+
+ let mut build_cargo_wrapper_cmd = Command::new(&bootstrap_host_compiler.rustc);
+ build_cargo_wrapper_cmd
+ .env("TOOLCHAIN_NAME", toolchain_name.clone())
+ .arg(RelPath::SCRIPTS.to_path(dirs).join(&format!("{wrapper}.rs")))
+ .arg("-o")
+ .arg(DIST_DIR.to_path(dirs).join(wrapper_name))
+ .arg("-Cstrip=debuginfo");
+ spawn_and_wait(build_cargo_wrapper_cmd);
+ }
+
+ let host = build_sysroot_for_triple(
+ dirs,
+ channel,
+ bootstrap_host_compiler.clone(),
+ &cg_clif_dylib_path,
+ sysroot_kind,
+ );
+ host.install_into_sysroot(&DIST_DIR.to_path(dirs));
+
+ if !is_native {
+ build_sysroot_for_triple(
+ dirs,
+ channel,
+ {
+ let mut bootstrap_target_compiler = bootstrap_host_compiler.clone();
+ bootstrap_target_compiler.triple = target_triple.clone();
+ bootstrap_target_compiler.set_cross_linker_and_runner();
+ bootstrap_target_compiler
+ },
+ &cg_clif_dylib_path,
+ sysroot_kind,
+ )
+ .install_into_sysroot(&DIST_DIR.to_path(dirs));
+ }
+
+ // Copy std for the host to the lib dir. This is necessary for the jit mode to find
+ // libstd.
+ for lib in host.libs {
+ let filename = lib.file_name().unwrap().to_str().unwrap();
+ if filename.contains("std-") && !filename.contains(".rlib") {
+ try_hard_link(&lib, LIB_DIR.to_path(dirs).join(lib.file_name().unwrap()));
+ }
+ }
+
+ let mut target_compiler = {
+ let dirs: &Dirs = &dirs;
+ let rustc_clif =
+ RelPath::DIST.to_path(&dirs).join(wrapper_base_name.replace("____", "rustc-clif"));
+ let rustdoc_clif =
+ RelPath::DIST.to_path(&dirs).join(wrapper_base_name.replace("____", "rustdoc-clif"));
+
+ Compiler {
+ cargo: bootstrap_host_compiler.cargo.clone(),
+ rustc: rustc_clif.clone(),
+ rustdoc: rustdoc_clif.clone(),
+ rustflags: String::new(),
+ rustdocflags: String::new(),
+ triple: target_triple,
+ runner: vec![],
+ }
+ };
+ if !is_native {
+ target_compiler.set_cross_linker_and_runner();
+ }
+ target_compiler
+}
+
+struct SysrootTarget {
+ triple: String,
+ libs: Vec<PathBuf>,
+}
+
+impl SysrootTarget {
+ fn install_into_sysroot(&self, sysroot: &Path) {
+ if self.libs.is_empty() {
+ return;
+ }
+
+ let target_rustlib_lib = sysroot.join("lib").join("rustlib").join(&self.triple).join("lib");
+ fs::create_dir_all(&target_rustlib_lib).unwrap();
+
+ for lib in &self.libs {
+ try_hard_link(lib, target_rustlib_lib.join(lib.file_name().unwrap()));
+ }
+ }
+}
+
+pub(crate) static ORIG_BUILD_SYSROOT: RelPath = RelPath::SOURCE.join("build_sysroot");
+pub(crate) static BUILD_SYSROOT: RelPath = RelPath::DOWNLOAD.join("sysroot");
+pub(crate) static SYSROOT_RUSTC_VERSION: RelPath = BUILD_SYSROOT.join("rustc_version");
+pub(crate) static SYSROOT_SRC: RelPath = BUILD_SYSROOT.join("sysroot_src");
+pub(crate) static STANDARD_LIBRARY: CargoProject =
+ CargoProject::new(&BUILD_SYSROOT, "build_sysroot");
+pub(crate) static RTSTARTUP_SYSROOT: RelPath = RelPath::BUILD.join("rtstartup");
+
+#[must_use]
+fn build_sysroot_for_triple(
+ dirs: &Dirs,
+ channel: &str,
+ compiler: Compiler,
+ cg_clif_dylib_path: &Path,
+ sysroot_kind: SysrootKind,
+) -> SysrootTarget {
+ match sysroot_kind {
+ SysrootKind::None => build_rtstartup(dirs, &compiler)
+ .unwrap_or(SysrootTarget { triple: compiler.triple, libs: vec![] }),
+ SysrootKind::Llvm => build_llvm_sysroot_for_triple(compiler),
+ SysrootKind::Clif => {
+ build_clif_sysroot_for_triple(dirs, channel, compiler, cg_clif_dylib_path)
+ }
+ }
+}
+
+#[must_use]
+fn build_llvm_sysroot_for_triple(compiler: Compiler) -> SysrootTarget {
+ let default_sysroot = super::rustc_info::get_default_sysroot(&compiler.rustc);
+
+ let mut target_libs = SysrootTarget { triple: compiler.triple, libs: vec![] };
+
+ for entry in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(&target_libs.triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let entry = entry.unwrap();
+ if entry.file_type().unwrap().is_dir() {
+ continue;
+ }
+ let file = entry.path();
+ let file_name_str = file.file_name().unwrap().to_str().unwrap();
+ if (file_name_str.contains("rustc_")
+ && !file_name_str.contains("rustc_std_workspace_")
+ && !file_name_str.contains("rustc_demangle"))
+ || file_name_str.contains("chalk")
+ || file_name_str.contains("tracing")
+ || file_name_str.contains("regex")
+ {
+ // These are large crates that are part of the rustc-dev component and are not
+ // necessary to run regular programs.
+ continue;
+ }
+ target_libs.libs.push(file);
+ }
+
+ target_libs
+}
+
+#[must_use]
+fn build_clif_sysroot_for_triple(
+ dirs: &Dirs,
+ channel: &str,
+ mut compiler: Compiler,
+ cg_clif_dylib_path: &Path,
+) -> SysrootTarget {
+ match fs::read_to_string(SYSROOT_RUSTC_VERSION.to_path(dirs)) {
+ Err(e) => {
+ eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
+ eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
+ process::exit(1);
+ }
+ Ok(source_version) => {
+ let rustc_version = get_rustc_version(&compiler.rustc);
+ if source_version != rustc_version {
+ eprintln!("The patched sysroot source is outdated");
+ eprintln!("Source version: {}", source_version.trim());
+ eprintln!("Rustc version: {}", rustc_version.trim());
+ eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
+ process::exit(1);
+ }
+ }
+ }
+
+ let mut target_libs = SysrootTarget { triple: compiler.triple.clone(), libs: vec![] };
+
+ if let Some(rtstartup_target_libs) = build_rtstartup(dirs, &compiler) {
+ rtstartup_target_libs.install_into_sysroot(&RTSTARTUP_SYSROOT.to_path(dirs));
+
+ target_libs.libs.extend(rtstartup_target_libs.libs);
+ }
+
+ let build_dir = STANDARD_LIBRARY.target_dir(dirs).join(&compiler.triple).join(channel);
+
+ if !super::config::get_bool("keep_sysroot") {
+ // Cleanup the deps dir, but keep build scripts and the incremental cache for faster
+ // recompilation as they are not affected by changes in cg_clif.
+ remove_dir_if_exists(&build_dir.join("deps"));
+ }
+
+ // Build sysroot
+ let mut rustflags = " -Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
+ rustflags.push_str(&format!(" -Zcodegen-backend={}", cg_clif_dylib_path.to_str().unwrap()));
+ // Necessary for MinGW to find rsbegin.o and rsend.o
+ rustflags
+ .push_str(&format!(" --sysroot={}", RTSTARTUP_SYSROOT.to_path(dirs).to_str().unwrap()));
+ if channel == "release" {
+ rustflags.push_str(" -Zmir-opt-level=3");
+ }
+ compiler.rustflags += &rustflags;
+ let mut build_cmd = STANDARD_LIBRARY.build(&compiler, dirs);
+ if channel == "release" {
+ build_cmd.arg("--release");
+ }
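+ // Seed the -Cmetadata hash like rust's bootstrap does, so these sysroot crates cannot
+ // collide with copies of the same crates built in another configuration.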
+ build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
+ if compiler.triple.contains("apple") {
+ build_cmd.env("CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO", "packed");
+ }
+ spawn_and_wait(build_cmd);
+
+ for entry in fs::read_dir(build_dir.join("deps")).unwrap() {
+ let entry = entry.unwrap();
+ if let Some(ext) = entry.path().extension() {
+ if ext == "rmeta" || ext == "d" || ext == "dSYM" || ext == "clif" {
+ continue;
+ }
+ } else {
+ continue;
+ };
+ target_libs.libs.push(entry.path());
+ }
+
+ target_libs
+}
+
+fn build_rtstartup(dirs: &Dirs, compiler: &Compiler) -> Option<SysrootTarget> {
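+ // The rsbegin.o/rsend.o startup objects are only needed for *-windows-gnu targets.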
+ if !compiler.triple.ends_with("windows-gnu") {
+ return None;
+ }
+
+ RTSTARTUP_SYSROOT.ensure_fresh(dirs);
+
+ let rtstartup_src = SYSROOT_SRC.to_path(dirs).join("library").join("rtstartup");
+ let mut target_libs = SysrootTarget { triple: compiler.triple.clone(), libs: vec![] };
+
+ for file in ["rsbegin", "rsend"] {
+ let obj = RTSTARTUP_SYSROOT.to_path(dirs).join(format!("{file}.o"));
+ let mut build_rtstartup_cmd = Command::new(&compiler.rustc);
+ build_rtstartup_cmd
+ .arg("--target")
+ .arg(&compiler.triple)
+ .arg("--emit=obj")
+ .arg("-o")
+ .arg(&obj)
+ .arg(rtstartup_src.join(format!("{file}.rs")));
+ spawn_and_wait(build_rtstartup_cmd);
+ target_libs.libs.push(obj.clone());
+ }
+
+ Some(target_libs)
+}
--- /dev/null
+use std::env;
+use std::path::PathBuf;
+use std::process;
+
+use self::utils::{is_ci, is_ci_opt, Compiler};
+
+mod abi_cafe;
+mod bench;
+mod build_backend;
+mod build_sysroot;
+mod config;
+mod path;
+mod prepare;
+mod rustc_info;
+mod tests;
+mod utils;
+
+fn usage() {
+ eprintln!("{}", include_str!("usage.txt"));
+}
+
+macro_rules! arg_error {
+ ($($err:tt)*) => {{
+ eprintln!($($err)*);
+ usage();
+ std::process::exit(1);
+ }};
+}
+
+#[derive(PartialEq, Debug)]
+enum Command {
+ Prepare,
+ Build,
+ Test,
+ AbiCafe,
+ Bench,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum SysrootKind {
+ None,
+ Clif,
+ Llvm,
+}
+
+pub fn main() {
+ if env::var("RUST_BACKTRACE").is_err() {
+ env::set_var("RUST_BACKTRACE", "1");
+ }
+ env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
+
+ if is_ci() {
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ env::set_var("CARGO_BUILD_INCREMENTAL", "false");
+
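+ // The Cranelift IR verifier catches malformed CLIF early at some compile-time cost,
+ // so optimized CI_OPT=1 runs leave it disabled.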
+ if !is_ci_opt() {
+ // Enable the Cranelift verifier
+ env::set_var("CG_CLIF_ENABLE_VERIFIER", "1");
+ }
+ }
+
+ let mut args = env::args().skip(1);
+ let command = match args.next().as_deref() {
+ Some("prepare") => Command::Prepare,
+ Some("build") => Command::Build,
+ Some("test") => Command::Test,
+ Some("abi-cafe") => Command::AbiCafe,
+ Some("bench") => Command::Bench,
+ Some(flag) if flag.starts_with('-') => arg_error!("Expected command, found flag {}", flag),
+ Some(command) => arg_error!("Unknown command {}", command),
+ None => {
+ usage();
+ process::exit(0);
+ }
+ };
+
+ let mut out_dir = PathBuf::from(".");
+ let mut channel = "release";
+ let mut sysroot_kind = SysrootKind::Clif;
+ let mut use_unstable_features = true;
+ while let Some(arg) = args.next().as_deref() {
+ match arg {
+ "--out-dir" => {
+ out_dir = PathBuf::from(args.next().unwrap_or_else(|| {
+ arg_error!("--out-dir requires argument");
+ }))
+ }
+ "--debug" => channel = "debug",
+ "--sysroot" => {
+ sysroot_kind = match args.next().as_deref() {
+ Some("none") => SysrootKind::None,
+ Some("clif") => SysrootKind::Clif,
+ Some("llvm") => SysrootKind::Llvm,
+ Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
+ None => arg_error!("--sysroot requires argument"),
+ }
+ }
+ "--no-unstable-features" => use_unstable_features = false,
+ flag if flag.starts_with('-') => arg_error!("Unknown flag {}", flag),
+ arg => arg_error!("Unexpected argument {}", arg),
+ }
+ }
+
+ let bootstrap_host_compiler = Compiler::bootstrap_with_triple(
+ std::env::var("HOST_TRIPLE")
+ .ok()
+ .or_else(|| config::get_value("host"))
+ .unwrap_or_else(|| rustc_info::get_host_triple()),
+ );
+ let target_triple = std::env::var("TARGET_TRIPLE")
+ .ok()
+ .or_else(|| config::get_value("target"))
+ .unwrap_or_else(|| bootstrap_host_compiler.triple.clone());
+
+ // FIXME allow changing the location of these dirs using cli arguments
+ let current_dir = std::env::current_dir().unwrap();
+ out_dir = current_dir.join(out_dir);
+ let dirs = path::Dirs {
+ source_dir: current_dir.clone(),
+ download_dir: out_dir.join("download"),
+ build_dir: out_dir.join("build"),
+ dist_dir: out_dir.join("dist"),
+ };
+
+ path::RelPath::BUILD.ensure_exists(&dirs);
+
+ {
+ // Make sure we always explicitly specify the target dir
+ let target =
+ path::RelPath::BUILD.join("target_dir_should_be_set_explicitly").to_path(&dirs);
+ env::set_var("CARGO_TARGET_DIR", &target);
+ let _ = std::fs::remove_file(&target);
+ std::fs::File::create(target).unwrap();
+ }
+
+ if command == Command::Prepare {
+ prepare::prepare(&dirs);
+ process::exit(0);
+ }
+
+ env::set_var("RUSTC", "rustc_should_be_set_explicitly");
+ env::set_var("RUSTDOC", "rustdoc_should_be_set_explicitly");
+
+ let cg_clif_dylib = build_backend::build_backend(
+ &dirs,
+ channel,
+ &bootstrap_host_compiler,
+ use_unstable_features,
+ );
+ match command {
+ Command::Prepare => {
+ // Handled above
+ }
+ Command::Test => {
+ tests::run_tests(
+ &dirs,
+ channel,
+ sysroot_kind,
+ &cg_clif_dylib,
+ &bootstrap_host_compiler,
+ target_triple.clone(),
+ );
+ }
+ Command::AbiCafe => {
+ if bootstrap_host_compiler.triple != target_triple {
+ eprintln!("Abi-cafe doesn't support cross-compilation");
+ process::exit(1);
+ }
+ abi_cafe::run(channel, sysroot_kind, &dirs, &cg_clif_dylib, &bootstrap_host_compiler);
+ }
+ Command::Build => {
+ build_sysroot::build_sysroot(
+ &dirs,
+ channel,
+ sysroot_kind,
+ &cg_clif_dylib,
+ &bootstrap_host_compiler,
+ target_triple,
+ );
+ }
+ Command::Bench => {
+ build_sysroot::build_sysroot(
+ &dirs,
+ channel,
+ sysroot_kind,
+ &cg_clif_dylib,
+ &bootstrap_host_compiler,
+ target_triple,
+ );
+ bench::benchmark(&dirs, &bootstrap_host_compiler);
+ }
+ }
+}
--- /dev/null
+use std::ffi::OsStr;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+use super::build_sysroot::{BUILD_SYSROOT, ORIG_BUILD_SYSROOT, SYSROOT_RUSTC_VERSION, SYSROOT_SRC};
+use super::path::{Dirs, RelPath};
+use super::rustc_info::{get_default_sysroot, get_rustc_version};
+use super::utils::{copy_dir_recursively, git_command, retry_spawn_and_wait, spawn_and_wait};
+
+pub(crate) fn prepare(dirs: &Dirs) {
+ RelPath::DOWNLOAD.ensure_fresh(dirs);
+
+ spawn_and_wait(super::build_backend::CG_CLIF.fetch("cargo", "rustc", dirs));
+
+ prepare_sysroot(dirs);
+ spawn_and_wait(super::build_sysroot::STANDARD_LIBRARY.fetch("cargo", "rustc", dirs));
+ spawn_and_wait(super::tests::LIBCORE_TESTS.fetch("cargo", "rustc", dirs));
+
+ super::tests::RAND_REPO.fetch(dirs);
+ spawn_and_wait(super::tests::RAND.fetch("cargo", "rustc", dirs));
+ super::tests::REGEX_REPO.fetch(dirs);
+ spawn_and_wait(super::tests::REGEX.fetch("cargo", "rustc", dirs));
+ super::tests::PORTABLE_SIMD_REPO.fetch(dirs);
+ spawn_and_wait(super::tests::PORTABLE_SIMD.fetch("cargo", "rustc", dirs));
+}
+
+fn prepare_sysroot(dirs: &Dirs) {
+ let sysroot_src_orig = get_default_sysroot(Path::new("rustc")).join("lib/rustlib/src/rust");
+ assert!(sysroot_src_orig.exists());
+
+ eprintln!("[COPY] sysroot src");
+
+ // FIXME ensure builds error out or update the copy if any of the files copied here change
+ BUILD_SYSROOT.ensure_fresh(dirs);
+ copy_dir_recursively(&ORIG_BUILD_SYSROOT.to_path(dirs), &BUILD_SYSROOT.to_path(dirs));
+
+ fs::create_dir_all(SYSROOT_SRC.to_path(dirs).join("library")).unwrap();
+ copy_dir_recursively(
+ &sysroot_src_orig.join("library"),
+ &SYSROOT_SRC.to_path(dirs).join("library"),
+ );
+
+ let rustc_version = get_rustc_version(Path::new("rustc"));
+ fs::write(SYSROOT_RUSTC_VERSION.to_path(dirs), &rustc_version).unwrap();
+
+ eprintln!("[GIT] init");
+ init_git_repo(&SYSROOT_SRC.to_path(dirs));
+
+ apply_patches(dirs, "sysroot", &SYSROOT_SRC.to_path(dirs));
+}
+
+pub(crate) struct GitRepo {
+ url: GitRepoUrl,
+ rev: &'static str,
+ patch_name: &'static str,
+}
+
+enum GitRepoUrl {
+ Github { user: &'static str, repo: &'static str },
+}
+
+impl GitRepo {
+ pub(crate) const fn github(
+ user: &'static str,
+ repo: &'static str,
+ rev: &'static str,
+ patch_name: &'static str,
+ ) -> GitRepo {
+ GitRepo { url: GitRepoUrl::Github { user, repo }, rev, patch_name }
+ }
+
+ pub(crate) const fn source_dir(&self) -> RelPath {
+ match self.url {
+ GitRepoUrl::Github { user: _, repo } => RelPath::DOWNLOAD.join(repo),
+ }
+ }
+
++ pub(crate) fn fetch(&self, dirs: &Dirs) {
+ match self.url {
+ GitRepoUrl::Github { user, repo } => {
+ clone_repo_shallow_github(
+ dirs,
+ &self.source_dir().to_path(dirs),
+ user,
+ repo,
+ self.rev,
+ );
+ }
+ }
+ apply_patches(dirs, self.patch_name, &self.source_dir().to_path(dirs));
+ }
+}
+
+#[allow(dead_code)]
+fn clone_repo(download_dir: &Path, repo: &str, rev: &str) {
+ eprintln!("[CLONE] {}", repo);
+ // Ignore exit code as the repo may already have been checked out
+ git_command(None, "clone").arg(repo).arg(download_dir).spawn().unwrap().wait().unwrap();
+
+ let mut clean_cmd = git_command(download_dir, "checkout");
+ clean_cmd.arg("--").arg(".");
+ spawn_and_wait(clean_cmd);
+
+ let mut checkout_cmd = git_command(download_dir, "checkout");
+ checkout_cmd.arg("-q").arg(rev);
+ spawn_and_wait(checkout_cmd);
+}
+
+fn clone_repo_shallow_github(dirs: &Dirs, download_dir: &Path, user: &str, repo: &str, rev: &str) {
+ if cfg!(windows) {
+ // Older windows doesn't have tar or curl by default. Fall back to using git.
+ clone_repo(download_dir, &format!("https://github.com/{}/{}.git", user, repo), rev);
+ return;
+ }
+
+ let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", user, repo, rev);
+ let archive_file = RelPath::DOWNLOAD.to_path(dirs).join(format!("{}.tar.gz", rev));
+ let archive_dir = RelPath::DOWNLOAD.to_path(dirs).join(format!("{}-{}", repo, rev));
+
+ eprintln!("[DOWNLOAD] {}/{} from {}", user, repo, archive_url);
+
+ // Remove previous results if they exist
+ let _ = std::fs::remove_file(&archive_file);
+ let _ = std::fs::remove_dir_all(&archive_dir);
+ let _ = std::fs::remove_dir_all(&download_dir);
+
+ // Download tar.gz archive
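+ // -y/-Y abort the download if it stays below 10 bytes/s for 30 seconds;
+ // --continue-at - resumes a partially downloaded file on retry.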
+ let mut download_cmd = Command::new("curl");
+ download_cmd
+ .arg("--max-time")
+ .arg("600")
+ .arg("-y")
+ .arg("30")
+ .arg("-Y")
+ .arg("10")
+ .arg("--connect-timeout")
+ .arg("30")
+ .arg("--continue-at")
+ .arg("-")
+ .arg("--location")
+ .arg("--output")
+ .arg(&archive_file)
+ .arg(archive_url);
+ retry_spawn_and_wait(5, download_cmd);
+
+ // Unpack tar archive
+ let mut unpack_cmd = Command::new("tar");
+ unpack_cmd.arg("xf").arg(&archive_file).current_dir(RelPath::DOWNLOAD.to_path(dirs));
+ spawn_and_wait(unpack_cmd);
+
+ // Rename unpacked dir to the expected name
+ std::fs::rename(archive_dir, &download_dir).unwrap();
+
+ init_git_repo(&download_dir);
+
+ // Cleanup
+ std::fs::remove_file(archive_file).unwrap();
+}
+
+fn init_git_repo(repo_dir: &Path) {
+ let mut git_init_cmd = git_command(repo_dir, "init");
+ git_init_cmd.arg("-q");
+ spawn_and_wait(git_init_cmd);
+
+ let mut git_add_cmd = git_command(repo_dir, "add");
+ git_add_cmd.arg(".");
+ spawn_and_wait(git_add_cmd);
+
+ let mut git_commit_cmd = git_command(repo_dir, "commit");
+ git_commit_cmd.arg("-m").arg("Initial commit").arg("-q");
+ spawn_and_wait(git_commit_cmd);
+}
+
+fn get_patches(dirs: &Dirs, crate_name: &str) -> Vec<PathBuf> {
+ let mut patches: Vec<_> = fs::read_dir(RelPath::PATCHES.to_path(dirs))
+ .unwrap()
+ .map(|entry| entry.unwrap().path())
+ .filter(|path| path.extension() == Some(OsStr::new("patch")))
+ .filter(|path| {
+ path.file_name()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .split_once("-")
+ .unwrap()
+ .1
+ .starts_with(crate_name)
+ })
+ .collect();
+ patches.sort();
+ patches
+}
+
+fn apply_patches(dirs: &Dirs, crate_name: &str, target_dir: &Path) {
+ if crate_name == "<none>" {
+ return;
+ }
+
+ for patch in get_patches(dirs, crate_name) {
+ eprintln!(
+ "[PATCH] {:?} <- {:?}",
+ target_dir.file_name().unwrap(),
+ patch.file_name().unwrap()
+ );
+ let mut apply_patch_cmd = git_command(target_dir, "am");
+ apply_patch_cmd.arg(patch).arg("-q");
+ spawn_and_wait(apply_patch_cmd);
+ }
+}
--- /dev/null
+use super::build_sysroot::{self, SYSROOT_SRC};
+use super::config;
+use super::path::{Dirs, RelPath};
+use super::prepare::GitRepo;
+use super::rustc_info::get_host_triple;
+use super::utils::{spawn_and_wait, spawn_and_wait_with_input, CargoProject, Compiler};
+use super::SysrootKind;
+use std::env;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
+static BUILD_EXAMPLE_OUT_DIR: RelPath = RelPath::BUILD.join("example");
+
+struct TestCase {
+ config: &'static str,
+ cmd: TestCaseCmd,
+}
+
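+// How a test case is executed: a custom closure, a library-only rustc invocation, an AOT
+// build whose binary is then run, or a JIT run via -Cllvm-args=mode=jit.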
+enum TestCaseCmd {
+ Custom { func: &'static dyn Fn(&TestRunner) },
+ BuildLib { source: &'static str, crate_types: &'static str },
+ BuildBinAndRun { source: &'static str, args: &'static [&'static str] },
+ JitBin { source: &'static str, args: &'static str },
+}
+
+impl TestCase {
+ // FIXME reduce usage of custom test case commands
+ const fn custom(config: &'static str, func: &'static dyn Fn(&TestRunner)) -> Self {
+ Self { config, cmd: TestCaseCmd::Custom { func } }
+ }
+
+ const fn build_lib(
+ config: &'static str,
+ source: &'static str,
+ crate_types: &'static str,
+ ) -> Self {
+ Self { config, cmd: TestCaseCmd::BuildLib { source, crate_types } }
+ }
+
+ const fn build_bin_and_run(
+ config: &'static str,
+ source: &'static str,
+ args: &'static [&'static str],
+ ) -> Self {
+ Self { config, cmd: TestCaseCmd::BuildBinAndRun { source, args } }
+ }
+
+ const fn jit_bin(config: &'static str, source: &'static str, args: &'static str) -> Self {
+ Self { config, cmd: TestCaseCmd::JitBin { source, args } }
+ }
+}
+
+const NO_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::build_lib("build.mini_core", "example/mini_core.rs", "lib,dylib"),
+ TestCase::build_lib("build.example", "example/example.rs", "lib"),
+ TestCase::jit_bin("jit.mini_core_hello_world", "example/mini_core_hello_world.rs", "abc bcd"),
+ TestCase::build_bin_and_run(
+ "aot.mini_core_hello_world",
+ "example/mini_core_hello_world.rs",
+ &["abc", "bcd"],
+ ),
+];
+
+const BASE_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::build_bin_and_run(
+ "aot.arbitrary_self_types_pointers_and_wrappers",
+ "example/arbitrary_self_types_pointers_and_wrappers.rs",
+ &[],
+ ),
+ TestCase::build_bin_and_run(
+ "aot.issue_91827_extern_types",
+ "example/issue-91827-extern-types.rs",
+ &[],
+ ),
+ TestCase::build_lib("build.alloc_system", "example/alloc_system.rs", "lib"),
+ TestCase::build_bin_and_run("aot.alloc_example", "example/alloc_example.rs", &[]),
+ TestCase::jit_bin("jit.std_example", "example/std_example.rs", ""),
+ TestCase::build_bin_and_run("aot.std_example", "example/std_example.rs", &["arg"]),
+ TestCase::build_bin_and_run("aot.dst_field_align", "example/dst-field-align.rs", &[]),
+ TestCase::build_bin_and_run(
+ "aot.subslice-patterns-const-eval",
+ "example/subslice-patterns-const-eval.rs",
+ &[],
+ ),
+ TestCase::build_bin_and_run(
+ "aot.track-caller-attribute",
+ "example/track-caller-attribute.rs",
+ &[],
+ ),
+ TestCase::build_bin_and_run("aot.float-minmax-pass", "example/float-minmax-pass.rs", &[]),
+ TestCase::build_bin_and_run("aot.mod_bench", "example/mod_bench.rs", &[]),
+ TestCase::build_bin_and_run("aot.issue-72793", "example/issue-72793.rs", &[]),
+];
+
+pub(crate) static RAND_REPO: GitRepo =
+ GitRepo::github("rust-random", "rand", "0f933f9c7176e53b2a3c7952ded484e1783f0bf1", "rand");
+
+pub(crate) static RAND: CargoProject = CargoProject::new(&RAND_REPO.source_dir(), "rand");
+
+pub(crate) static REGEX_REPO: GitRepo =
+ GitRepo::github("rust-lang", "regex", "341f207c1071f7290e3f228c710817c280c8dca1", "regex");
+
+pub(crate) static REGEX: CargoProject = CargoProject::new(&REGEX_REPO.source_dir(), "regex");
+
+pub(crate) static PORTABLE_SIMD_REPO: GitRepo = GitRepo::github(
+ "rust-lang",
+ "portable-simd",
+ "582239ac3b32007613df04d7ffa78dc30f4c5645",
+ "portable-simd",
+);
+
+pub(crate) static PORTABLE_SIMD: CargoProject =
+ CargoProject::new(&PORTABLE_SIMD_REPO.source_dir(), "portable_simd");
+
+pub(crate) static LIBCORE_TESTS: CargoProject =
+ CargoProject::new(&SYSROOT_SRC.join("library/core/tests"), "core_tests");
+
+const EXTENDED_SYSROOT_SUITE: &[TestCase] = &[
+ TestCase::custom("test.rust-random/rand", &|runner| {
+ RAND.clean(&runner.dirs);
+
+ if runner.is_native {
+ eprintln!("[TEST] rust-random/rand");
+ let mut test_cmd = RAND.test(&runner.target_compiler, &runner.dirs);
+ test_cmd.arg("--workspace");
+ spawn_and_wait(test_cmd);
+ } else {
+ eprintln!("[AOT] rust-random/rand");
+ let mut build_cmd = RAND.build(&runner.target_compiler, &runner.dirs);
+ build_cmd.arg("--workspace").arg("--tests");
+ spawn_and_wait(build_cmd);
+ }
+ }),
- TestCase::custom("test.simple-raytracer", &|runner| {
- SIMPLE_RAYTRACER.clean(&runner.dirs);
- spawn_and_wait(SIMPLE_RAYTRACER.build(&runner.target_compiler, &runner.dirs));
- }),
+ TestCase::custom("test.libcore", &|runner| {
+ LIBCORE_TESTS.clean(&runner.dirs);
+
+ if runner.is_native {
+ spawn_and_wait(LIBCORE_TESTS.test(&runner.target_compiler, &runner.dirs));
+ } else {
+ eprintln!("Cross-Compiling: Not running tests");
+ let mut build_cmd = LIBCORE_TESTS.build(&runner.target_compiler, &runner.dirs);
+ build_cmd.arg("--tests");
+ spawn_and_wait(build_cmd);
+ }
+ }),
+ TestCase::custom("test.regex-shootout-regex-dna", &|runner| {
+ REGEX.clean(&runner.dirs);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.target_compiler.rustflags);
+
+ let mut build_cmd = REGEX.build(&runner.target_compiler, &runner.dirs);
+ build_cmd.arg("--example").arg("shootout-regex-dna");
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+
+ if runner.is_native {
+ let mut run_cmd = REGEX.run(&runner.target_compiler, &runner.dirs);
+ run_cmd.arg("--example").arg("shootout-regex-dna");
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+
+ let input = fs::read_to_string(
+ REGEX.source_dir(&runner.dirs).join("examples").join("regexdna-input.txt"),
+ )
+ .unwrap();
+ let expected = fs::read_to_string(
+ REGEX.source_dir(&runner.dirs).join("examples").join("regexdna-output.txt"),
+ )
+ .unwrap();
+
+ let output = spawn_and_wait_with_input(run_cmd, input);
+ // Make sure `[codegen mono items] start` doesn't poison the diff
+ let output = output
+ .lines()
+ .filter(|line| !line.contains("codegen mono items"))
+ .chain(Some("")) // This just adds the trailing newline
+ .collect::<Vec<&str>>()
+ .join("\r\n");
+
+ let output_matches = expected.lines().eq(output.lines());
+ if !output_matches {
+ println!("Output files don't match!");
+ println!("Expected Output:\n{}", expected);
+ println!("Actual Output:\n{}", output);
+
+ std::process::exit(1);
+ }
+ }
+ }),
+ TestCase::custom("test.regex", &|runner| {
+ REGEX.clean(&runner.dirs);
+
+ // newer aho_corasick versions throw a deprecation warning
+ let lint_rust_flags = format!("{} --cap-lints warn", runner.target_compiler.rustflags);
+
+ if runner.is_native {
+ let mut run_cmd = REGEX.test(&runner.target_compiler, &runner.dirs);
+ run_cmd.args([
+ "--tests",
+ "--",
+ "--exclude-should-panic",
+ "--test-threads",
+ "1",
+ "-Zunstable-options",
+ "-q",
+ ]);
+ run_cmd.env("RUSTFLAGS", lint_rust_flags);
+ spawn_and_wait(run_cmd);
+ } else {
+ eprintln!("Cross-Compiling: Not running tests");
+ let mut build_cmd = REGEX.build(&runner.target_compiler, &runner.dirs);
+ build_cmd.arg("--tests");
+ build_cmd.env("RUSTFLAGS", lint_rust_flags.clone());
+ spawn_and_wait(build_cmd);
+ }
+ }),
+ TestCase::custom("test.portable-simd", &|runner| {
+ PORTABLE_SIMD.clean(&runner.dirs);
+
+ let mut build_cmd = PORTABLE_SIMD.build(&runner.target_compiler, &runner.dirs);
+ build_cmd.arg("--all-targets");
+ spawn_and_wait(build_cmd);
+
+ if runner.is_native {
+ let mut test_cmd = PORTABLE_SIMD.test(&runner.target_compiler, &runner.dirs);
+ test_cmd.arg("-q");
+ spawn_and_wait(test_cmd);
+ }
+ }),
+];
+
+pub(crate) fn run_tests(
+ dirs: &Dirs,
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ cg_clif_dylib: &Path,
+ bootstrap_host_compiler: &Compiler,
+ target_triple: String,
+) {
+ if config::get_bool("testsuite.no_sysroot") {
+ let target_compiler = build_sysroot::build_sysroot(
+ dirs,
+ channel,
+ SysrootKind::None,
+ cg_clif_dylib,
+ bootstrap_host_compiler,
+ target_triple.clone(),
+ );
+
+ let runner =
+ TestRunner::new(dirs.clone(), target_compiler, get_host_triple() == target_triple);
+
+ BUILD_EXAMPLE_OUT_DIR.ensure_fresh(dirs);
+ runner.run_testsuite(NO_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] no_sysroot tests");
+ }
+
+ let run_base_sysroot = config::get_bool("testsuite.base_sysroot");
+ let run_extended_sysroot = config::get_bool("testsuite.extended_sysroot");
+
+ if run_base_sysroot || run_extended_sysroot {
+ let target_compiler = build_sysroot::build_sysroot(
+ dirs,
+ channel,
+ sysroot_kind,
+ cg_clif_dylib,
+ bootstrap_host_compiler,
+ target_triple.clone(),
+ );
+
+ let runner =
+ TestRunner::new(dirs.clone(), target_compiler, get_host_triple() == target_triple);
+
+ if run_base_sysroot {
+ runner.run_testsuite(BASE_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] base_sysroot tests");
+ }
+
+ if run_extended_sysroot {
+ runner.run_testsuite(EXTENDED_SYSROOT_SUITE);
+ } else {
+ eprintln!("[SKIP] extended_sysroot tests");
+ }
+ }
+}
+
+struct TestRunner {
+ is_native: bool,
+ jit_supported: bool,
+ dirs: Dirs,
+ target_compiler: Compiler,
+}
+
+impl TestRunner {
+ pub fn new(dirs: Dirs, mut target_compiler: Compiler, is_native: bool) -> Self {
+ if let Ok(rustflags) = env::var("RUSTFLAGS") {
+ target_compiler.rustflags.push(' ');
+ target_compiler.rustflags.push_str(&rustflags);
+ }
+ if let Ok(rustdocflags) = env::var("RUSTDOCFLAGS") {
+ target_compiler.rustdocflags.push(' ');
+ target_compiler.rustdocflags.push_str(&rustdocflags);
+ }
+
+ // FIXME fix `#[linkage = "extern_weak"]` without this
+ if target_compiler.triple.contains("darwin") {
+ target_compiler.rustflags.push_str(" -Clink-arg=-undefined -Clink-arg=dynamic_lookup");
+ }
+
+ let jit_supported = is_native
+ && target_compiler.triple.contains("x86_64")
+ && !target_compiler.triple.contains("windows");
+
+ Self { is_native, jit_supported, dirs, target_compiler }
+ }
+
+ pub fn run_testsuite(&self, tests: &[TestCase]) {
+ for TestCase { config, cmd } in tests {
+ let (tag, testname) = config.split_once('.').unwrap();
+ let tag = tag.to_uppercase();
+ let is_jit_test = tag == "JIT";
+
+ if !config::get_bool(config) || (is_jit_test && !self.jit_supported) {
+ eprintln!("[{tag}] {testname} (skipped)");
+ continue;
+ } else {
+ eprintln!("[{tag}] {testname}");
+ }
+
+ match *cmd {
+ TestCaseCmd::Custom { func } => func(self),
+ TestCaseCmd::BuildLib { source, crate_types } => {
+ self.run_rustc([source, "--crate-type", crate_types]);
+ }
+ TestCaseCmd::BuildBinAndRun { source, args } => {
+ self.run_rustc([source]);
+ self.run_out_command(
+ source.split('/').last().unwrap().split('.').next().unwrap(),
+ args,
+ );
+ }
+ TestCaseCmd::JitBin { source, args } => {
+ let mut jit_cmd = self.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit",
+ "-Cprefer-dynamic",
+ source,
+ "--cfg",
+ "jit",
+ ]);
+ if !args.is_empty() {
+ jit_cmd.env("CG_CLIF_JIT_ARGS", args);
+ }
+ spawn_and_wait(jit_cmd);
+
+ eprintln!("[JIT-lazy] {testname}");
+ let mut jit_cmd = self.rustc_command([
+ "-Zunstable-options",
+ "-Cllvm-args=mode=jit-lazy",
+ "-Cprefer-dynamic",
+ source,
+ "--cfg",
+ "jit",
+ ]);
+ if !args.is_empty() {
+ jit_cmd.env("CG_CLIF_JIT_ARGS", args);
+ }
+ spawn_and_wait(jit_cmd);
+ }
+ }
+ }
+ }
+
+ #[must_use]
+ fn rustc_command<I, S>(&self, args: I) -> Command
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ let mut cmd = Command::new(&self.target_compiler.rustc);
+ cmd.args(self.target_compiler.rustflags.split_whitespace());
+ cmd.arg("-L");
+ cmd.arg(format!("crate={}", BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).display()));
+ cmd.arg("--out-dir");
+ cmd.arg(format!("{}", BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).display()));
+ cmd.arg("-Cdebuginfo=2");
+ cmd.arg("--target");
+ cmd.arg(&self.target_compiler.triple);
+ cmd.arg("-Cpanic=abort");
+ cmd.args(args);
+ cmd
+ }
+
+ fn run_rustc<I, S>(&self, args: I)
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ spawn_and_wait(self.rustc_command(args));
+ }
+
+ fn run_out_command<'a>(&self, name: &str, args: &[&str]) {
+ let mut full_cmd = vec![];
+
+ // Prepend the RUN_WRAPPER's
+ if !self.target_compiler.runner.is_empty() {
+ full_cmd.extend(self.target_compiler.runner.iter().cloned());
+ }
+
+ full_cmd.push(
+ BUILD_EXAMPLE_OUT_DIR.to_path(&self.dirs).join(name).to_str().unwrap().to_string(),
+ );
+
+ for arg in args {
+ full_cmd.push(arg.to_string());
+ }
+
+ let mut cmd_iter = full_cmd.into_iter();
+ let first = cmd_iter.next().unwrap();
+
+ let mut cmd = Command::new(first);
+ cmd.args(cmd_iter);
+
+ spawn_and_wait(cmd);
+ }
+}
--- /dev/null
- pub(crate) fn fetch(&self, cargo: impl AsRef<Path>, dirs: &Dirs) -> Command {
+use std::env;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::process::{self, Command, Stdio};
+
+use super::path::{Dirs, RelPath};
+use super::rustc_info::{get_cargo_path, get_rustc_path, get_rustdoc_path};
+
+#[derive(Clone, Debug)]
+pub(crate) struct Compiler {
+ pub(crate) cargo: PathBuf,
+ pub(crate) rustc: PathBuf,
+ pub(crate) rustdoc: PathBuf,
+ pub(crate) rustflags: String,
+ pub(crate) rustdocflags: String,
+ pub(crate) triple: String,
+ pub(crate) runner: Vec<String>,
+}
+
+impl Compiler {
+ pub(crate) fn bootstrap_with_triple(triple: String) -> Compiler {
+ Compiler {
+ cargo: get_cargo_path(),
+ rustc: get_rustc_path(),
+ rustdoc: get_rustdoc_path(),
+ rustflags: String::new(),
+ rustdocflags: String::new(),
+ triple,
+ runner: vec![],
+ }
+ }
+
+ pub(crate) fn set_cross_linker_and_runner(&mut self) {
+ match self.triple.as_str() {
+ "aarch64-unknown-linux-gnu" => {
+ // We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ self.rustflags += " -Clinker=aarch64-linux-gnu-gcc";
+ self.rustdocflags += " -Clinker=aarch64-linux-gnu-gcc";
+ self.runner = vec![
+ "qemu-aarch64".to_owned(),
+ "-L".to_owned(),
+ "/usr/aarch64-linux-gnu".to_owned(),
+ ];
+ }
+ "s390x-unknown-linux-gnu" => {
+ // We are cross-compiling for s390x. Use the correct linker and run tests in qemu.
+ self.rustflags += " -Clinker=s390x-linux-gnu-gcc";
+ self.rustdocflags += " -Clinker=s390x-linux-gnu-gcc";
+ self.runner = vec![
+ "qemu-s390x".to_owned(),
+ "-L".to_owned(),
+ "/usr/s390x-linux-gnu".to_owned(),
+ ];
+ }
+ "x86_64-pc-windows-gnu" => {
+ // We are cross-compiling for Windows. Run tests in wine.
+ self.runner = vec!["wine".to_owned()];
+ }
+ _ => {
+ println!("Unknown non-native platform");
+ }
+ }
+ }
+}
+
+pub(crate) struct CargoProject {
+ source: &'static RelPath,
+ target: &'static str,
+}
+
+impl CargoProject {
+ pub(crate) const fn new(path: &'static RelPath, target: &'static str) -> CargoProject {
+ CargoProject { source: path, target }
+ }
+
+ pub(crate) fn source_dir(&self, dirs: &Dirs) -> PathBuf {
+ self.source.to_path(dirs)
+ }
+
+ pub(crate) fn manifest_path(&self, dirs: &Dirs) -> PathBuf {
+ self.source_dir(dirs).join("Cargo.toml")
+ }
+
+ pub(crate) fn target_dir(&self, dirs: &Dirs) -> PathBuf {
+ RelPath::BUILD.join(self.target).to_path(dirs)
+ }
+
+ #[must_use]
+ fn base_cmd(&self, command: &str, cargo: &Path, dirs: &Dirs) -> Command {
+ let mut cmd = Command::new(cargo);
+
+ cmd.arg(command)
+ .arg("--manifest-path")
+ .arg(self.manifest_path(dirs))
+ .arg("--target-dir")
+ .arg(self.target_dir(dirs))
+ .arg("--frozen");
+
+ cmd
+ }
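+
+ // Note: `--frozen` implies both `--locked` and `--offline`, so all
+ // dependencies must already have been fetched (see `fetch` below).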
+
+ #[must_use]
+ fn build_cmd(&self, command: &str, compiler: &Compiler, dirs: &Dirs) -> Command {
+ let mut cmd = self.base_cmd(command, &compiler.cargo, dirs);
+
+ cmd.arg("--target").arg(&compiler.triple);
+
+ cmd.env("RUSTC", &compiler.rustc);
+ cmd.env("RUSTDOC", &compiler.rustdoc);
+ cmd.env("RUSTFLAGS", &compiler.rustflags);
+ cmd.env("RUSTDOCFLAGS", &compiler.rustdocflags);
+ if !compiler.runner.is_empty() {
+ cmd.env(
+ format!("CARGO_TARGET_{}_RUNNER", compiler.triple.to_uppercase().replace('-', "_")),
+ compiler.runner.join(" "),
+ );
+ }
+
+ cmd
+ }
+
+ #[must_use]
- cmd.arg("fetch").arg("--manifest-path").arg(self.manifest_path(dirs));
++ pub(crate) fn fetch(
++ &self,
++ cargo: impl AsRef<Path>,
++ rustc: impl AsRef<Path>,
++ dirs: &Dirs,
++ ) -> Command {
+ let mut cmd = Command::new(cargo.as_ref());
+
- env::var("CI").as_deref() == Ok("true")
++ cmd.env("RUSTC", rustc.as_ref())
++ .arg("fetch")
++ .arg("--manifest-path")
++ .arg(self.manifest_path(dirs));
+
+ cmd
+ }
+
+ pub(crate) fn clean(&self, dirs: &Dirs) {
+ let _ = fs::remove_dir_all(self.target_dir(dirs));
+ }
+
+ #[must_use]
+ pub(crate) fn build(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
+ self.build_cmd("build", compiler, dirs)
+ }
+
+ #[must_use]
+ pub(crate) fn test(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
+ self.build_cmd("test", compiler, dirs)
+ }
+
+ #[must_use]
+ pub(crate) fn run(&self, compiler: &Compiler, dirs: &Dirs) -> Command {
+ self.build_cmd("run", compiler, dirs)
+ }
+}
+
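+/// Build a hyperfine invocation comparing two commands. Passing 0 for
+/// `warmup` or `runs` omits the corresponding flag, keeping hyperfine's
+/// default. The resulting command looks like (illustrative values):
+/// `hyperfine --warmup 1 --runs 10 --prepare "<prepare>" "<a>" "<b>"`.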
+#[must_use]
+pub(crate) fn hyperfine_command(
+ warmup: u64,
+ runs: u64,
+ prepare: Option<&str>,
+ a: &str,
+ b: &str,
+) -> Command {
+ let mut bench = Command::new("hyperfine");
+
+ if warmup != 0 {
+ bench.arg("--warmup").arg(warmup.to_string());
+ }
+
+ if runs != 0 {
+ bench.arg("--runs").arg(runs.to_string());
+ }
+
+ if let Some(prepare) = prepare {
+ bench.arg("--prepare").arg(prepare);
+ }
+
+ bench.arg(a).arg(b);
+
+ bench
+}
+
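+/// Create a git command with a dummy identity and autocrlf disabled, so that
+/// scripted commits and patch application behave identically everywhere,
+/// e.g. (illustrative) `git -c user.name=Dummy -c user.email=dummy@example.com
+/// -c core.autocrlf=false am some.patch`.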
+#[must_use]
+pub(crate) fn git_command<'a>(repo_dir: impl Into<Option<&'a Path>>, cmd: &str) -> Command {
+ let mut git_cmd = Command::new("git");
+ git_cmd
+ .arg("-c")
+ .arg("user.name=Dummy")
+ .arg("-c")
+ .arg("user.email=dummy@example.com")
+ .arg("-c")
+ .arg("core.autocrlf=false")
+ .arg(cmd);
+ if let Some(repo_dir) = repo_dir.into() {
+ git_cmd.current_dir(repo_dir);
+ }
+ git_cmd
+}
+
+#[track_caller]
+pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
+ let src = src.as_ref();
+ let dst = dst.as_ref();
+ if let Err(_) = fs::hard_link(src, dst) {
+ fs::copy(src, dst).unwrap(); // Fallback to copying if hardlinking failed
+ }
+}
+
+#[track_caller]
+pub(crate) fn spawn_and_wait(mut cmd: Command) {
+ if !cmd.spawn().unwrap().wait().unwrap().success() {
+ process::exit(1);
+ }
+}
+
+// Based on the retry function in rust's src/ci/shared.sh
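+// The delay grows linearly: 5s after the first failed attempt, 10s after the
+// second, and so on.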
+#[track_caller]
+pub(crate) fn retry_spawn_and_wait(tries: u64, mut cmd: Command) {
+ for i in 1..tries + 1 {
+ if i != 1 {
+ println!("Command failed. Attempt {i}/{tries}:");
+ }
+ if cmd.spawn().unwrap().wait().unwrap().success() {
+ return;
+ }
+ std::thread::sleep(std::time::Duration::from_secs(i * 5));
+ }
+ println!("The command has failed after {tries} attempts.");
+ process::exit(1);
+}
+
+#[track_caller]
+pub(crate) fn spawn_and_wait_with_input(mut cmd: Command, input: String) -> String {
+ let mut child = cmd
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn child process");
+
+ let mut stdin = child.stdin.take().expect("Failed to open stdin");
+ std::thread::spawn(move || {
+ stdin.write_all(input.as_bytes()).expect("Failed to write to stdin");
+ });
+
+ let output = child.wait_with_output().expect("Failed to read stdout");
+ if !output.status.success() {
+ process::exit(1);
+ }
+
+ String::from_utf8(output.stdout).unwrap()
+}
+
+pub(crate) fn remove_dir_if_exists(path: &Path) {
+ match fs::remove_dir_all(&path) {
+ Ok(()) => {}
+ Err(err) if err.kind() == io::ErrorKind::NotFound => {}
+ Err(err) => panic!("Failed to remove {path}: {err}", path = path.display()),
+ }
+}
+
+pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
+ for entry in fs::read_dir(from).unwrap() {
+ let entry = entry.unwrap();
+ let filename = entry.file_name();
+ if filename == "." || filename == ".." {
+ continue;
+ }
+ if entry.metadata().unwrap().is_dir() {
+ fs::create_dir(to.join(&filename)).unwrap();
+ copy_dir_recursively(&from.join(&filename), &to.join(&filename));
+ } else {
+ fs::copy(from.join(&filename), to.join(&filename)).unwrap();
+ }
+ }
+}
+
+pub(crate) fn is_ci() -> bool {
++ env::var("CI").is_ok()
++}
++
++pub(crate) fn is_ci_opt() -> bool {
++ env::var("CI_OPT").is_ok()
+}
--- /dev/null
- test.simple-raytracer
+# This file allows configuring the build system.
+
+# Which triple to produce a compiler toolchain for.
+#
+# Defaults to the default triple of rustc on the host system.
+#host = x86_64-unknown-linux-gnu
+
+# Which triple to build libraries (core/alloc/std/test/proc_macro) for.
+#
+# Defaults to `host`.
+#target = x86_64-unknown-linux-gnu
+
+# Disables cleaning of the sysroot dir. This will cause old compiled artifacts to be re-used when
+# the sysroot source hasn't changed. This is useful when the codegen backend hasn't been modified.
+# This option can be changed while the build system is already running, as long as sysroot
+# building hasn't started yet.
+#keep_sysroot
+
+
+# Testsuite
+#
+# Each test suite item has a corresponding key here. The default is to run all tests.
+# Comment out any of these lines to skip individual tests.
+
+testsuite.no_sysroot
+build.mini_core
+build.example
+jit.mini_core_hello_world
+aot.mini_core_hello_world
+
+testsuite.base_sysroot
+aot.arbitrary_self_types_pointers_and_wrappers
+aot.issue_91827_extern_types
+build.alloc_system
+aot.alloc_example
+jit.std_example
+aot.std_example
+aot.dst_field_align
+aot.subslice-patterns-const-eval
+aot.track-caller-attribute
+aot.float-minmax-pass
+aot.mod_bench
+aot.issue-72793
+
+testsuite.extended_sysroot
+test.rust-random/rand
+test.libcore
+test.regex-shootout-regex-dna
+test.regex
+test.portable-simd
--- /dev/null
- channel = "nightly-2023-01-20"
+[toolchain]
++channel = "nightly-2023-02-06"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
--- /dev/null
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/../"
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Test suite of rustc"
+pushd rust
+
+command -v rg >/dev/null 2>&1 || cargo install ripgrep
+
+rm -r tests/ui/{extern/,unsized-locals/,lto/,linkage*} || true
+for test in $(rg --files-with-matches "lto|// needs-asm-support|// needs-unwind" tests/{codegen-units,ui,incremental}); do
+ rm "$test"
+done
+
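+# The regex below matches compiletest error annotations (e.g. `//~ ERROR ...`)
+# and directives like `// error-pattern:` that mark a test as expected to fail.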
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~[^\|]*\s*ERR|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" tests/ui); do
+ rm "$test"
+done
+
+git checkout -- tests/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+git checkout -- tests/ui/proc-macro/pretty-print-hack/
+
+# missing features
+# ================
+
+# requires stack unwinding
+rm tests/incremental/change_crate_dep_kind.rs
+rm tests/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
+
+# requires compiling with -Cpanic=unwind
+rm -r tests/ui/macros/rfc-2011-nicer-assert-messages/
+rm -r tests/run-make/test-benches
+rm tests/ui/test-attrs/test-type.rs
+
+# vendor intrinsics
+rm tests/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm tests/ui/intrinsics/const-eval-select-x86_64.rs # requires x86_64 vendor intrinsics
+rm tests/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
+rm tests/ui/simd/intrinsic/float-math-pass.rs # simd_fcos unimplemented
+
+# exotic linkages
+rm tests/ui/issues/issue-33992.rs # unsupported linkages
+rm tests/incremental/hashes/function_interfaces.rs # same
+rm tests/incremental/hashes/statics.rs # same
+
+# variadic arguments
+rm tests/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm tests/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+# unsized locals
+rm -r tests/run-pass-valgrind/unsized-locals
+
+# misc unimplemented things
+rm tests/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+rm tests/ui/target-feature/missing-plusminus.rs # error not implemented
+rm tests/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm -r tests/run-make/emit-named-files # requires full --emit support
+rm -r tests/run-make/repr128-dwarf # debuginfo test
+
+# optimization tests
+# ==================
+rm tests/ui/codegen/issue-28950.rs # depends on stack size optimizations
+rm tests/ui/codegen/init-large-type.rs # same
+rm tests/ui/issues/issue-40883.rs # same
+rm -r tests/run-make/fmt-write-bloat/ # tests an optimization
+
+# backend specific tests
+# ======================
+rm tests/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+rm tests/ui/abi/stack-protector.rs # requires stack protector support
+
+# giving different but possibly correct results
+# =============================================
+rm tests/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
+rm tests/ui/mir/mir_raw_fat_ptr.rs # same
+rm tests/ui/consts/issue-33537.rs # same
+rm tests/ui/layout/valid_range_oob.rs # different ICE message
+
+rm tests/ui/consts/issue-miri-1910.rs # different error message
+rm tests/ui/consts/offset_ub.rs # same
+rm tests/ui/intrinsics/panic-uninitialized-zeroed.rs # same
+rm tests/ui/lint/lint-const-item-mutation.rs # same
+rm tests/ui/pattern/usefulness/doc-hidden-non-exhaustive.rs # same
+rm tests/ui/suggestions/derive-trait-for-method-call.rs # same
+rm tests/ui/typeck/issue-46112.rs # same
+
+rm tests/ui/proc-macro/crt-static.rs # extra warning about -Cpanic=abort for proc macros
+rm tests/ui/proc-macro/proc-macro-deprecated-attr.rs # same
+rm tests/ui/proc-macro/quote-debug.rs # same
+rm tests/ui/proc-macro/no-missing-docs.rs # same
+rm tests/ui/rust-2018/proc-macro-crate-in-paths.rs # same
++rm tests/ui/proc-macro/allowed-signatures.rs # same
+
+# These don't work due to the way the rustc test suite is invoked here.
+# They should work when using ./x.py test the way it is intended.
+# ============================================================
+rm -r tests/run-make/emit-shared-files # requires the rustdoc executable in dist/bin/
+rm -r tests/run-make/unstable-flag-required # same
+rm -r tests/run-make/rustdoc-* # same
+rm -r tests/run-make/issue-88756-default-output # same
+rm -r tests/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump
+rm tests/ui/consts/missing_span_in_backtrace.rs # expects sysroot source to be elsewhere
+
+# genuine bugs
+# ============
+rm tests/incremental/spike-neg1.rs # errors out for some reason
+rm tests/incremental/spike-neg2.rs # same
+
+rm tests/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors
+
+rm tests/ui/simd/intrinsic/generic-as.rs # crash when accessing vector type field (#1318)
+rm tests/ui/simd/simd-bitmask.rs # crash
+
+rm tests/ui/dyn-star/dyn-star-to-dyn.rs
+rm tests/ui/dyn-star/dispatch-on-pin-mut.rs
+
+# bugs in the test suite
+# ======================
+rm tests/ui/backtrace.rs # TODO warning
+rm tests/ui/simple_global_asm.rs # TODO add needs-asm-support
+rm tests/ui/process/nofile-limit.rs # TODO some AArch64 linking issue
+
+rm tests/ui/stdio-is-blocking.rs # really slow with unoptimized libstd
+
+echo "[TEST] rustc test suite"
+RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 tests/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
+popd
--- /dev/null
- ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
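+//!
+//! For example (illustrative values), an argument comment produced here reads
+//! `arg   _1    = v0    ByVal(i64)    i64`, listing kind, MIR local, the
+//! Cranelift value(s) it is passed in, the pass mode and the type, matching
+//! the header written by `add_args_header_comment`.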
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(
+ "kind loc.idx param pass mode ty".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ kind: &str,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ params: &[Value],
+ arg_abi_mode: &PassMode,
+ arg_layout: TyAndLayout<'tcx>,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+
+ let local = if let Some(local) = local {
+ Cow::Owned(format!("{:?}", local))
+ } else {
+ Cow::Borrowed("???")
+ };
+ let local_field = if let Some(local_field) = local_field {
+ Cow::Owned(format!(".{}", local_field))
+ } else {
+ Cow::Borrowed("")
+ };
+
+ let params = match params {
+ [] => Cow::Borrowed("-"),
+ [param] => Cow::Owned(format!("= {:?}", param)),
+ [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+ params => Cow::Owned(format!(
+ "= {}",
+ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+ )),
+ };
+
+ let pass_mode = format!("{:?}", arg_abi_mode);
+ fx.add_global_comment(format!(
+ "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+ kind = kind,
+ local = local,
+ local_field = local_field,
+ params = params,
+ pass_mode = pass_mode,
+ ty = arg_layout.ty,
+ ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(String::new());
+ fx.add_global_comment(
+ "kind local ty size align (abi,pref)".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ local: Local,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+ let TyAndLayout { ty, layout } = place.layout();
+ let rustc_target::abi::LayoutS {
+ size,
+ align,
+ abi: _,
+ variants: _,
+ fields: _,
+ largest_niche: _,
+ } = layout.0.0;
+
+ let (kind, extra) = match *place.inner() {
+ CPlaceInner::Var(place_local, var) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var={}", var.index())))
+ }
+ CPlaceInner::VarPair(place_local, var1, var2) => {
+ assert_eq!(local, place_local);
- Cow::Owned(format!(",meta={}", meta))
++ ("ssa", Cow::Owned(format!("var=({}, {})", var1.index(), var2.index())))
+ }
+ CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+ CPlaceInner::Addr(ptr, meta) => {
+ let meta = if let Some(meta) = meta {
++ Cow::Owned(format!("meta={}", meta))
+ } else {
+ Cow::Borrowed("")
+ };
+ match ptr.debug_base_and_offset() {
+ (crate::pointer::PointerBase::Addr(addr), offset) => {
+ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Dangling(align), offset) => {
+ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+ }
+ }
+ }
+ };
+
+ fx.add_global_comment(format!(
+ "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+ kind,
+ format!("{:?}", local),
+ format!("{:?}", ty),
+ size.bytes(),
+ align.abi.bytes(),
+ align.pref.bytes(),
+ if extra.is_empty() { "" } else { " " },
+ extra,
+ ));
+}
--- /dev/null
-
- let caller_name = format!("__rust_{}", method.name);
- let callee_name = kind.fn_name(method.name);
-
- let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
-
- let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
-
- let mut ctx = Context::new();
- ctx.func.signature = sig.clone();
- {
- let mut func_ctx = FunctionBuilderContext::new();
- let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
-
- let block = bcx.create_block();
- bcx.switch_to_block(block);
- let args = arg_tys
- .into_iter()
- .map(|ty| bcx.append_block_param(block, ty))
- .collect::<Vec<Value>>();
-
- let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
- let call_inst = bcx.ins().call(callee_func_ref, &args);
- let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
-
- bcx.ins().return_(&results);
- bcx.seal_all_blocks();
- bcx.finalize();
- }
- module.define_function(func_id, &mut ctx).unwrap();
- unwind_context.add_function(func_id, &ctx, module.isa());
+//! Allocator shim
+// Adapted from rustc
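+//
+// The shim exports the `__rust_alloc*` entry points that liballoc calls and
+// forwards each to the allocator chosen for this crate graph, e.g.
+// (illustrative) `__rust_alloc` simply calls `__rg_alloc` when a
+// `#[global_allocator]` is present, or `__rdl_alloc` for the default one.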
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_session::config::OomStrategy;
+use rustc_span::symbol::sym;
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.alloc_error_handler_kind(()).unwrap(),
+ tcx.sess.opts.unstable_opts.oom,
+ );
+ true
+ } else {
+ false
+ }
+}
+
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ kind: AllocatorKind,
+ alloc_error_handler_kind: AllocatorKind,
+ oom_strategy: OomStrategy,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
+ for method in ALLOCATOR_METHODS {
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
-
- let callee_name = alloc_error_handler_kind.fn_name(sym::oom);
-
- let func_id =
- module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
-
- let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
-
- let mut ctx = Context::new();
- ctx.func.signature = sig;
- {
- let mut func_ctx = FunctionBuilderContext::new();
- let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
-
- let block = bcx.create_block();
- bcx.switch_to_block(block);
- let args = (&[usize_ty, usize_ty])
- .iter()
- .map(|&ty| bcx.append_block_param(block, ty))
- .collect::<Vec<Value>>();
-
- let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
- bcx.ins().call(callee_func_ref, &args);
-
- bcx.ins().trap(TrapCode::UnreachableCodeReached);
- bcx.seal_all_blocks();
- bcx.finalize();
- }
- module.define_function(func_id, &mut ctx).unwrap();
- unwind_context.add_function(func_id, &ctx, module.isa());
++ crate::common::create_wrapper_function(
++ module,
++ unwind_context,
++ sig,
++ &format!("__rust_{}", method.name),
++ &kind.fn_name(method.name),
++ );
+ }
+
+ let sig = Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
++ crate::common::create_wrapper_function(
++ module,
++ unwind_context,
++ sig,
++ "__rust_alloc_error_handler",
++ &alloc_error_handler_kind.fn_name(sym::oom),
++ );
+
+ let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(1);
+ let val = oom_strategy.should_panic();
+ data_ctx.define(Box::new([val]));
+ module.define_data(data_id, &data_ctx).unwrap();
+}
--- /dev/null
- #[cfg_attr(not(feature = "jit"), allow(dead_code))]
- pub(crate) fn codegen_and_compile_fn<'tcx>(
- tcx: TyCtxt<'tcx>,
- cx: &mut crate::CodegenCx,
- cached_context: &mut Context,
- module: &mut dyn Module,
- instance: Instance<'tcx>,
- ) {
- let _inst_guard =
- crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
-
- let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
- let codegened_func = codegen_fn(tcx, cx, cached_func, module, instance);
-
- compile_fn(cx, cached_context, module, codegened_func);
- }
-
+//! Codegen of a single function
+
+use rustc_ast::InlineAsmOptions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+use cranelift_codegen::ir::UserFuncName;
+
+use crate::constant::ConstantCx;
+use crate::debuginfo::FunctionDebugContext;
+use crate::prelude::*;
+use crate::pretty_clif::CommentWriter;
+
+pub(crate) struct CodegenedFunction {
+ symbol_name: String,
+ func_id: FuncId,
+ func: Function,
+ clif_comments: CommentWriter,
+ func_debug_cx: Option<FunctionDebugContext>,
+}
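+
+// Codegen happens in two phases: `codegen_fn` lowers a monomorphized instance
+// to Cranelift IR, and `compile_fn` runs the Cranelift passes and defines the
+// result in the module. The `cached_func`/`cached_context` arguments let both
+// phases reuse allocations across functions.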
+
- let symbol_name = tcx.symbol_name(instance).name.to_string();
+pub(crate) fn codegen_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut crate::CodegenCx,
+ cached_func: Function,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) -> CodegenedFunction {
+ debug_assert!(!instance.substs.needs_infer());
+
++ let symbol_name = tcx.symbol_name(instance).name.to_string();
++ let _timer = tcx.prof.generic_activity_with_arg("codegen fn", &*symbol_name);
++
+ let mir = tcx.instance_mir(instance.def);
+ let _mir_guard = crate::PrintOnPanic(|| {
+ let mut buf = Vec::new();
+ with_no_trimmed_paths!({
+ rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
+ .unwrap();
+ });
+ String::from_utf8_lossy(&buf).into_owned()
+ });
+
+ // Declare function
- tcx.sess.time("codegen clif ir", || codegen_fn_body(&mut fx, start_block));
+ let sig = get_function_sig(tcx, module.target_config().default_call_conv, instance);
+ let func_id = module.declare_function(&symbol_name, Linkage::Local, &sig).unwrap();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = cached_func;
+ func.clear();
+ func.name = UserFuncName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks.len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let target_config = module.target_config();
+ let pointer_type = target_config.pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let func_debug_cx = if let Some(debug_context) = &mut cx.debug_context {
+ Some(debug_context.define_function(tcx, &symbol_name, mir.span))
+ } else {
+ None
+ };
+
+ let mut fx = FunctionCx {
+ cx,
+ module,
+ tcx,
+ target_config,
+ pointer_type,
+ constants_cx: ConstantCx::new(),
+ func_debug_cx,
+
+ instance,
+ symbol_name,
+ mir,
+ fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+
+ clif_comments,
+ last_source_file: None,
+ next_ssa_var: 0,
+ };
+
- cx.profiler.verbose_generic_activity("define function").run(|| {
++ tcx.prof.generic_activity("codegen clif ir").run(|| codegen_fn_body(&mut fx, start_block));
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+
+ // Recover all necessary data from fx before accessing func, as that will prevent any further use of fx.
+ let symbol_name = fx.symbol_name;
+ let clif_comments = fx.clif_comments;
+ let func_debug_cx = fx.func_debug_cx;
+
+ fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
+ if cx.should_write_ir {
+ crate::pretty_clif::write_clif_file(
+ tcx.output_filenames(()),
+ &symbol_name,
+ "unopt",
+ module.isa(),
+ &func,
+ &clif_comments,
+ );
+ }
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &func);
+
+ CodegenedFunction { symbol_name, func_id, func, clif_comments, func_debug_cx }
+}
+
+pub(crate) fn compile_fn(
+ cx: &mut crate::CodegenCx,
+ cached_context: &mut Context,
+ module: &mut dyn Module,
+ codegened_func: CodegenedFunction,
+) {
++ let _timer =
++ cx.profiler.generic_activity_with_arg("compile function", &*codegened_func.symbol_name);
++
+ let clif_comments = codegened_func.clif_comments;
+
+ // Store function in context
+ let context = cached_context;
+ context.clear();
+ context.func = codegened_func.func;
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(module.isa()).unwrap();
+ context.dce(module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ #[cfg(any())] // This is never true
+ let _clif_guard = {
+ use std::fmt::Write;
+
+ let func_clone = context.func.clone();
+ let clif_comments_clone = clif_comments.clone();
+ let mut clif = String::new();
+ for flag in module.isa().flags().iter() {
+ writeln!(clif, "set {}", flag).unwrap();
+ }
+ write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
+ for isa_flag in module.isa().isa_flags().iter() {
+ write!(clif, " {}", isa_flag).unwrap();
+ }
+ writeln!(clif, "\n").unwrap();
+ crate::PrintOnPanic(move || {
+ let mut clif = clif.clone();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &clif_comments_clone,
+ &mut clif,
+ &func_clone,
+ )
+ .unwrap();
+ clif
+ })
+ };
+
+ // Define function
- cx.profiler.verbose_generic_activity("generate debug info").run(|| {
++ cx.profiler.generic_activity("define function").run(|| {
+ context.want_disasm = cx.should_write_ir;
+ module.define_function(codegened_func.func_id, context).unwrap();
++
++ if cx.profiler.enabled() {
++ let mut recording_args = false;
++ cx.profiler
++ .generic_activity_with_arg_recorder(
++ "define function (clif pass timings)",
++ |recorder| {
++ let pass_times = cranelift_codegen::timing::take_current();
++ // Replace newlines with | as measureme doesn't allow control characters like
++ // newlines inside strings.
++ recorder.record_arg(format!("{}", pass_times).replace("\n", " | "));
++ recording_args = true;
++ },
++ )
++ .run(|| {
++ if recording_args {
++ // Wait a tiny bit to ensure Chrome's profiler doesn't hide the event
++ std::thread::sleep(std::time::Duration::from_nanos(2))
++ }
++ });
++ }
+ });
+
+ if cx.should_write_ir {
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ &cx.output_filenames,
+ &codegened_func.symbol_name,
+ "opt",
+ module.isa(),
+ &context.func,
+ &clif_comments,
+ );
+
+ if let Some(disasm) = &context.compiled_code().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ &cx.output_filenames,
+ &format!("{}.vcode", codegened_func.symbol_name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
+ }
+
+ // Define debuginfo for function
+ let isa = module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
- tcx.sess.time("verify clif ir", || {
++ cx.profiler.generic_activity("generate debug info").run(|| {
+ if let Some(debug_context) = debug_context {
+ codegened_func.func_debug_cx.unwrap().finalize(
+ debug_context,
+ codegened_func.func_id,
+ context,
+ );
+ }
+ unwind_context.add_function(codegened_func.func_id, &context, isa);
+ });
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
- fx.tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(fx, start_block));
++ tcx.prof.generic_activity("verify clif ir").run(|| {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
+ if !crate::constant::check_constants(fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ // compilation should have been aborted
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
+
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+ if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
+ }
- fx.tcx.sess.time("codegen call", || {
++ fx.tcx
++ .prof
++ .generic_activity("codegen prelude")
++ .run(|| crate::abi::codegen_fn_prelude(fx, start_block));
+
+ for (bb, bb_data) in fx.mir.basic_blocks.iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+ // so for cleanup blocks.
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ if fx.clif_comments.enabled() {
+ let mut terminator_head = "\n".to_string();
+ with_no_trimmed_paths!({
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ });
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ let source_info = bb_data.terminator().source_info;
+ fx.set_debug_loc(source_info);
+
+ let _print_guard =
+ crate::PrintOnPanic(|| format!("terminator {:?}", bb_data.terminator().kind));
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.bcx.set_cold_block(failure);
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, source_info);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt { discr, targets } => {
+ let discr = codegen_operand(fx, discr);
+ let switch_ty = discr.layout().ty;
+ let discr = discr.load_scalar(fx);
+
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
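+ // Shortcut for switching on a bool (or any switch whose single
+ // non-otherwise target is for the value 0): lower directly to
+ // brz/brnz on the discriminant instead of a generic Switch.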
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
- Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
- AggregateKind::Array(_ty) => {
- for (i, operand) in operands.iter().enumerate() {
- let operand = codegen_operand(fx, operand);
- let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
- let to = lval.place_index(fx, index);
- to.write_cvalue(fx, operand);
++ fx.tcx.prof.generic_activity("codegen call").run(|| {
+ crate::abi::codegen_terminator_call(
+ fx,
+ mir::SourceInfo { span: *fn_span, ..source_info },
+ func,
+ args,
+ *destination,
+ *target,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ cleanup: _,
+ } => {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "cranelift doesn't support unwinding from inline assembly.",
+ );
+ }
+
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ source_info.span,
+ template,
+ operands,
+ *options,
+ *destination,
+ );
+ }
+ TerminatorKind::Abort => {
+ codegen_panic_cannot_unwind(fx, source_info);
+ }
+ TerminatorKind::Resume => {
+ // FIXME implement unwinding
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Unreachable => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, source_info, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+}
+
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ #[cfg(any())] // This is never true
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ if fx.clif_comments.enabled() {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::CopyForDeref(place) => {
+ let cplace = codegen_place(fx, place);
+ let val = cplace.to_cvalue(fx);
+ lval.write_cvalue(fx, val)
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(res, layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(
+ CastKind::IntToInt
+ | CastKind::FloatToFloat
+ | CastKind::FloatToInt
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
+ | CastKind::PointerExposeAddress
+ | CastKind::PointerFromExposedAddress,
+ ref operand,
+ to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
+ let operand = codegen_operand(fx, operand);
+ operand.coerce_dyn_star(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ crate::discriminant::codegen_get_discriminant(fx, lval, value, dest_layout);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .kind()
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if operand.layout().size.bytes() == 0 {
+ // Do nothing for ZSTs
+ } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
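+ // Byte-sized element type: lower e.g. `[0u8; N]` to a single
+ // memset call instead of a store loop.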
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx.call_memset(fx.target_config, addr, val, times);
+ } else {
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let content_ty = fx.monomorphize(content_ty);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+ let operand = codegen_operand(fx, operand);
+ let operand = operand.load_scalar(fx);
+ lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
+ }
+ Rvalue::NullaryOp(null_op, ty) => {
+ assert!(lval.layout().ty.is_sized(fx.tcx, ParamEnv::reveal_all()));
+ let layout = fx.layout_of(fx.monomorphize(ty));
+ let val = match null_op {
+ NullOp::SizeOf => layout.size.bytes(),
+ NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+ lval.write_cvalue(fx, val);
+ }
- _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
- },
++ Rvalue::Aggregate(ref kind, ref operands) => {
++ let (variant_index, variant_dest, active_field_index) = match **kind {
++ mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
++ let variant_dest = lval.downcast_variant(fx, variant_index);
++ (variant_index, variant_dest, active_field_index)
+ }
++ _ => (VariantIdx::from_u32(0), lval, None),
++ };
++ if active_field_index.is_some() {
++ assert_eq!(operands.len(), 1);
++ }
++ for (i, operand) in operands.iter().enumerate() {
++ let operand = codegen_operand(fx, operand);
++ let field_index = active_field_index.unwrap_or(i);
++ let to = if let mir::AggregateKind::Array(_) = **kind {
++ let index = fx.bcx.ins().iconst(fx.pointer_type, field_index as i64);
++ variant_dest.place_index(fx, index)
++ } else {
++ variant_dest.place_field(fx, mir::Field::new(field_index))
++ };
++ to.write_cvalue(fx, operand);
+ }
++ crate::discriminant::codegen_set_discriminant(fx, lval, variant_index);
++ }
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::ConstEvalCounter
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::Intrinsic(ref intrinsic) => match &**intrinsic {
+ // We ignore `assume` intrinsics; they are only useful for optimizations
+ NonDivergingIntrinsic::Assume(_) => {}
+ NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
+ src,
+ dst,
+ count,
+ }) => {
+ let dst = codegen_operand(fx, dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, src).load_scalar(fx);
+ let count = codegen_operand(fx, count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ },
+ }
+}
+
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
+
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
+
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
+ }
+}
+
+pub(crate) fn codegen_panic<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ msg_str: &str,
+ source_info: mir::SourceInfo,
+) {
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
+}
+
+pub(crate) fn codegen_panic_nounwind<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ msg_str: &str,
+ source_info: mir::SourceInfo,
+) {
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::PanicNounwind, &args, source_info.span);
+}
+
+pub(crate) fn codegen_panic_cannot_unwind<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+) {
+ let args = [];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::PanicCannotUnwind, &args, source_info.span);
+}
+
+fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id = fx
+ .tcx
+ .lang_items()
+ .require(lang_item)
+ .unwrap_or_else(|e| fx.tcx.sess.span_fatal(span, e.to_string()));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ fx.lib_call(
+ &*symbol_name,
+ args.iter().map(|&arg| AbiParam::new(fx.bcx.func.dfg.value_type(arg))).collect(),
+ vec![],
+ args,
+ );
+
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
--- /dev/null
+use cranelift_codegen::isa::TargetFrontendConfig;
+use gimli::write::FileId;
+
+use rustc_data_structures::sync::Lrc;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers,
+};
+use rustc_span::SourceFile;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::constant::ConstantCx;
+use crate::debuginfo::FunctionDebugContext;
+use crate::prelude::*;
+
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+ match tcx.data_layout.pointer_size.bits() {
+ 16 => types::I16,
+ 32 => types::I32,
+ 64 => types::I64,
+ bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+}
+
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+ match scalar.primitive() {
+ Primitive::Int(int, _sign) => match int {
+ Integer::I8 => types::I8,
+ Integer::I16 => types::I16,
+ Integer::I32 => types::I32,
+ Integer::I64 => types::I64,
+ Integer::I128 => types::I128,
+ },
+ Primitive::F32 => types::F32,
+ Primitive::F64 => types::F64,
+ // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+ Primitive::Pointer(_) => pointer_ty(tcx),
+ }
+}
+
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+ Some(match ty.kind() {
+ ty::Bool => types::I8,
+ ty::Uint(size) => match size {
+ UintTy::U8 => types::I8,
+ UintTy::U16 => types::I16,
+ UintTy::U32 => types::I32,
+ UintTy::U64 => types::I64,
+ UintTy::U128 => types::I128,
+ UintTy::Usize => pointer_ty(tcx),
+ },
+ ty::Int(size) => match size {
+ IntTy::I8 => types::I8,
+ IntTy::I16 => types::I16,
+ IntTy::I32 => types::I32,
+ IntTy::I64 => types::I64,
+ IntTy::I128 => types::I128,
+ IntTy::Isize => pointer_ty(tcx),
+ },
+ ty::Char => types::I32,
+ ty::Float(size) => match size {
+ FloatTy::F32 => types::F32,
+ FloatTy::F64 => types::F64,
+ },
+ ty::FnPtr(_) => pointer_ty(tcx),
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ return None;
+ } else {
+ pointer_ty(tcx)
+ }
+ }
+ ty::Adt(adt_def, _) if adt_def.repr().simd() => {
+ let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+ {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+ _ => return None,
+ }
+ }
+ ty::Param(_) => bug!("ty param {:?}", ty),
+ _ => return None,
+ })
+}
+
+fn clif_pair_type_from_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+ Some(match ty.kind() {
+ ty::Tuple(types) if types.len() == 2 => {
+ let a = clif_type_from_ty(tcx, types[0])?;
+ let b = clif_type_from_ty(tcx, types[1])?;
+ if a.is_vector() || b.is_vector() {
+ return None;
+ }
+ (a, b)
+ }
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ (pointer_ty(tcx), pointer_ty(tcx))
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ })
+}
+
+/// Is a pointer to this type a fat ptr?
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+ let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
+ match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+ Abi::Scalar(_) => false,
+ Abi::ScalarPair(_, _) => true,
+ abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+ }
+}
+
+pub(crate) fn codegen_icmp_imm(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intcc: IntCC,
+ lhs: Value,
+ rhs: i128,
+) -> Value {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ if lhs_ty == types::I128 {
+ // FIXME legalize `icmp_imm.i128` in Cranelift
+
+ let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+ let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
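+        // Compare the two 64-bit halves separately, then combine the results.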
+ match intcc {
+ IntCC::Equal => {
+ let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ fx.bcx.ins().band(lsb_eq, msb_eq)
+ }
+ IntCC::NotEqual => {
+ let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+ let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+ fx.bcx.ins().bor(lsb_ne, msb_ne)
+ }
+ _ => {
+ // if msb_eq {
+ // lsb_cc
+ // } else {
+ // msb_cc
+ // }
+
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+ let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+ fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+ }
+ }
+ } else {
+ let rhs = rhs as i64; // Truncates on purpose in case rhs is actually an unsigned value
+ fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+ }
+}
+
+pub(crate) fn codegen_bitcast(fx: &mut FunctionCx<'_, '_, '_>, dst_ty: Type, val: Value) -> Value {
+ let mut flags = MemFlags::new();
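+    // Cranelift's `bitcast` takes memory flags specifying a byte order; use the target's
+    // endianness so the cast behaves like a store followed by a load on the target.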
+ flags.set_endianness(match fx.tcx.data_layout.endian {
+ rustc_target::abi::Endian::Big => cranelift_codegen::ir::Endianness::Big,
+ rustc_target::abi::Endian::Little => cranelift_codegen::ir::Endianness::Little,
+ });
+ fx.bcx.ins().bitcast(dst_ty, flags, val)
+}
+
+pub(crate) fn type_zero_value(bcx: &mut FunctionBuilder<'_>, ty: Type) -> Value {
+ if ty == types::I128 {
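+        // `iconst` cannot produce a 128-bit value; concatenate two 64-bit zeros instead.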
+ let zero = bcx.ins().iconst(types::I64, 0);
+ bcx.ins().iconcat(zero, zero)
+ } else {
+ bcx.ins().iconst(ty, 0)
+ }
+}
+
+pub(crate) fn type_min_max_value(
+ bcx: &mut FunctionBuilder<'_>,
+ ty: Type,
+ signed: bool,
+) -> (Value, Value) {
+ assert!(ty.is_int());
+
+ if ty == types::I128 {
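+        // As in `type_zero_value`, 128-bit constants are assembled from two 64-bit halves.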
+ if signed {
+ let min = i128::MIN as u128;
+ let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+ let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+ let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+ let max = i128::MAX as u128;
+ let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+ let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+ let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+ return (min, max);
+ } else {
+ let min_half = bcx.ins().iconst(types::I64, 0);
+ let min = bcx.ins().iconcat(min_half, min_half);
+
+ let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+ let max = bcx.ins().iconcat(max_half, max_half);
+
+ return (min, max);
+ }
+ }
+
+ let min = match (ty, signed) {
+ (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+ 0i64
+ }
+ (types::I8, true) => i64::from(i8::MIN),
+ (types::I16, true) => i64::from(i16::MIN),
+ (types::I32, true) => i64::from(i32::MIN),
+ (types::I64, true) => i64::MIN,
+ _ => unreachable!(),
+ };
+
+ let max = match (ty, signed) {
+ (types::I8, false) => i64::from(u8::MAX),
+ (types::I16, false) => i64::from(u16::MAX),
+ (types::I32, false) => i64::from(u32::MAX),
+ (types::I64, false) => u64::MAX as i64,
+ (types::I8, true) => i64::from(i8::MAX),
+ (types::I16, true) => i64::from(i16::MAX),
+ (types::I32, true) => i64::from(i32::MAX),
+ (types::I64, true) => i64::MAX,
+ _ => unreachable!(),
+ };
+
+ let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+ (min, max)
+}
+
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+ ty::Int(..) => true,
+ ty::Float(..) => false, // `signed` is unused for floats
+ _ => panic!("{}", ty),
+ }
+}
+
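++/// Create a function that forwards all of its arguments to `callee_name` and returns the
++/// callee's results unchanged. The wrapper is exported under `wrapper_name`, while the
++/// callee is merely imported.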
++pub(crate) fn create_wrapper_function(
++ module: &mut dyn Module,
++ unwind_context: &mut UnwindContext,
++ sig: Signature,
++ wrapper_name: &str,
++ callee_name: &str,
++) {
++ let wrapper_func_id = module.declare_function(wrapper_name, Linkage::Export, &sig).unwrap();
++ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
++
++ let mut ctx = Context::new();
++ ctx.func.signature = sig;
++ {
++ let mut func_ctx = FunctionBuilderContext::new();
++ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
++
++ let block = bcx.create_block();
++ bcx.switch_to_block(block);
++ let func = &mut bcx.func.stencil;
++ let args = func
++ .signature
++ .params
++ .iter()
++ .map(|param| func.dfg.append_block_param(block, param.value_type))
++ .collect::<Vec<Value>>();
++
++ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
++ let call_inst = bcx.ins().call(callee_func_ref, &args);
++ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
++
++ bcx.ins().return_(&results);
++ bcx.seal_all_blocks();
++ bcx.finalize();
++ }
++ module.define_function(wrapper_func_id, &mut ctx).unwrap();
++ unwind_context.add_function(wrapper_func_id, &ctx, module.isa());
++}
++
+pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
+ pub(crate) cx: &'clif mut crate::CodegenCx,
+ pub(crate) module: &'m mut dyn Module,
+ pub(crate) tcx: TyCtxt<'tcx>,
+ pub(crate) target_config: TargetFrontendConfig, // Cached from module
+ pub(crate) pointer_type: Type, // Cached from module
+ pub(crate) constants_cx: ConstantCx,
+ pub(crate) func_debug_cx: Option<FunctionDebugContext>,
+
+ pub(crate) instance: Instance<'tcx>,
+ pub(crate) symbol_name: String,
+ pub(crate) mir: &'tcx Body<'tcx>,
+ pub(crate) fn_abi: Option<&'tcx FnAbi<'tcx, Ty<'tcx>>>,
+
+ pub(crate) bcx: FunctionBuilder<'clif>,
+ pub(crate) block_map: IndexVec<BasicBlock, Block>,
+ pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+ /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+ pub(crate) caller_location: Option<CValue<'tcx>>,
+
+ pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+
+    /// Last accessed source file and its debuginfo file id.
+    ///
+    /// For optimization purposes only.
+ pub(crate) last_source_file: Option<(Lrc<SourceFile>, FileId)>,
+
+ /// This should only be accessed by `CPlace::new_var`.
+ pub(crate) next_ssa_var: u32,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ pub(crate) fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+
+ pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+ clif_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+ clif_pair_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+ *self.block_map.get(bb).unwrap()
+ }
+
+ pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+ *self.local_map.get(local).unwrap_or_else(|| {
+ panic!("Local {:?} doesn't exist", local);
+ })
+ }
+
+ pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+ if let Some(debug_context) = &mut self.cx.debug_context {
+ let (file, line, column) =
+ DebugContext::get_span_loc(self.tcx, self.mir.span, source_info.span);
+
+ // add_source_file is very slow.
+ // Optimize for the common case of the current file not being changed.
+ let mut cached_file_id = None;
+ if let Some((ref last_source_file, last_file_id)) = self.last_source_file {
+ // If the allocations are not equal, the files may still be equal, but that
+ // doesn't matter, as this is just an optimization.
+ if rustc_data_structures::sync::Lrc::ptr_eq(last_source_file, &file) {
+ cached_file_id = Some(last_file_id);
+ }
+ }
+
+ let file_id = if let Some(file_id) = cached_file_id {
+ file_id
+ } else {
+ debug_context.add_source_file(&file)
+ };
+
+ let source_loc =
+ self.func_debug_cx.as_mut().unwrap().add_dbg_loc(file_id, line, column);
+ self.bcx.set_srcloc(source_loc);
+ }
+ }
+
+ // Note: must be kept in sync with get_caller_location from cg_ssa
+ pub(crate) fn get_caller_location(&mut self, mut source_info: mir::SourceInfo) -> CValue<'tcx> {
+ let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = fx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = fx.tcx.const_caller_location((
+ rustc_span::symbol::Symbol::intern(
+ &caller.file.name.prefer_remapped().to_string_lossy(),
+ ),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ crate::constant::codegen_const_value(fx, const_loc, fx.tcx.caller_location_ty())
+ };
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(self.tcx) {
+ return span_to_caller_location(self, source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(self, source_info.span))
+ }
+
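+    /// Define an anonymous data object containing `msg` and return a pointer to its start.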
+ pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
+ let mut data_ctx = DataContext::new();
+ data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+ let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
+
+ // Ignore DuplicateDefinition error, as the data will be the same
+ let _ = self.module.define_data(msg_id, &data_ctx);
+
+ let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(local_msg_id, msg);
+ }
+ self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+ }
+}
+
+pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
+
+impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let layout::LayoutError::SizeOverflow(_) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.0
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.0.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.0.sess.target
+ }
+}
--- /dev/null
- ($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) => {
++#[cfg(all(unix, feature = "jit"))]
++use std::ffi::c_int;
++#[cfg(feature = "jit")]
++use std::ffi::c_void;
++
++// FIXME replace with core::ffi::c_size_t once stabilized
++#[allow(non_camel_case_types)]
++#[cfg(feature = "jit")]
++type size_t = usize;
++
+macro_rules! builtin_functions {
- $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
++ (
++ $register:ident;
++ $(
++ $(#[$attr:meta])?
++ fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;
++ )*
++ ) => {
+ #[cfg(feature = "jit")]
+ #[allow(improper_ctypes)]
+ extern "C" {
- for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
++ $(
++ $(#[$attr])?
++ fn $name($($arg_name: $arg_ty),*) -> $ret_ty;
++ )*
+ }
+
+ #[cfg(feature = "jit")]
+ pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
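++            // Register every builtin with the JIT so calls to these symbols resolve
++            // in-process instead of going through the dynamic linker.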
++ for (name, val) in [$($(#[$attr])? (stringify!($name), $name as *const u8)),*] {
+ builder.symbol(name, val);
+ }
+ }
+ };
+}
+
+builtin_functions! {
+ register_functions_for_jit;
+
+ // integers
+ fn __multi3(a: i128, b: i128) -> i128;
+ fn __udivti3(n: u128, d: u128) -> u128;
+ fn __divti3(n: i128, d: i128) -> i128;
+ fn __umodti3(n: u128, d: u128) -> u128;
+ fn __modti3(n: i128, d: i128) -> i128;
+ fn __rust_u128_addo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_addo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_subo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_subo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_mulo(a: i128, b: i128) -> (i128, bool);
+
+ // floats
+ fn __floattisf(i: i128) -> f32;
+ fn __floattidf(i: i128) -> f64;
+ fn __floatuntisf(i: u128) -> f32;
+ fn __floatuntidf(i: u128) -> f64;
+ fn __fixsfti(f: f32) -> i128;
+ fn __fixdfti(f: f64) -> i128;
+ fn __fixunssfti(f: f32) -> u128;
+ fn __fixunsdfti(f: f64) -> u128;
++
++ // allocator
++ // NOTE: These need to be mentioned here despite not being part of compiler_builtins because
++    // newer glibc resolves dlsym("malloc") to libc.so despite the override in the rustc binary to
++ // use jemalloc. Libraries opened with dlopen still get the jemalloc version, causing multiple
++ // allocators to be mixed, resulting in a crash.
++ fn calloc(nobj: size_t, size: size_t) -> *mut c_void;
++ #[cfg(unix)]
++ fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int;
++ fn malloc(size: size_t) -> *mut c_void;
++ fn realloc(p: *mut c_void, size: size_t) -> *mut c_void;
++ fn free(p: *mut c_void) -> ();
++
+}
--- /dev/null
- /// Display the time it took to perform codegen for a crate.
- ///
- /// Defaults to true when the `CG_CLIF_DISPLAY_CG_TIME` env var is set to 1 or false otherwise.
- /// Can be set using `-Cllvm-args=display_cg_time=...`.
- pub display_cg_time: bool,
-
+use std::env;
+use std::str::FromStr;
+
+fn bool_env_var(key: &str) -> bool {
+ env::var(key).as_deref() == Ok("1")
+}
+
+/// The mode to use for compilation.
+#[derive(Copy, Clone, Debug)]
+pub enum CodegenMode {
+ /// AOT compile the crate. This is the default.
+ Aot,
+ /// JIT compile and execute the crate.
+ Jit,
+ /// JIT compile and execute the crate, but only compile functions the first time they are used.
+ JitLazy,
+}
+
+impl FromStr for CodegenMode {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "aot" => Ok(CodegenMode::Aot),
+ "jit" => Ok(CodegenMode::Jit),
+ "jit-lazy" => Ok(CodegenMode::JitLazy),
+ _ => Err(format!("Unknown codegen mode `{}`", s)),
+ }
+ }
+}
+
+/// Configuration of cg_clif as passed in through `-Cllvm-args` and various env vars.
+#[derive(Clone, Debug)]
+pub struct BackendConfig {
+ /// Should the crate be AOT compiled or JIT executed.
+ ///
+ /// Defaults to AOT compilation. Can be set using `-Cllvm-args=mode=...`.
+ pub codegen_mode: CodegenMode,
+
+    /// When JIT mode is enabled, pass these arguments to the program.
+ ///
+ /// Defaults to the value of `CG_CLIF_JIT_ARGS`.
+ pub jit_args: Vec<String>,
+
- display_cg_time: bool_env_var("CG_CLIF_DISPLAY_CG_TIME"),
+    /// Enable the Cranelift IR verifier for all compilation passes. If not set, it will only run
+    /// once before passing the clif IR to Cranelift for compilation.
+    ///
+    /// Defaults to true when the `CG_CLIF_ENABLE_VERIFIER` env var is set to 1 or when cg_clif is
+    /// compiled with debug assertions enabled, and false otherwise. Can be set using
+ /// `-Cllvm-args=enable_verifier=...`.
+ pub enable_verifier: bool,
+
+    /// Don't cache object files in the incremental cache. Useful during development of cg_clif
+    /// to make it possible to use incremental mode for all analyses performed by rustc without
+    /// caching object files whose content would have changed by a change to cg_clif itself.
+    ///
+    /// Defaults to true when the `CG_CLIF_DISABLE_INCR_CACHE` env var is set to 1, and false
+    /// otherwise. Can be set using `-Cllvm-args=disable_incr_cache=...`.
+ pub disable_incr_cache: bool,
+}
+
+impl Default for BackendConfig {
+ fn default() -> Self {
+ BackendConfig {
+ codegen_mode: CodegenMode::Aot,
+ jit_args: {
+ let args = std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+ args.split(' ').map(|arg| arg.to_string()).collect()
+ },
- "display_cg_time" => config.display_cg_time = parse_bool(name, value)?,
+ enable_verifier: cfg!(debug_assertions) || bool_env_var("CG_CLIF_ENABLE_VERIFIER"),
+ disable_incr_cache: bool_env_var("CG_CLIF_DISABLE_INCR_CACHE"),
+ }
+ }
+}
+
+impl BackendConfig {
+ /// Parse the configuration passed in using `-Cllvm-args`.
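+    ///
+    /// For example `-Cllvm-args=mode=jit-lazy` selects lazy JIT mode and
+    /// `-Cllvm-args=enable_verifier=true` force-enables the Cranelift IR verifier.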
+ pub fn from_opts(opts: &[String]) -> Result<Self, String> {
+ fn parse_bool(name: &str, value: &str) -> Result<bool, String> {
+ value.parse().map_err(|_| format!("failed to parse value `{}` for {}", value, name))
+ }
+
+ let mut config = BackendConfig::default();
+ for opt in opts {
+ if let Some((name, value)) = opt.split_once('=') {
+ match name {
+ "mode" => config.codegen_mode = value.parse()?,
+ "enable_verifier" => config.enable_verifier = parse_bool(name, value)?,
+ "disable_incr_cache" => config.disable_incr_cache = parse_bool(name, value)?,
+ _ => return Err(format!("Unknown option `{}`", name)),
+ }
+ } else {
+ return Err(format!("Invalid option `{}`", opt));
+ }
+ }
+
+ Ok(config)
+ }
+}
--- /dev/null
- let (cgu_name, mut cx, mut module, codegened_functions) = tcx.sess.time("codegen cgu", || {
- let cgu = tcx.codegen_unit(cgu_name);
- let mono_items = cgu.items_in_deterministic_order(tcx);
-
- let mut module = make_module(tcx.sess, &backend_config, cgu_name.as_str().to_string());
-
- let mut cx = crate::CodegenCx::new(
- tcx,
- backend_config.clone(),
- module.isa(),
- tcx.sess.opts.debuginfo != DebugInfo::None,
- cgu_name,
- );
- super::predefine_mono_items(tcx, &mut module, &mono_items);
- let mut codegened_functions = vec![];
- for (mono_item, _) in mono_items {
- match mono_item {
- MonoItem::Fn(inst) => {
- tcx.sess.time("codegen fn", || {
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::fs::File;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::thread::JoinHandle;
+
+use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputFilenames, OutputType};
+use rustc_session::Session;
+
+use cranelift_object::{ObjectBuilder, ObjectModule};
+
+use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken};
+use crate::global_asm::GlobalAsmConfig;
+use crate::{prelude::*, BackendConfig};
+
+struct ModuleCodegenResult {
+ module_regular: CompiledModule,
+ module_global_asm: Option<CompiledModule>,
+ existing_work_product: Option<(WorkProductId, WorkProduct)>,
+}
+
+enum OngoingModuleCodegen {
+ Sync(Result<ModuleCodegenResult, String>),
+ Async(JoinHandle<Result<ModuleCodegenResult, String>>),
+}
+
+impl<HCX> HashStable<HCX> for OngoingModuleCodegen {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+pub(crate) struct OngoingCodegen {
+ modules: Vec<OngoingModuleCodegen>,
+ allocator_module: Option<CompiledModule>,
+ metadata_module: Option<CompiledModule>,
+ metadata: EncodedMetadata,
+ crate_info: CrateInfo,
+ concurrency_limiter: ConcurrencyLimiter,
+}
+
+impl OngoingCodegen {
+ pub(crate) fn join(
+ self,
+ sess: &Session,
+ backend_config: &BackendConfig,
+ ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+ let mut work_products = FxHashMap::default();
+ let mut modules = vec![];
+
+ for module_codegen in self.modules {
+ let module_codegen_result = match module_codegen {
+ OngoingModuleCodegen::Sync(module_codegen_result) => module_codegen_result,
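+                // Wait for the background codegen thread and re-raise any panic from it.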
+ OngoingModuleCodegen::Async(join_handle) => match join_handle.join() {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(panic) => std::panic::resume_unwind(panic),
+ },
+ };
+
+ let module_codegen_result = match module_codegen_result {
+ Ok(module_codegen_result) => module_codegen_result,
+ Err(err) => sess.fatal(&err),
+ };
+ let ModuleCodegenResult { module_regular, module_global_asm, existing_work_product } =
+ module_codegen_result;
+
+ if let Some((work_product_id, work_product)) = existing_work_product {
+ work_products.insert(work_product_id, work_product);
+ } else {
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else if let Some(module_global_asm) = &module_global_asm {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[
+ ("o", &module_regular.object.as_ref().unwrap()),
+ ("asm.o", &module_global_asm.object.as_ref().unwrap()),
+ ],
+ )
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess,
+ &module_regular.name,
+ &[("o", &module_regular.object.as_ref().unwrap())],
+ )
+ };
+ if let Some((work_product_id, work_product)) = work_product {
+ work_products.insert(work_product_id, work_product);
+ }
+ }
+
+ modules.push(module_regular);
+ if let Some(module_global_asm) = module_global_asm {
+ modules.push(module_global_asm);
+ }
+ }
+
+ self.concurrency_limiter.finished();
+
+ sess.abort_if_errors();
+
+ (
+ CodegenResults {
+ modules,
+ allocator_module: self.allocator_module,
+ metadata_module: self.metadata_module,
+ metadata: self.metadata,
+ crate_info: self.crate_info,
+ },
+ work_products,
+ )
+ }
+}
+
+fn make_module(sess: &Session, backend_config: &BackendConfig, name: String) -> ObjectModule {
+ let isa = crate::build_isa(sess, backend_config);
+
+ let mut builder =
+ ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.unstable_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
+
+fn emit_cgu(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
+ name: String,
+ module: ObjectModule,
+ debug: Option<DebugContext>,
+ unwind_context: UnwindContext,
+ global_asm_object_file: Option<PathBuf>,
+) -> Result<ModuleCodegenResult, String> {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let module_regular =
+ emit_module(output_filenames, prof, product.object, ModuleKind::Regular, name.clone())?;
+
+ Ok(ModuleCodegenResult {
+ module_regular,
+ module_global_asm: global_asm_object_file.map(|global_asm_object_file| CompiledModule {
+ name: format!("{name}.asm"),
+ kind: ModuleKind::Regular,
+ object: Some(global_asm_object_file),
+ dwarf_object: None,
+ bytecode: None,
+ }),
+ existing_work_product: None,
+ })
+}
+
+fn emit_module(
+ output_filenames: &OutputFilenames,
+ prof: &SelfProfilerRef,
+ mut object: cranelift_object::object::write::Object<'_>,
+ kind: ModuleKind,
+ name: String,
+) -> Result<CompiledModule, String> {
+ if object.format() == cranelift_object::object::BinaryFormat::Elf {
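+        // Record the producer as a NUL-terminated string in a .comment section, as C
+        // compilers conventionally do on ELF.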
+ let comment_section = object.add_section(
+ Vec::new(),
+ b".comment".to_vec(),
+ cranelift_object::object::SectionKind::OtherString,
+ );
+ let mut producer = vec![0];
+ producer.extend(crate::debuginfo::producer().as_bytes());
+ producer.push(0);
+ object.set_section_data(comment_section, producer, 1);
+ }
+
+ let tmp_file = output_filenames.temp_path(OutputType::Object, Some(&name));
+ let mut file = match File::create(&tmp_file) {
+ Ok(file) => file,
+ Err(err) => return Err(format!("error creating object file: {}", err)),
+ };
+
+ if let Err(err) = object.write_stream(&mut file) {
+ return Err(format!("error writing object file: {}", err));
+ }
+
+ prof.artifact_size("object_file", &*name, file.metadata().unwrap().len());
+
+ Ok(CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None })
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+) -> Result<ModuleCodegenResult, String> {
+ let work_product = cgu.previous_work_product(tcx);
+ let obj_out_regular =
+ tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ let source_file_regular = rustc_incremental::in_incr_comp_dir_sess(
+ &tcx.sess,
+ &work_product.saved_files.get("o").expect("no saved object file in work product"),
+ );
+
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_regular, &obj_out_regular) {
+ return Err(format!(
+ "unable to copy {} to {}: {}",
+ source_file_regular.display(),
+ obj_out_regular.display(),
+ err
+ ));
+ }
+ let obj_out_global_asm =
+ crate::global_asm::add_file_stem_postfix(obj_out_regular.clone(), ".asm");
+ let has_global_asm = if let Some(asm_o) = work_product.saved_files.get("asm.o") {
+ let source_file_global_asm = rustc_incremental::in_incr_comp_dir_sess(&tcx.sess, asm_o);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file_global_asm, &obj_out_global_asm)
+ {
+ return Err(format!(
+ "unable to copy {} to {}: {}",
+                source_file_global_asm.display(),
+                obj_out_global_asm.display(),
+ err
+ ));
+ }
+ true
+ } else {
+ false
+ };
+
+ Ok(ModuleCodegenResult {
+ module_regular: CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_regular),
+ dwarf_object: None,
+ bytecode: None,
+ },
+ module_global_asm: if has_global_asm {
+ Some(CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out_global_asm),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ },
+ existing_work_product: Some((cgu.work_product_id(), work_product)),
+ })
+}
+
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, global_asm_config, cgu_name, token): (
+ BackendConfig,
+ Arc<GlobalAsmConfig>,
+ rustc_span::Symbol,
+ ConcurrencyLimiterToken,
+ ),
+) -> OngoingModuleCodegen {
- });
- }
- MonoItem::Static(def_id) => {
- crate::constant::codegen_static(tcx, &mut module, def_id)
- }
- MonoItem::GlobalAsm(item_id) => {
- crate::global_asm::codegen_global_asm_item(tcx, &mut cx.global_asm, item_id);
++ let (cgu_name, mut cx, mut module, codegened_functions) =
++ tcx.prof.verbose_generic_activity_with_arg("codegen cgu", cgu_name.as_str()).run(|| {
++ let cgu = tcx.codegen_unit(cgu_name);
++ let mono_items = cgu.items_in_deterministic_order(tcx);
++
++ let mut module = make_module(tcx.sess, &backend_config, cgu_name.as_str().to_string());
++
++ let mut cx = crate::CodegenCx::new(
++ tcx,
++ backend_config.clone(),
++ module.isa(),
++ tcx.sess.opts.debuginfo != DebugInfo::None,
++ cgu_name,
++ );
++ super::predefine_mono_items(tcx, &mut module, &mono_items);
++ let mut codegened_functions = vec![];
++ for (mono_item, _) in mono_items {
++ match mono_item {
++ MonoItem::Fn(inst) => {
+ let codegened_function = crate::base::codegen_fn(
+ tcx,
+ &mut cx,
+ Function::new(),
+ &mut module,
+ inst,
+ );
+ codegened_functions.push(codegened_function);
- }
- crate::main_shim::maybe_create_entry_wrapper(
- tcx,
- &mut module,
- &mut cx.unwind_context,
- false,
- cgu.is_primary(),
- );
++ }
++ MonoItem::Static(def_id) => {
++ crate::constant::codegen_static(tcx, &mut module, def_id)
++ }
++ MonoItem::GlobalAsm(item_id) => {
++ crate::global_asm::codegen_global_asm_item(
++ tcx,
++ &mut cx.global_asm,
++ item_id,
++ );
++ }
+ }
+ }
- let cgu_name = cgu.name().as_str().to_owned();
++ crate::main_shim::maybe_create_entry_wrapper(
++ tcx,
++ &mut module,
++ &mut cx.unwind_context,
++ false,
++ cgu.is_primary(),
++ );
+
- (cgu_name, cx, module, codegened_functions)
- });
++ let cgu_name = cgu.name().as_str().to_owned();
+
- cx.profiler.clone().verbose_generic_activity("compile functions").run(|| {
- let mut cached_context = Context::new();
- for codegened_func in codegened_functions {
- crate::base::compile_fn(&mut cx, &mut cached_context, &mut module, codegened_func);
- }
- });
++ (cgu_name, cx, module, codegened_functions)
++ });
+
+ OngoingModuleCodegen::Async(std::thread::spawn(move || {
- let global_asm_object_file =
- cx.profiler.verbose_generic_activity("compile assembly").run(|| {
++ cx.profiler.clone().verbose_generic_activity_with_arg("compile functions", &*cgu_name).run(
++ || {
++ let mut cached_context = Context::new();
++ for codegened_func in codegened_functions {
++ crate::base::compile_fn(
++ &mut cx,
++ &mut cached_context,
++ &mut module,
++ codegened_func,
++ );
++ }
++ },
++ );
+
- let codegen_result = cx.profiler.verbose_generic_activity("write object file").run(|| {
- emit_cgu(
- &global_asm_config.output_filenames,
- &cx.profiler,
- cgu_name,
- module,
- cx.debug_context,
- cx.unwind_context,
- global_asm_object_file,
- )
- });
++ let global_asm_object_file = cx
++ .profiler
++ .verbose_generic_activity_with_arg("compile assembly", &*cgu_name)
++ .run(|| {
+ crate::global_asm::compile_global_asm(&global_asm_config, &cgu_name, &cx.global_asm)
+ })?;
+
- let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
++ let codegen_result = cx
++ .profiler
++ .verbose_generic_activity_with_arg("write object file", &*cgu_name)
++ .run(|| {
++ emit_cgu(
++ &global_asm_config.output_filenames,
++ &cx.profiler,
++ cgu_name,
++ module,
++ cx.debug_context,
++ cx.unwind_context,
++ global_asm_object_file,
++ )
++ });
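+        // All work for this CGU is finished; release the token so the next CGU can start.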
+ std::mem::drop(token);
+ codegen_result
+ }))
+}
+
+pub(crate) fn run_aot(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<OngoingCodegen> {
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(()).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let global_asm_config = Arc::new(crate::global_asm::GlobalAsmConfig::new(tcx));
+
+ let mut concurrency_limiter = ConcurrencyLimiter::new(tcx.sess, cgus.len());
+
- let _timer = tcx.prof.generic_activity("codegen crate metadata");
++ let modules = tcx.sess.time("codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = if backend_config.disable_incr_cache {
+ CguReuse::No
+ } else {
+ determine_cgu_reuse(tcx, cgu)
+ };
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ CguReuse::No => {
+ let dep_node = cgu.codegen_dep_node(tcx);
+ tcx.dep_graph
+ .with_task(
+ dep_node,
+ tcx,
+ (
+ backend_config.clone(),
+ global_asm_config.clone(),
+ cgu.name(),
+ concurrency_limiter.acquire(),
+ ),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ )
+ .0
+ }
+ CguReuse::PreLto => unreachable!(),
+ CguReuse::PostLto => {
+ concurrency_limiter.job_already_done();
+ OngoingModuleCodegen::Sync(reuse_workproduct_for_cgu(tcx, &*cgu))
+ }
+ }
+ })
+ .collect::<Vec<_>>()
+ });
+
+ let mut allocator_module = make_module(tcx.sess, &backend_config, "allocator_shim".to_string());
+ let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let mut product = allocator_module.finish();
+ allocator_unwind_context.emit(&mut product);
+
+ match emit_module(
+ tcx.output_filenames(()),
+ &tcx.sess.prof,
+ product.object,
+ ModuleKind::Allocator,
+ "allocator_shim".to_owned(),
+ ) {
+ Ok(allocator_module) => Some(allocator_module),
+ Err(err) => tcx.sess.fatal(err),
+ }
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let symbol_name = rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx);
+ let obj = create_compressed_metadata_file(tcx.sess, &metadata, &symbol_name);
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ // FIXME handle `-Ctarget-cpu=native`
+ let target_cpu = match tcx.sess.opts.cg.target_cpu {
+ Some(ref name) => name,
+ None => tcx.sess.target.cpu.as_ref(),
+ }
+ .to_owned();
+
+ Box::new(OngoingCodegen {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ concurrency_limiter,
+ })
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PostLto } else { CguReuse::No }
+}
--- /dev/null
- super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+// FIXME use std::sync::OnceLock once it stabilizes
+use once_cell::sync::OnceCell;
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+struct JitState {
+ backend_config: BackendConfig,
+ jit_module: JITModule,
+}
+
+thread_local! {
+ static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+ /// Request that the specified `Instance` be lazily jitted.
+ ///
+ /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+ /// this message is sent.
+ JitFn {
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+ tx: mpsc::Sender<*const u8>,
+ },
+}
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+ /// Send the message.
+ fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+ thread_local! {
+ /// The Sender owned by the local thread
+ static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
+ GLOBAL_MESSAGE_SENDER
+ .get().unwrap()
+ .lock().unwrap()
+ .clone();
+ }
+ LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+ }
+}
+
+fn create_jit_module(
+ tcx: TyCtxt<'_>,
+ backend_config: &BackendConfig,
+ hotswap: bool,
+) -> (JITModule, CodegenCx) {
+ let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
+
+ let isa = crate::build_isa(tcx.sess, backend_config);
+ let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+ jit_builder.hotswap(hotswap);
+ crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
+ jit_builder.symbol_lookup_fn(dep_symbol_lookup_fn(tcx.sess, crate_info));
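+    // Lazily jitted shims call back into the compiler through this symbol.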
+ jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
+ let mut jit_module = JITModule::new(jit_builder);
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+
+ crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut jit_module,
+ &mut cx.unwind_context,
+ true,
+ true,
+ );
+
+ (jit_module, cx)
+}
+
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+ }
+
+ if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ let (mut jit_module, mut cx) = create_jit_module(
+ tcx,
+ &backend_config,
+ matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+ );
+ let mut cached_context = Context::new();
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
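+    // Collect into a map first to deduplicate mono items placed in multiple CGUs.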
+ let mono_items = cgus
+ .iter()
+ .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+ .flatten()
+ .collect::<FxHashMap<_, (_, _)>>()
+ .into_iter()
+ .collect::<Vec<(_, (_, _))>>();
+
- tcx.sess.time("codegen fn", || {
- crate::base::codegen_and_compile_fn(
- tcx,
- &mut cx,
- &mut cached_context,
- &mut jit_module,
- inst,
- )
- });
++ tcx.sess.time("codegen mono items", || {
+ super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
- tcx.sess.time("codegen fn", || {
- crate::base::codegen_and_compile_fn(
- tcx,
- &mut cx,
- &mut Context::new(),
- jit_module,
- instance,
- )
- });
++ codegen_and_compile_fn(
++ tcx,
++ &mut cx,
++ &mut cached_context,
++ &mut jit_module,
++ inst,
++ );
+ }
+ CodegenMode::JitLazy => {
+ codegen_shim(tcx, &mut cx, &mut cached_context, &mut jit_module, inst)
+ }
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
+ if !cx.global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions().unwrap();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
+
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+
+ let start_sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: jit_module.target_config().default_call_conv,
+ };
+ let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+ let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ assert!(lazy_jit_state.is_none());
+ *lazy_jit_state = Some(JitState { backend_config, jit_module });
+ });
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_start) };
+
+ let (tx, rx) = mpsc::channel();
+ GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+ // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+    // (e.g. to lazily JIT further functions as required)
+ std::thread::spawn(move || {
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+ std::process::exit(ret);
+ });
+
+ // Handle messages
+ loop {
+ match rx.recv().unwrap() {
+ // lazy JIT compilation request - compile requested instance and return pointer to result
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+ tx.send(jit_fn(instance_ptr, trampoline_ptr))
+ .expect("jitted runtime hung up before response to lazy JIT request was sent");
+ }
+ }
+ }
+}
+
++pub(crate) fn codegen_and_compile_fn<'tcx>(
++ tcx: TyCtxt<'tcx>,
++ cx: &mut crate::CodegenCx,
++ cached_context: &mut Context,
++ module: &mut dyn Module,
++ instance: Instance<'tcx>,
++) {
++ tcx.prof.generic_activity("codegen and compile fn").run(|| {
++ let _inst_guard =
++ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
++
++ let cached_func = std::mem::replace(&mut cached_context.func, Function::new());
++ let codegened_func = crate::base::codegen_fn(tcx, cx, cached_func, module, instance);
++
++ crate::base::compile_fn(cx, cached_context, module, codegened_func);
++ });
++}
++
+extern "C" fn clif_jit_fn(
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+) -> *const u8 {
+ // send the JIT request to the rustc thread, with a channel for the response
+ let (tx, rx) = mpsc::channel();
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+ .send()
+ .expect("rustc thread hung up before lazy JIT request was sent");
+
+ // block on JIT compilation result
+ rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+ // lift is used to ensure the correct lifetime for instance.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+ let jit_module = &mut lazy_jit_state.jit_module;
+ let backend_config = lazy_jit_state.backend_config.clone();
+
+ let name = tcx.symbol_name(instance).name;
+ let sig = crate::abi::get_function_sig(
+ tcx,
+ jit_module.target_config().default_call_conv,
+ instance,
+ );
+ let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let current_ptr = jit_module.read_got_entry(func_id);
+
+ // If the function's GOT entry has already been updated to point at something other
+ // than the shim trampoline, don't re-jit but just return the new pointer instead.
+ // This does not need synchronization as this code is executed only by a sole rustc
+ // thread.
+ if current_ptr != trampoline_ptr {
+ return current_ptr;
+ }
+
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config,
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
++ codegen_and_compile_fn(tcx, &mut cx, &mut Context::new(), jit_module, instance);
+
+ assert!(cx.global_asm.is_empty());
+ jit_module.finalize_definitions().unwrap();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
+fn dep_symbol_lookup_fn(
+ sess: &Session,
+ crate_info: CrateInfo,
+) -> Box<dyn Fn(&str) -> Option<*const u8>> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let data = &crate_info
+ .dependency_formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &cnum in &crate_info.used_crates {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = crate_info.crate_name[&cnum];
+ let mut err = sess.struct_err(&format!("Can't load static lib {}", name));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let imported_dylibs = Box::leak(
+ dylib_paths
+ .into_iter()
+ .map(|path| unsafe { libloading::Library::new(&path).unwrap() })
+ .collect::<Box<[_]>>(),
+ );
+
+ sess.abort_if_errors();
+
+ Box::new(move |sym_name| {
+ for dylib in &*imported_dylibs {
+ if let Ok(sym) = unsafe { dylib.get::<*const u8>(sym_name.as_bytes()) } {
+ return Some(*sym);
+ }
+ }
+ None
+ })
+}
+
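+/// Create a shim for `inst` that on its first call asks the rustc thread (through
+/// `__clif_jit_fn`) to compile the real function, then forwards all arguments to the
+/// freshly jitted code.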
+fn codegen_shim<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cx: &mut CodegenCx,
+ cached_context: &mut Context,
+ module: &mut JITModule,
+ inst: Instance<'tcx>,
+) {
+ let pointer_type = module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name;
+ let sig = crate::abi::get_function_sig(tcx, module.target_config().default_call_conv, inst);
+ let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ let context = cached_context;
+ context.clear();
+ let trampoline = &mut context.func;
+ trampoline.signature = sig.clone();
+
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+ let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+ let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+ trampoline_builder.switch_to_block(entry_block);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
+ module.define_function(func_id, context).unwrap();
+ cx.unwind_context.add_function(func_id, context, module.isa());
+}
--- /dev/null
- tcx.sess.time("predefine functions", || {
+//! Drivers are responsible for calling [`codegen_fn`] or [`codegen_static`] for each mono item and
+//! performing any further actions like JIT executing or writing object files.
+//!
+//! [`codegen_fn`]: crate::base::codegen_fn
+//! [`codegen_static`]: crate::constant::codegen_static
+
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) mod aot;
+#[cfg(feature = "jit")]
+pub(crate) mod jit;
+
+fn predefine_mono_items<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
-
- fn time<R>(tcx: TyCtxt<'_>, display: bool, name: &'static str, f: impl FnOnce() -> R) -> R {
- if display {
- println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
- let before = std::time::Instant::now();
- let res = tcx.sess.time(name, f);
- let after = std::time::Instant::now();
- println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
- res
- } else {
- tcx.sess.time(name, f)
- }
- }
++ tcx.prof.generic_activity("predefine functions").run(|| {
+ let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
+ for &(mono_item, (linkage, visibility)) in mono_items {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let name = tcx.symbol_name(instance).name;
+ let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
+ let sig =
+ get_function_sig(tcx, module.target_config().default_call_conv, instance);
+ let linkage = crate::linkage::get_clif_linkage(
+ mono_item,
+ linkage,
+ visibility,
+ is_compiler_builtins,
+ );
+ module.declare_function(name, linkage, &sig).unwrap();
+ }
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+ }
+ }
+ });
+}
--- /dev/null
- use rustc_hir::ItemId;
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::io::Write;
+use std::path::PathBuf;
+use std::process::{Command, Stdio};
+use std::sync::Arc;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
- InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
++use rustc_hir::{InlineAsmOperand, ItemId};
+use rustc_session::config::{OutputFilenames, OutputType};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_global_asm_item(tcx: TyCtxt<'_>, global_asm: &mut String, item_id: ItemId) {
+ let item = tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
+ if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ global_asm.push_str("\n.intel_syntax noprefix\n");
+ } else {
+ global_asm.push_str("\n.att_syntax\n");
+ }
+ for piece in asm.template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s),
++ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: op_sp } => {
++ match asm.operands[operand_idx].0 {
++ InlineAsmOperand::Const { ref anon_const } => {
++ let const_value =
++ tcx.const_eval_poly(anon_const.def_id.to_def_id()).unwrap_or_else(
++ |_| span_bug!(op_sp, "asm const cannot be resolved"),
++ );
++ let ty = tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
++ let string = rustc_codegen_ssa::common::asm_const_to_str(
++ tcx,
++ op_sp,
++ const_value,
++ RevealAllLayoutCx(tcx).layout_of(ty),
++ );
++ global_asm.push_str(&string);
++ }
++ InlineAsmOperand::SymFn { anon_const } => {
++ let ty = tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
++ let instance = match ty.kind() {
++ &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
++ _ => span_bug!(op_sp, "asm sym is not a function"),
++ };
++ let symbol = tcx.symbol_name(instance);
++ // FIXME handle the case where the function was made private to the
++ // current codegen unit
++ global_asm.push_str(symbol.name);
++ }
++ InlineAsmOperand::SymStatic { path: _, def_id } => {
++ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
++ let symbol = tcx.symbol_name(instance);
++ global_asm.push_str(symbol.name);
++ }
++ InlineAsmOperand::In { .. }
++ | InlineAsmOperand::Out { .. }
++ | InlineAsmOperand::InOut { .. }
++ | InlineAsmOperand::SplitInOut { .. } => {
++ span_bug!(op_sp, "invalid operand type for global_asm!")
++ }
++ }
++ }
+ }
+ }
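+        // Reset to AT&T syntax, the GNU assembler default, for whatever is emitted next.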
+ global_asm.push_str("\n.att_syntax\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct GlobalAsmConfig {
+ asm_enabled: bool,
+ assembler: PathBuf,
+ pub(crate) output_filenames: Arc<OutputFilenames>,
+}
+
+impl GlobalAsmConfig {
+ pub(crate) fn new(tcx: TyCtxt<'_>) -> Self {
+ let asm_enabled = cfg!(feature = "inline_asm") && !tcx.sess.target.is_like_windows;
+
+ GlobalAsmConfig {
+ asm_enabled,
+ assembler: crate::toolchain::get_toolchain_binary(tcx.sess, "as"),
+ output_filenames: tcx.output_filenames(()).clone(),
+ }
+ }
+}
+
+pub(crate) fn compile_global_asm(
+ config: &GlobalAsmConfig,
+ cgu_name: &str,
+ global_asm: &str,
+) -> Result<Option<PathBuf>, String> {
+ if global_asm.is_empty() {
+ return Ok(None);
+ }
+
+ if !config.asm_enabled {
+ if global_asm.contains("__rust_probestack") {
+ return Ok(None);
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ return Err(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift"
+ .to_owned(),
+ );
+ } else {
+ return Err("asm! and global_asm! are not yet supported on Windows".to_owned());
+ }
+ }
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = config.output_filenames.temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(&config.assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ return Err(format!("Failed to assemble `{}`", global_asm));
+ }
+
+ Ok(Some(global_asm_object_file))
+}
+
+pub(crate) fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
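+
+// A hypothetical example of the helper above: the postfix lands between the
+// file stem and the extension, so
+// `add_file_stem_postfix(PathBuf::from("crate_name.cgu0.o"), ".asm")` returns
+// `crate_name.cgu0.asm.o`.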
--- /dev/null
+//! Codegen of `asm!` invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_span::sym;
+use rustc_target::asm::*;
+
++enum CInlineAsmOperand<'tcx> {
++ In {
++ reg: InlineAsmRegOrRegClass,
++ value: CValue<'tcx>,
++ },
++ Out {
++ reg: InlineAsmRegOrRegClass,
++ late: bool,
++ place: Option<CPlace<'tcx>>,
++ },
++ InOut {
++ reg: InlineAsmRegOrRegClass,
++ _late: bool,
++ in_value: CValue<'tcx>,
++ out_place: Option<CPlace<'tcx>>,
++ },
++ Const {
++ value: String,
++ },
++ Symbol {
++ symbol: String,
++ },
++}
++
+pub(crate) fn codegen_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
- _span: Span,
++ span: Span,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ destination: Option<mir::BasicBlock>,
+) {
+ // FIXME add .eh_frame unwind info directives
+
+ if !template.is_empty() {
+ // Used by panic_abort
+ if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
+ fx.bcx.ins().trap(TrapCode::User(1));
+ return;
+ }
+
+ // Used by stdarch
+ if template[0] == InlineAsmTemplatePiece::String("mov ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String(", rbx".to_string())
+ && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[4] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[6] == InlineAsmTemplatePiece::String("xchg ".to_string())
+ && matches!(
+ template[7],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[8] == InlineAsmTemplatePiece::String(", rbx".to_string())
+ {
+ assert_eq!(operands.len(), 4);
+ let (leaf, eax_place) = match operands[1] {
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
+ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
+ _ => unreachable!(),
+ };
+ let ebx_place = match operands[0] {
+ InlineAsmOperand::Out {
+ reg:
+ InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::reg,
+ )),
+ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+ let (sub_leaf, ecx_place) = match operands[2] {
+ InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
+ late: _,
+ ref in_value,
+ out_place: Some(out_place),
+ } => (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place),
+ ),
+ _ => unreachable!(),
+ };
+ let edx_place = match operands[3] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
+ late: _,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+
+ let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
+
+ eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ }
+
+ // Used by compiler-builtins
+ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ return;
+ } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ return;
+ }
+
+ // Used by measureme
+ if template[0] == InlineAsmTemplatePiece::String("xor %eax, %eax".to_string())
+ && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[2] == InlineAsmTemplatePiece::String("mov %rbx, ".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[6] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[7] == InlineAsmTemplatePiece::String("mov ".to_string())
+ && matches!(
+ template[8],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[9] == InlineAsmTemplatePiece::String(", %rbx".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("rdpmc".to_string()) {
+ // Return zero dummy values for all performance counters
+ match operands[0] {
+ InlineAsmOperand::In {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx)),
+ value: _,
+ } => {}
+ _ => unreachable!(),
+ };
+ let lo = match operands[1] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+ let hi = match operands[2] {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx)),
+ late: true,
+ place: Some(place),
+ } => crate::base::codegen_place(fx, place),
+ _ => unreachable!(),
+ };
+
+ let u32_layout = fx.layout_of(fx.tcx.types.u32);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ lo.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+ hi.write_cvalue(fx, CValue::by_val(zero, u32_layout));
+
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("lock xadd ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 1, modifier: None, span: _ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String(", (".to_string())
+ && matches!(
+ template[3],
+ InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: None, span: _ }
+ )
+ && template[4] == InlineAsmTemplatePiece::String(")".to_string())
+ {
+ let destination_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(destination_block, &[]);
+ return;
+ }
+ }
+
++ let operands = operands
++ .into_iter()
++ .map(|operand| match *operand {
++ InlineAsmOperand::In { reg, ref value } => {
++ CInlineAsmOperand::In { reg, value: crate::base::codegen_operand(fx, value) }
++ }
++ InlineAsmOperand::Out { reg, late, ref place } => CInlineAsmOperand::Out {
++ reg,
++ late,
++ place: place.map(|place| crate::base::codegen_place(fx, place)),
++ },
++ InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
++ CInlineAsmOperand::InOut {
++ reg,
++ _late: late,
++ in_value: crate::base::codegen_operand(fx, in_value),
++ out_place: out_place.map(|place| crate::base::codegen_place(fx, place)),
++ }
++ }
++ InlineAsmOperand::Const { ref value } => {
++ let (const_value, ty) = crate::constant::eval_mir_constant(fx, &*value)
++ .unwrap_or_else(|| span_bug!(span, "asm const cannot be resolved"));
++ let value = rustc_codegen_ssa::common::asm_const_to_str(
++ fx.tcx,
++ span,
++ const_value,
++ fx.layout_of(ty),
++ );
++ CInlineAsmOperand::Const { value }
++ }
++ InlineAsmOperand::SymFn { ref value } => {
++ let literal = fx.monomorphize(value.literal);
++ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
++ let instance = ty::Instance::resolve_for_fn_ptr(
++ fx.tcx,
++ ty::ParamEnv::reveal_all(),
++ def_id,
++ substs,
++ )
++ .unwrap();
++ let symbol = fx.tcx.symbol_name(instance);
++
++ // Pass a wrapper rather than the function itself as the function itself may not
++ // be exported from the main codegen unit and may thus be unreachable from the
++ // object file created by an external assembler.
++ let inline_asm_index = fx.cx.inline_asm_index.get();
++ fx.cx.inline_asm_index.set(inline_asm_index + 1);
++ let wrapper_name = format!(
++ "__inline_asm_{}_wrapper_n{}",
++ fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
++ inline_asm_index
++ );
++ let sig =
++ get_function_sig(fx.tcx, fx.target_config.default_call_conv, instance);
++ create_wrapper_function(
++ fx.module,
++ &mut fx.cx.unwind_context,
++ sig,
++ &wrapper_name,
++ symbol.name,
++ );
++
++ CInlineAsmOperand::Symbol { symbol: wrapper_name }
++ } else {
++ span_bug!(span, "invalid type for asm sym (fn)");
++ }
++ }
++ InlineAsmOperand::SymStatic { def_id } => {
++ assert!(fx.tcx.is_static(def_id));
++ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
++ CInlineAsmOperand::Symbol { symbol: fx.tcx.symbol_name(instance).name.to_owned() }
++ }
++ })
++ .collect::<Vec<_>>();
++
+ let mut inputs = Vec::new();
+ let mut outputs = Vec::new();
+
+ let mut asm_gen = InlineAssemblyGenerator {
+ tcx: fx.tcx,
+ arch: fx.tcx.sess.asm_arch.unwrap(),
+ enclosing_def_id: fx.instance.def_id(),
+ template,
- operands,
++ operands: &operands,
+ options,
+ registers: Vec::new(),
+ stack_slots_clobber: Vec::new(),
+ stack_slots_input: Vec::new(),
+ stack_slots_output: Vec::new(),
+ stack_slot_size: Size::from_bytes(0),
+ };
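+ // Registers are allocated before stack slots so the clobber save area can
+ // be sized from the register classes chosen here.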
+ asm_gen.allocate_registers();
+ asm_gen.allocate_stack_slots();
+
+ let inline_asm_index = fx.cx.inline_asm_index.get();
+ fx.cx.inline_asm_index.set(inline_asm_index + 1);
+ let asm_name = format!(
+ "__inline_asm_{}_n{}",
+ fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
+ inline_asm_index
+ );
+
+ let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
+ fx.cx.global_asm.push_str(&generated_asm);
+
+ for (i, operand) in operands.iter().enumerate() {
- match *operand {
- InlineAsmOperand::In { reg: _, ref value } => {
- inputs.push((
- asm_gen.stack_slots_input[i].unwrap(),
- crate::base::codegen_operand(fx, value).load_scalar(fx),
- ));
++ match operand {
++ CInlineAsmOperand::In { reg: _, value } => {
++ inputs.push((asm_gen.stack_slots_input[i].unwrap(), value.load_scalar(fx)));
+ }
- InlineAsmOperand::Out { reg: _, late: _, place } => {
++ CInlineAsmOperand::Out { reg: _, late: _, place } => {
+ if let Some(place) = place {
- outputs.push((
- asm_gen.stack_slots_output[i].unwrap(),
- crate::base::codegen_place(fx, place),
- ));
++ outputs.push((asm_gen.stack_slots_output[i].unwrap(), place.clone()));
+ }
+ }
- InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
- inputs.push((
- asm_gen.stack_slots_input[i].unwrap(),
- crate::base::codegen_operand(fx, in_value).load_scalar(fx),
- ));
++ CInlineAsmOperand::InOut { reg: _, _late: _, in_value, out_place } => {
++ inputs.push((asm_gen.stack_slots_input[i].unwrap(), in_value.load_scalar(fx)));
+ if let Some(out_place) = out_place {
- outputs.push((
- asm_gen.stack_slots_output[i].unwrap(),
- crate::base::codegen_place(fx, out_place),
- ));
++ outputs.push((asm_gen.stack_slots_output[i].unwrap(), out_place.clone()));
+ }
+ }
- InlineAsmOperand::Const { value: _ } => todo!(),
- InlineAsmOperand::SymFn { value: _ } => todo!(),
- InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
++ CInlineAsmOperand::Const { value: _ } | CInlineAsmOperand::Symbol { symbol: _ } => {}
+ }
+ }
+
+ call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
+
+ match destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ }
+}
+
+struct InlineAssemblyGenerator<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ arch: InlineAsmArch,
+ enclosing_def_id: DefId,
+ template: &'a [InlineAsmTemplatePiece],
- operands: &'a [InlineAsmOperand<'tcx>],
++ operands: &'a [CInlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ registers: Vec<Option<InlineAsmReg>>,
+ stack_slots_clobber: Vec<Option<Size>>,
+ stack_slots_input: Vec<Option<Size>>,
+ stack_slots_output: Vec<Option<Size>>,
+ stack_slot_size: Size,
+}
+
+impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
+ fn allocate_registers(&mut self) {
+ let sess = self.tcx.sess;
+ let map = allocatable_registers(
+ self.arch,
+ sess.relocation_model(),
+ self.tcx.asm_target_features(self.enclosing_def_id),
+ &sess.target,
+ );
+ let mut allocated = FxHashMap::<_, (bool, bool)>::default();
+ let mut regs = vec![None; self.operands.len()];
+
+ // Add explicit registers to the allocated set.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
++ CInlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
- InlineAsmOperand::Out {
- reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
++ CInlineAsmOperand::Out {
++ reg: InlineAsmRegOrRegClass::Reg(reg),
++ late: true,
++ ..
+ } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
- InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
- | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
++ CInlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
++ | CInlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate out/inout/inlateout registers first because they are more constrained.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::Out {
++ CInlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: false,
+ ..
+ }
- | InlineAsmOperand::InOut {
++ | CInlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::RegClass(class), ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.contains_key(&r) {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate in/lateout.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
++ CInlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().0 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
- InlineAsmOperand::Out {
++ CInlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: true,
+ ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().1 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ _ => (),
+ }
+ }
+
+ self.registers = regs;
+ }
+
+ fn allocate_stack_slots(&mut self) {
+ let mut slot_size = Size::from_bytes(0);
+ let mut slots_clobber = vec![None; self.operands.len()];
+ let mut slots_input = vec![None; self.operands.len()];
+ let mut slots_output = vec![None; self.operands.len()];
+
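+ // Reserves a slot large and aligned enough for the biggest value the given
+ // register class can hold and returns the slot's offset in the scratch area.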
+ let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
+ let reg_size =
+ reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
+ let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+ let offset = slot_size.align_to(align);
+ *slot_size = offset + reg_size;
+ offset
+ };
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for saving clobbered registers
+ let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
+ .unwrap()
+ .clobbered_regs();
+ for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
+ let mut need_save = true;
+ // If the register overlaps with a register clobbered by a function call,
+ // then we don't need to save it.
+ for r in abi_clobber {
+ r.overlapping_regs(|r| {
+ if r == reg {
+ need_save = false;
+ }
+ });
+
+ if !need_save {
+ break;
+ }
+ }
+
+ if need_save {
+ slots_clobber[i] = Some(new_slot(reg.reg_class()));
+ }
+ }
+
+ // Allocate stack slots for inout
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
++ CInlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
+ let slot = new_slot(reg.reg_class());
+ slots_input[i] = Some(slot);
+ slots_output[i] = Some(slot);
+ }
+ _ => (),
+ }
+ }
+
+ let slot_size_before_input = slot_size;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for input
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::In { reg, .. }
- | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
++ CInlineAsmOperand::In { reg, .. }
++ | CInlineAsmOperand::InOut { reg, out_place: None, .. } => {
+ slots_input[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ // Reset slot size to before input so that input and output operands can overlap
+ // and save some memory.
+ let slot_size_after_input = slot_size;
+ slot_size = slot_size_before_input;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for output
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
- InlineAsmOperand::Out { reg, place: Some(_), .. } => {
++ CInlineAsmOperand::Out { reg, place: Some(_), .. } => {
+ slots_output[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ slot_size = slot_size.max(slot_size_after_input);
+
+ self.stack_slots_clobber = slots_clobber;
+ self.stack_slots_input = slots_input;
+ self.stack_slots_output = slots_output;
+ self.stack_slot_size = slot_size;
+ }
+
+ fn generate_asm_wrapper(&self, asm_name: &str) -> String {
+ let mut generated_asm = String::new();
+ writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+ writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+ let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+
+ if is_x86 {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+ Self::prologue(&mut generated_asm, self.arch);
+
+ // Save clobbered registers
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+ }
+
+ // Write input registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_input.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".att_syntax\n");
+ }
+
+ // The actual inline asm
+ for piece in self.template {
+ match piece {
+ InlineAsmTemplatePiece::String(s) => {
+ generated_asm.push_str(s);
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
- if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
- generated_asm.push('%');
- }
- self.registers[*operand_idx]
- .unwrap()
- .emit(&mut generated_asm, self.arch, *modifier)
- .unwrap();
++ match self.operands[*operand_idx] {
++ CInlineAsmOperand::In { .. }
++ | CInlineAsmOperand::Out { .. }
++ | CInlineAsmOperand::InOut { .. } => {
++ if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
++ generated_asm.push('%');
++ }
++ self.registers[*operand_idx]
++ .unwrap()
++ .emit(&mut generated_asm, self.arch, *modifier)
++ .unwrap();
++ }
++ CInlineAsmOperand::Const { ref value } => {
++ generated_asm.push_str(value);
++ }
++ CInlineAsmOperand::Symbol { ref symbol } => generated_asm.push_str(symbol),
+ }
+ }
+ }
+ }
+ generated_asm.push('\n');
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ // Read output registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_output.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ // Restore clobbered registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ Self::epilogue(&mut generated_asm, self.arch);
+ } else {
+ Self::epilogue_noreturn(&mut generated_asm, self.arch);
+ }
+
+ if is_x86 {
+ generated_asm.push_str(".att_syntax\n");
+ }
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+ generated_asm.push_str(".text\n");
+ generated_asm.push_str("\n\n");
+
+ generated_asm
+ }
+
+ fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" push ebp\n");
+ generated_asm.push_str(" mov ebp,[esp+8]\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" push rbp\n");
+ generated_asm.push_str(" mov rbp,rdi\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" addi sp, sp, -8\n");
+ generated_asm.push_str(" sw ra, 4(sp)\n");
+ generated_asm.push_str(" sw s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" addi sp, sp, -16\n");
+ generated_asm.push_str(" sd ra, 8(sp)\n");
+ generated_asm.push_str(" sd s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ _ => unimplemented!("prologue for {:?}", arch),
+ }
+ }
+
+ fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" pop ebp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" pop rbp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw s0, 0(sp)\n");
+ generated_asm.push_str(" lw ra, 4(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 8\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld s0, 0(sp)\n");
+ generated_asm.push_str(" ld ra, 8(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 16\n");
+ generated_asm.push_str(" ret\n");
+ }
+ _ => unimplemented!("epilogue for {:?}", arch),
+ }
+ }
+
+ fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" ud2\n");
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ebreak\n");
+ }
+ _ => unimplemented!("epilogue_noreturn for {:?}", arch),
+ }
+ }
+
+ fn save_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ write!(generated_asm, " mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::X86_64 => {
+ write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" sw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" sd ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("save_register for {:?}", arch),
+ }
+ }
+
+ fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("restore_register for {:?}", arch),
+ }
+ }
+}
+
+fn call_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ asm_name: &str,
+ slot_size: Size,
+ inputs: Vec<(Size, Value)>,
+ outputs: Vec<(Size, CPlace<'tcx>)>,
+) {
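+ // The wrapper generated by `generate_asm_wrapper` behaves like a C function
+ // taking a single pointer to a scratch stack slot: inputs are stored into
+ // the slot before the call and outputs are loaded back out afterwards.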
+ let stack_slot = fx.bcx.func.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: u32::try_from(slot_size.bytes()).unwrap(),
+ });
+ if fx.clif_comments.enabled() {
+ fx.add_comment(stack_slot, "inline asm scratch slot");
+ }
+
+ let inline_asm_func = fx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
+ let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(inline_asm_func, asm_name);
+ }
+
+ for (offset, value) in inputs {
+ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ }
+
+ let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+ for (offset, place) in outputs {
+ let ty = fx.clif_type(place.layout().ty).unwrap();
+ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+ }
+}
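+
+// For illustration only: for something like
+// `asm!("add {0}, {1}", inout(reg) a, in(reg) b)` on x86_64, the wrapper
+// pushed into `global_asm` takes roughly this shape (the exact registers,
+// offsets and clobber handling depend on the allocation above):
+//
+//     __inline_asm___some_cgu_n0:
+//         push rbp
+//         mov rbp,rdi          // prologue: rbp points at the scratch slot
+//         mov rax, [rbp+0x0]   // write input registers from the slot
+//         mov rcx, [rbp+0x8]
+//         add rax, rcx         // the user's template
+//         mov [rbp+0x0], rax   // read output registers back into the slot
+//         pop rbp
+//         ret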
--- /dev/null
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+macro_rules! intrinsic_args {
+ ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+ #[allow(unused_parens)]
+ let ($($arg),*) = if let [$($arg),*] = $args {
+ ($(codegen_operand($fx, $arg)),*)
+ } else {
+ $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
+ };
+ }
+}
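+
+// For example, `intrinsic_args!(fx, args => (x, y); intrinsic);` binds `x` and
+// `y` to the two codegenned arguments and calls `bug_on_incorrect_arg_count`
+// if the intrinsic was passed a different number of arguments.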
+
+mod cpuid;
+mod llvm;
+mod llvm_aarch64;
+mod llvm_x86;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::layout::HasParamEnv;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
+ bug!("wrong number of args for intrinsic {}", intrinsic);
+}
+
+fn report_atomic_type_validation_error<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'tcx>,
+) {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ intrinsic, ty
+ ),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane_typed<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx);
+ let y_lane = y.value_lane(fx, lane_idx);
+
+ let res_lane = f(fx, x_lane, y_lane);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
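+// Folds `f` over the lanes of `val`: `acc`, when given, seeds the reduction,
+// otherwise lane 0 does and the fold starts at lane 1.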
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
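+// Expands a boolean to `0` or `!0` (all bits set) of type `ty`, bitcasting
+// the result back when `ty` is a float type.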
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ val: Value,
+) -> Value {
+ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let mut res = fx.bcx.ins().bmask(int_ty, val);
+
+ if ty.is_float() {
+ res = codegen_bitcast(fx, ty, res);
+ }
+
+ res
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
- let target = if let Some(target) = target {
- target
- } else {
- // Insert non returning intrinsics here
- match intrinsic {
- sym::abort => {
- fx.bcx.ins().trap(TrapCode::User(0));
- }
- sym::transmute => {
- crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
- }
- _ => unimplemented!("unsupported intrinsic {}", intrinsic),
- }
- return;
- };
-
+ if intrinsic.as_str().starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(
+ fx,
+ intrinsic,
+ substs,
+ args,
+ destination,
- target,
++ target.expect("target for simd intrinsic"),
+ source_info.span,
+ );
+ } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
- let ret_block = fx.get_block(target);
++ let ret_block = fx.get_block(target.expect("target for float intrinsic"));
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ codegen_regular_intrinsic_call(
+ fx,
+ instance,
+ intrinsic,
+ substs,
+ args,
+ destination,
++ target,
+ source_info,
+ );
+ }
+}
+
+fn codegen_float_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+) -> bool {
+ let (name, arg_count, ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ _ => return false,
+ };
+
+ if args.len() != arg_count {
+ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
+ let (a, b, c);
+ let args = match args {
+ [x] => {
+ a = [codegen_operand(fx, x)];
+ &a as &[_]
+ }
+ [x, y] => {
+ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ &b
+ }
+ [x, y, z] => {
+ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ &c
+ }
+ _ => unreachable!(),
+ };
+
+ let layout = fx.layout_of(ty);
+ let res = match intrinsic {
+ sym::fmaf32 | sym::fmaf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ let c = args[2].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+ }
+ sym::copysignf32 | sym::copysignf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+ }
+ sym::fabsf32
+ | sym::fabsf64
+ | sym::floorf32
+ | sym::floorf64
+ | sym::ceilf32
+ | sym::ceilf64
+ | sym::truncf32
+ | sym::truncf64 => {
+ let a = args[0].load_scalar(fx);
+
+ let val = match intrinsic {
+ sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
+ sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
+ sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
+ sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+ _ => unreachable!(),
+ };
+
+ CValue::by_val(val, layout)
+ }
+ // These intrinsics aren't supported natively by Cranelift.
+ // Lower them to a libcall.
+ _ => fx.easy_call(name, &args, ty),
+ };
+
+ ret.write_cvalue(fx, res);
+
+ true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ intrinsic: Symbol,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ destination: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ match intrinsic {
++ sym::abort => {
++ fx.bcx.ins().trap(TrapCode::User(0));
++ return;
++ }
+ sym::likely | sym::unlikely => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ ret.write_cvalue(fx, a);
+ }
+ sym::breakpoint => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().debugtrap();
+ }
+ sym::copy | sym::copy_nonoverlapping => {
+ intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+ let src = src.load_scalar(fx);
+ let dst = dst.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+ // NOTE: the volatile variants have src and dst swapped
+ intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+ let dst = dst.load_scalar(fx);
+ let src = src.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::size_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+ sym::min_align_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::vtable_size => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let size = crate::vtable::size_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+
+ sym::vtable_align => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let align = crate::vtable::min_align_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::unchecked_div
+ | sym::exact_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
+ ret.write_cvalue(fx, res);
+ }
+ sym::rotate_left => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+ sym::rotate_right => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+
+ // The only difference between offset and arith_offset is regarding UB.
+ // Because Cranelift doesn't have UB, both are codegen'ed the same way.
+ sym::offset | sym::arith_offset => {
+ intrinsic_args!(fx, args => (base, offset); intrinsic);
+ let offset = offset.load_scalar(fx);
+
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ }
+
+ sym::ptr_mask => {
+ intrinsic_args!(fx, args => (ptr, mask); intrinsic);
+ let ptr_layout = ptr.layout();
+ let ptr = ptr.load_scalar(fx);
+ let mask = mask.load_scalar(fx);
+ // Write the masked pointer to the return place; discarding the `band`
+ // result would leave the destination uninitialized.
+ let res = fx.bcx.ins().band(ptr, mask);
+ ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
+ }
+
+ sym::transmute => {
+ intrinsic_args!(fx, args => (from); intrinsic);
+
++ if ret.layout().abi.is_uninhabited() {
++ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
++ return;
++ }
++
+ ret.write_cvalue_transmute(fx, from);
+ }
+ sym::write_bytes | sym::volatile_set_memory => {
+ intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+ let val = val.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+ }
+ sym::ctlz | sym::ctlz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = fx.bcx.ins().clz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::cttz | sym::cttz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = fx.bcx.ins().ctz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::ctpop => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().popcnt(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bitreverse => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().bitrev(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bswap => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
+ val
+ } else {
+ fx.bcx.ins().bswap(val)
+ };
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic_nounwind(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ source_info,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid
+ && !fx.tcx.permits_zero_init(fx.param_env().and(layout))
+ {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic_nounwind(
+ fx,
+ &format!(
+ "attempted to zero-initialize type `{}`, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_mem_uninitialized_valid
+ && !fx.tcx.permits_uninit_init(fx.param_env().and(layout))
+ {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic_nounwind(
+ fx,
+ &format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ )
+ });
+ return;
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ }
+
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ intrinsic_args!(fx, args => (ptr, base); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let base = base.load_scalar(fx);
+ let ty = substs.type_at(0);
+
+ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff_bytes = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = if intrinsic == sym::ptr_offset_from_unsigned {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+ // but unsigned is slightly easier to codegen, so might as well.
+ CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+ } else {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+ CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+ };
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_cmp => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
+ ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
+ }
+
+ sym::caller_location => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let caller_location = fx.get_caller_location(source_info);
+ ret.write_cvalue(fx, caller_location);
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_fence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_load") => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+ let clif_ty = fx.clif_type(ty).unwrap();
+
+ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_store") => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+ intrinsic_args!(fx, args => (ptr, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+ // both atomic_cxchg_* and atomic_cxchgweak_*
+ intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+ let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
+ ret.write_cvalue(fx, ret_val)
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_and") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_or") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xor") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_nand") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_max") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umax") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_min") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umin") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+
+ sym::minnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::minnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+
+ kw::Try => {
+ intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+ let f = f.load_scalar(fx);
+ let data = data.load_scalar(fx);
+ let _catch_fn = catch_fn.load_scalar(fx);
+
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
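+ // Return 0, the try intrinsic's code for "no panic occurred".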
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ }
+
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
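+ // The *_fast intrinsics are lowered to the ordinary float ops; the extra freedom
+ // their relaxed semantics allow is simply not exploited here.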
+ let res = crate::num::codegen_float_binop(
+ fx,
+ match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ },
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ }
+ sym::float_to_int_unchecked => {
+ intrinsic_args!(fx, args => (f); intrinsic);
+ let f = f.load_scalar(fx);
+
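+ // The source is a float, so the cast is never from a signed integer (hence `false`);
+ // the signedness of the target integer comes from the return type.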
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ }
+
+ sym::raw_eq => {
+ intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+ let lhs_ref = lhs_ref.load_scalar(fx);
+ let rhs_ref = rhs_ref.load_scalar(fx);
+
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
+ // FIXME add and use emit_small_memcmp
+ let is_eq_value = if size == Size::ZERO {
+ // No bytes means they're trivially equal
+ fx.bcx.ins().iconst(types::I8, 1)
+ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+ // Can't use `trusted` for these loads; they could be unaligned.
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+ fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
+ } else {
+ // Just call `memcmp` (like slices do in core) when the
+ // size is too large or it's not a power-of-two.
+ let signed_bytes = i64::try_from(size.bytes()).unwrap();
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ref, rhs_ref, bytes_val];
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
+ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+ }
+
+ sym::const_allocate => {
+ intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
+ // const_allocate is only meaningful during const evaluation; at runtime it returns a null pointer.
+ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
+ }
+
+ sym::const_deallocate => {
+ intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+ // const_deallocate is only meaningful during const evaluation; at runtime it is a nop.
+ }
+
+ sym::black_box => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
+ }
+
+ // FIXME implement variadics in cranelift
+ sym::va_copy | sym::va_arg | sym::va_end => {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ _ => {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+ }
+ }
+
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+}
--- /dev/null
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ base: Pointer,
+ extra: Option<Value>,
+ layout: TyAndLayout<'tcx>,
+ field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_layout = layout.field(&*fx, field.index());
+
+ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+ };
+
+ if let Some(extra) = extra {
+ if field_layout.is_sized() {
+ return simple(fx);
+ }
+ match field_layout.ty.kind() {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+ ty::Adt(def, _) if def.repr().packed() => {
+ assert_eq!(layout.align.abi.bytes(), 1);
+ simple(fx)
+ }
+ _ => {
+ // We have to align the offset for DSTs.
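+ // Round unaligned_offset up to the dynamic alignment of the unsized field,
+ // i.e. (offset + align - 1) & -align, built branchlessly from the instructions below.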
+ let unaligned_offset = field_offset.bytes();
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
+ let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
+ let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+ let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+ let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+ (base.offset_value(fx, offset), field_layout)
+ }
+ }
+ } else {
+ simple(fx)
+ }
+}
+
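+/// Computes the offset of the second scalar of a ScalarPair: the size of the first scalar
+/// rounded up to the alignment of the second.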
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
+ let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
+ Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
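+// How a value is represented: in memory with optional metadata for unsized types (ByRef),
+// as a single SSA value (ByVal), or as two SSA values for ScalarPair layouts (ByValPair).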
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+ ByRef(Pointer, Option<Value>),
+ ByVal(Value),
+ ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+ pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, None), layout)
+ }
+
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+ }
+
+ pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByVal(value), layout)
+ }
+
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByValPair(value, extra), layout)
+ }
+
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.1
+ }
+
+ // FIXME remove
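+ /// Spills the value to a new stack slot if it is not already in memory and returns a
+ /// pointer to it together with the unsized metadata, if any.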
+ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => (ptr, meta),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+ let cplace = CPlace::new_stack_slot(fx, layout);
+ cplace.write_cvalue(fx, self);
+ (cplace.to_ptr(), None)
+ }
+ }
+ }
+
+ // FIXME remove
+ /// Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
+ /// vtable pointer.
+ pub(crate) fn dyn_star_force_data_on_stack(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ) -> (Value, Value) {
+ assert!(self.1.ty.is_dyn_star());
+
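+ // A dyn* value is a (data, vtable) scalar pair; only the data half may need to be
+ // spilled to memory.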
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match self.1.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (ptr.get_addr(fx), vtable)
+ }
+ CValueInner::ByValPair(data, vtable) => {
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
+ / 16
+ * 16,
+ });
+ let data_ptr = Pointer::stack_slot(stack_slot);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ data_ptr.store(fx, data, flags);
+
+ (data_ptr.get_addr(fx), vtable)
+ }
+ CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
+ unreachable!("dyn_star_force_data_on_stack({:?})", self)
+ }
+ }
+ }
+
+ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+ }
+ }
+
+ /// Load a value whose layout.abi is Abi::Scalar or Abi::Vector
+ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let clif_ty = match layout.abi {
+ Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ .by(u32::try_from(count).unwrap())
+ .unwrap(),
+ _ => unreachable!("{:?}", layout.ty),
+ };
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, clif_ty, flags)
+ }
+ CValueInner::ByVal(value) => value,
+ CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+ CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+ }
+ }
+
+ /// Load a pair of values whose layout.abi is Abi::ScalarPair
+ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match layout.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("load_scalar_pair({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let val1 = ptr.load(fx, clif_ty1, flags);
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (val1, val2)
+ }
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
+ CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+ CValueInner::ByValPair(val1, val2) => (val1, val2),
+ }
+ }
+
+ pub(crate) fn value_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
+ }
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
+ }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
+ CValueInner::ByRef(ptr, None) => {
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+ CValue::by_ref(field_ptr, field_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+ /// such that you can access individual lanes.
+ pub(crate) fn value_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count: _ } => {
+ assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+ let lane_idx = u8::try_from(lane_idx).unwrap();
+ let lane = fx.bcx.ins().extractlane(val, lane_idx);
+ CValue::by_val(lane, lane_layout)
+ }
+ _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(_, _) => unreachable!(),
+ CValueInner::ByRef(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CValue::by_ref(field_ptr, lane_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_unsized_into(fx, self, dest);
+ }
+
+ pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_dyn_star(fx, self, dest);
+ }
+
+ /// If `ty` is signed, `const_val` must already be sign extended.
+ pub(crate) fn const_val(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ const_val: ty::ScalarInt,
+ ) -> CValue<'tcx> {
+ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+ use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+ let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+ if let ty::Bool = layout.ty.kind() {
+ assert!(
+ const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
+ }
+
+ let val = match layout.ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
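+ // A 128-bit constant doesn't fit in a single iconst; materialize the two
+ // 64-bit halves and concatenate them.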
+ let const_val = const_val.to_bits(layout.size).unwrap();
+ let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ }
+ ty::Float(FloatTy::F32) => {
+ fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+ }
+ _ => panic!(
+ "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+ layout.ty
+ ),
+ };
+
+ CValue::by_val(val, layout)
+ }
+
+ pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
+ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert_eq!(self.layout().abi, layout.abi);
+ CValue(self.0, layout)
+ }
+}
+
+/// A place that a value can be written to or read from
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+ inner: CPlaceInner,
+ layout: TyAndLayout<'tcx>,
+}
+
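+// Where the place lives: a single SSA variable (Var), two variables for ScalarPair layouts
+// (VarPair), one lane of a vector variable (VarLane), or a memory address with optional
+// unsized metadata (Addr).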
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+ Var(Local, Variable),
+ VarPair(Local, Variable, Variable),
+ VarLane(Local, Variable, u8),
+ Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ pub(crate) fn inner(&self) -> &CPlaceInner {
+ &self.inner
+ }
+
+ pub(crate) fn new_stack_slot(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ assert!(layout.is_sized());
+ if layout.size.bytes() == 0 {
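+ // ZSTs occupy no memory, so a dangling but well aligned pointer is sufficient.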
+ return CPlace {
+ inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+ layout,
+ };
+ }
+
+ if layout.size.bytes() >= u64::from(u32::MAX - 16) {
+ fx.tcx
+ .sess
+ .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
+ }
+
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
+ });
+ CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
+ }
+
+ pub(crate) fn new_var(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var = Variable::from_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
+ CPlace { inner: CPlaceInner::Var(local, var), layout }
+ }
+
+ pub(crate) fn new_var_pair(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var1 = Variable::from_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ let var2 = Variable::from_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+
+ let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+ fx.bcx.declare_var(var1, ty1);
+ fx.bcx.declare_var(var2, ty2);
+ CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
+ }
+
+ pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
+ }
+
+ pub(crate) fn for_ptr_with_extra(
+ ptr: Pointer,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
+ }
+
+ pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
+ let layout = self.layout();
+ match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let val1 = fx.bcx.use_var(var1);
+ //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ let val2 = fx.bcx.use_var(var2);
+ //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ CValue::by_val_pair(val1, val2, layout)
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ let val = fx.bcx.ins().extractlane(val, lane);
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::Addr(ptr, extra) => {
+ if let Some(extra) = extra {
+ CValue::by_ref_unsized(ptr, extra, layout)
+ } else {
+ CValue::by_ref(ptr, layout)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn to_ptr(self) -> Pointer {
+ match self.to_ptr_maybe_unsized() {
+ (ptr, None) => ptr,
+ (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+ match self.inner {
+ CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+ CPlaceInner::Var(_, _)
+ | CPlaceInner::VarPair(_, _, _)
+ | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
+ assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
+
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+ }
+
+ pub(crate) fn write_cvalue_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ ) {
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+ }
+
+ fn write_cvalue_maybe_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ method: &'static str,
+ ) {
+ fn transmute_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ var: Variable,
+ data: Value,
+ dst_ty: Type,
+ ) {
+ let src_ty = fx.bcx.func.dfg.value_type(data);
+ assert_eq!(
+ src_ty.bytes(),
+ dst_ty.bytes(),
+ "write_cvalue_transmute: {:?} -> {:?}",
+ src_ty,
+ dst_ty,
+ );
+ let data = match (src_ty, dst_ty) {
+ (_, _) if src_ty == dst_ty => data,
+
+ // This is a `write_cvalue_transmute`.
+ (types::I32, types::F32)
+ | (types::F32, types::I32)
+ | (types::I64, types::F64)
+ | (types::F64, types::I64) => codegen_bitcast(fx, dst_ty, data),
+ _ if src_ty.is_vector() && dst_ty.is_vector() => codegen_bitcast(fx, dst_ty, data),
+ _ if src_ty.is_vector() || dst_ty.is_vector() => {
+ // FIXME do something more efficient for transmutes between vectors and integers.
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (src_ty.bytes() + 15) / 16 * 16,
+ });
+ let ptr = Pointer::stack_slot(stack_slot);
+ ptr.store(fx, data, MemFlags::trusted());
+ ptr.load(fx, dst_ty, MemFlags::trusted())
+ }
+
+ // `CValue`s should never contain SSA-only types, so if you ended
+ // up here having seen an error like `B1 -> I8`, then before
+ // calling `write_cvalue` you need to add a `bint` instruction.
+ _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+ };
+ //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, data);
+ }
+
+ assert_eq!(self.layout().size, from.layout().size);
+
+ if fx.clif_comments.enabled() {
+ use cranelift_codegen::cursor::{Cursor, CursorPosition};
+ let cur_block = match fx.bcx.cursor().position() {
+ CursorPosition::After(block) => block,
+ _ => unreachable!(),
+ };
+ fx.add_comment(
+ fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+ format!(
+ "{}: {:?}: {:?} <- {:?}: {:?}",
+ method,
+ self.inner(),
+ self.layout().ty,
+ from.0,
+ from.layout().ty
+ ),
+ );
+ }
+
+ let dst_layout = self.layout();
+ let to_ptr = match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ if let ty::Array(element, len) = dst_layout.ty.kind() {
+ // Can only happen for vector types
+ let len =
+ u32::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+ let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
+
+ let data = match from.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, vector_ty, flags)
+ }
+ CValueInner::ByVal(_)
+ | CValueInner::ByValPair(_, _)
+ | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
+ };
+
+ fx.bcx.def_var(var, data);
+ return;
+ }
+ let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+ transmute_value(fx, var, data, dst_ty);
+ return;
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let (data1, data2) = if from.layout().ty == dst_layout.ty {
+ CValue(from.0, dst_layout).load_scalar_pair(fx)
+ } else {
+ let (ptr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
+ CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+ };
+ let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+ transmute_value(fx, var1, data1, dst_ty1);
+ transmute_value(fx, var2, data2, dst_ty2);
+ return;
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let data = from.load_scalar(fx);
+
+ // First get the old vector
+ let vector = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+ // Next insert the written lane into the vector
+ let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+ // Finally write the new vector
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, vector);
+
+ return;
+ }
+ CPlaceInner::Addr(ptr, None) => {
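+ // Nothing needs to be stored for ZSTs or uninhabited types.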
+ if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+ return;
+ }
+ ptr
+ }
+ CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+ };
+
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ match from.layout().abi {
+ // FIXME make Abi::Vector work too
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let (value, extra) = from.load_scalar_pair(fx);
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, value, flags);
+ to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+ return;
+ }
+ _ => {}
+ }
+
+ match from.0 {
+ CValueInner::ByVal(val) => {
+ to_ptr.store(fx, val, flags);
+ }
+ CValueInner::ByValPair(_, _) => {
+ bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+ }
+ CValueInner::ByRef(from_ptr, None) => {
+ let from_addr = from_ptr.get_addr(fx);
+ let to_addr = to_ptr.get_addr(fx);
+ let src_layout = from.1;
+ let size = dst_layout.size.bytes();
+ let src_align = src_layout.align.abi.bytes() as u8;
+ let dst_align = dst_layout.align.abi.bytes() as u8;
+ fx.bcx.emit_small_memory_copy(
+ fx.target_config,
+ to_addr,
+ from_addr,
+ size,
+ dst_align,
+ src_align,
+ true,
+ flags,
+ );
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ pub(crate) fn place_opaque_cast(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: self.inner, layout: fx.layout_of(ty) }
+ }
+
+ pub(crate) fn place_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => match layout.ty.kind() {
+ ty::Array(_, _) => {
+ // Can only happen for vector types
+ return CPlace {
+ inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
+ let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+
+ match f0_ty.kind() {
+ ty::Array(_, _) => {
+ assert_eq!(field.as_u32(), 0);
+ return CPlace {
+ inner: CPlaceInner::Var(local, var),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ _ => {
+ return CPlace {
+ inner: CPlaceInner::VarLane(
+ local,
+ var,
+ field.as_u32().try_into().unwrap(),
+ ),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ }
+ }
+ _ => {}
+ },
+ CPlaceInner::VarPair(local, var1, var2) => {
+ let layout = layout.field(&*fx, field.index());
+
+ match field.as_u32() {
+ 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+ 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
+ _ => unreachable!("field should be 0 or 1"),
+ }
+ }
+ _ => {}
+ }
+
+ let (base, extra) = self.to_ptr_maybe_unsized();
+
+ let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+ if field_layout.is_unsized() {
+ if let ty::Foreign(_) = field_layout.ty.kind() {
+ assert!(extra.is_none());
+ CPlace::for_ptr(field_ptr, field_layout)
+ } else {
+ CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+ }
+ } else {
+ CPlace::for_ptr(field_ptr, field_layout)
+ }
+ }
+
+ /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
+ /// such that you can access individual lanes.
+ pub(crate) fn place_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ assert!(matches!(layout.abi, Abi::Vector { .. }));
+ CPlace {
+ inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+ layout: lane_layout,
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => unreachable!(),
+ CPlaceInner::VarLane(_, _, _) => unreachable!(),
+ CPlaceInner::Addr(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CPlace::for_ptr(field_ptr, lane_layout)
+ }
+ CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn place_index(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ index: Value,
+ ) -> CPlace<'tcx> {
+ let (elem_layout, ptr) = match self.layout().ty.kind() {
+ ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
+ ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
+ _ => bug!("place_index({:?})", self.layout().ty),
+ };
+
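+ // Byte offset of the selected element: index * size_of(element).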
+ let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
+
+ CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+ }
+
+ pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
+ let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
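+ // Pointers to unsized types are (addr, metadata) pairs; the metadata becomes the
+ // extra of the resulting place.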
+ if has_ptr_meta(fx.tcx, inner_layout.ty) {
+ let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+ CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+ } else {
+ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+ }
+ }
+
+ pub(crate) fn place_ref(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ if has_ptr_meta(fx.tcx, self.layout().ty) {
+ let (ptr, extra) = self.to_ptr_maybe_unsized();
+ CValue::by_val_pair(
+ ptr.get_addr(fx),
+ extra.expect("unsized type without metadata"),
+ layout,
+ )
+ } else {
+ CValue::by_val(self.to_ptr().get_addr(fx), layout)
+ }
+ }
+
+ pub(crate) fn downcast_variant(
+ self,
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ variant: VariantIdx,
+ ) -> Self {
+ assert!(self.layout().is_sized());
+ let layout = self.layout().for_variant(fx, variant);
+ CPlace { inner: self.inner, layout }
+ }
+}
+
+#[track_caller]
+pub(crate) fn assert_assignable<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ from_ty: Ty<'tcx>,
+ to_ty: Ty<'tcx>,
+ limit: usize,
+) {
+ if limit == 0 {
+ // assert_assignable exists solely to catch bugs in cg_clif. It isn't necessary for
+ // soundness. Don't attempt to check deep types to avoid exponential behavior in certain
+ // cases.
+ return;
+ }
+ match (from_ty.kind(), to_ty.kind()) {
+ (ty::Ref(_, a, _), ty::Ref(_, b, _))
+ | (
+ ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+ ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+ ) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
+ | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::FnPtr(_), ty::FnPtr(_)) => {
+ let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ from_ty.fn_sig(fx.tcx),
+ );
+ let to_sig = fx
+ .tcx
+ .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ assert_eq!(
+ from_sig, to_sig,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ // fn(&T) -> for<'l> fn(&'l T) is allowed
+ }
+ (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
+ // FIXME(dyn-star): Do the right thing with DynKinds
+ for (from, to) in from_traits.iter().zip(to_traits) {
+ let from =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+ let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+ assert_eq!(
+ from, to,
+ "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+ from_traits, to_traits, fx,
+ );
+ }
+ // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+ }
+ (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
+ let mut types_a = types_a.iter();
+ let mut types_b = types_b.iter();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+ if adt_def_a.did() == adt_def_b.did() =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
+ (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
+ if def_id_a == def_id_b =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
+ // No way to check if it is correct or not with polymorphization enabled
+ }
+ _ => {
+ assert_eq!(
+ from_ty,
+ to_ty,
+ "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+ from_ty.kind(),
+ to_ty.kind(),
+ fx,
+ );
+ }
+ }
+}