--- /dev/null
--- /dev/null
++task:
++ name: freebsd
++ freebsd_instance:
++ image: freebsd-12-1-release-amd64
++ setup_rust_script:
++ - pkg install -y curl git bash
++ - curl https://sh.rustup.rs -sSf --output rustup.sh
++ - sh rustup.sh --default-toolchain none -y --profile=minimal
++ cargo_bin_cache:
++ folder: ~/.cargo/bin
++ target_cache:
++ folder: target
++ prepare_script:
++ - . $HOME/.cargo/env
++ - git config --global user.email "user@example.com"
++ - git config --global user.name "User"
++ - ./prepare.sh
++ test_script:
++ - . $HOME/.cargo/env
++ # Enable backtraces for easier debugging
++ - export RUST_BACKTRACE=1
++ # Reduce the number of benchmark runs, as they are slow
++ - export COMPILE_RUNS=2
++ - export RUN_RUNS=2
++ - ./test.sh
--- /dev/null
- env:
- - BACKEND: ""
- - BACKEND: --oldbe
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macos-latest]
- ./test.sh $BACKEND
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./prepare.sh
+
+ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce the number of benchmark runs, as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
++ ./test.sh
+
+ - name: Package prebuilt cg_clif
+ run: tar cvfJ cg_clif.tar.xz build
+
+ - name: Upload prebuilt cg_clif
+ uses: actions/upload-artifact@v2
+ with:
+ name: cg_clif-${{ runner.os }}
+ path: cg_clif.tar.xz
--- /dev/null
- "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate"],
+{
+ // source for rustc_* is not included in the rust-src component; disable the errors about this
++ "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "macro-error"],
+ "rust-analyzer.assist.importMergeBehavior": "last",
+ "rust-analyzer.cargo.loadOutDirsFromCheck": true,
+ "rust-analyzer.linkedProjects": [
+ "./Cargo.toml",
+ //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+ {
+ "roots": [
+ "./example/mini_core.rs",
+ "./example/mini_core_hello_world.rs",
+ "./example/mod_bench.rs"
+ ],
+ "crates": [
+ {
+ "root_module": "./example/mini_core.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mini_core_hello_world.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 0, "name": "mini_core" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mod_bench.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
+ "roots": ["./scripts/filter_profile.rs"],
+ "crates": [
+ {
+ "root_module": "./scripts/filter_profile.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ }
+ ]
+}
--- /dev/null
- [[package]]
- name = "cc"
- version = "1.0.66"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
-
- [[package]]
- name = "cfg-if"
- version = "0.1.10"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
++version = 3
++
+[[package]]
+name = "anyhow"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "byteorder"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b"
+
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "byteorder",
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "gimli",
+ "log",
+ "regalloc",
+ "smallvec",
+ "target-lexicon",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+
+[[package]]
+name = "cranelift-entity"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+
+[[package]]
+name = "cranelift-frontend"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-jit"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "errno",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "winapi",
+]
+
+[[package]]
+name = "cranelift-module"
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "log",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-native"
- "raw-cpuid",
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "cranelift-codegen",
- version = "0.69.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#986b5768f9e68f1564b43f32b8a4080a6582c8ca"
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
- "cfg-if 1.0.0",
++version = "0.70.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#cdb60ec5a9df087262ae8960a31067e88cd80058"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+dependencies = [
- version = "0.2.82"
++ "cfg-if",
+]
+
+[[package]]
+name = "errno"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
+dependencies = [
+ "gcc",
+ "libc",
+]
+
+[[package]]
+name = "gcc"
+version = "0.3.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+
+[[package]]
+name = "gimli"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+
+[[package]]
+name = "indexmap"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
- checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929"
++version = "0.2.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cfg-if 1.0.0",
++checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
+
+[[package]]
+name = "libloading"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
+dependencies = [
- version = "0.4.13"
++ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
- checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2"
++version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cfg-if 0.1.10",
++checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
- version = "0.22.0"
++ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "object"
- checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
++version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.0.8"
++checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
- checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df"
++version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- [[package]]
- name = "raw-cpuid"
- version = "8.1.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73"
- dependencies = [
- "bitflags",
- "cc",
- "rustc_version",
- ]
-
++checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+dependencies = [
+ "proc-macro2",
+]
+
- [[package]]
- name = "rustc_version"
- version = "0.2.3"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
- dependencies = [
- "semver",
- ]
-
- [[package]]
- name = "semver"
- version = "0.9.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
- dependencies = [
- "semver-parser",
- ]
-
- [[package]]
- name = "semver-parser"
- version = "0.7.0"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-
+[[package]]
+name = "regalloc"
+version = "0.0.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
+dependencies = [
+ "log",
+ "rustc-hash",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "smallvec",
+ "target-lexicon",
+]
+
- version = "1.0.58"
+[[package]]
+name = "smallvec"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+
+[[package]]
+name = "syn"
- checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5"
++version = "1.0.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.11.1"
++checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "target-lexicon"
- checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9"
++version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.0.23"
++checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95"
+
+[[package]]
+name = "thiserror"
- checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146"
++version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.0.23"
++checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
- checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1"
++version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- /dev/null
- cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind", "x86", "x64"] }
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+edition = "2018"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
- object = { version = "0.22.0", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
++cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind", "x64"] }
+cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
+cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+target-lexicon = "0.11.0"
+gimli = { version = "0.23.0", default-features = false, features = ["write"]}
- oldbe = []
++object = { version = "0.23.0", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.0.2"
+libloading = { version = "0.6.0", optional = true }
+smallvec = "1.6.1"
+
+# Uncomment to use local checkout of cranelift
+#[patch."https://github.com/bytecodealliance/wasmtime/"]
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#[patch.crates-io]
+#gimli = { path = "../" }
+
+[features]
+default = ["jit", "inline_asm"]
+jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+[profile.dev]
+# Compiling dependencies with optimizations makes running the tests much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_cranelift]
+# Disabling optimizations for cg_clif itself makes compilation after a change faster.
+opt-level = 0
+
+[profile.release.package.rustc_codegen_cranelift]
+incremental = true
+
+# Disable optimizations and debuginfo for build scripts and some of the heavy build deps;
+# build scripts run so quickly that optimizing them only slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
+
+[profile.dev.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.release.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.dev.package.syn]
+opt-level = 0
+debug = false
+
+[profile.release.package.syn]
+opt-level = 0
+debug = false
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+set -e
+
+# Settings
+export CHANNEL="release"
+build_sysroot="clif"
+target_dir='build'
- oldbe=''
+while [[ $# != 0 ]]; do
+ case $1 in
+ "--debug")
+ export CHANNEL="debug"
+ ;;
+ "--sysroot")
+ build_sysroot=$2
+ shift
+ ;;
+ "--target-dir")
+ target_dir=$2
+ shift
+ ;;
- "--oldbe")
- oldbe='--features oldbe'
- ;;
+ *)
+ echo "Unknown flag '$1'"
- echo "Usage: ./build.sh [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--oldbe]"
++ echo "Usage: ./build.sh [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+# Build cg_clif
+unset CARGO_TARGET_DIR
+unamestr=$(uname)
- if [[ "$unamestr" == 'Linux' ]]; then
++if [[ "$unamestr" == 'Linux' || "$unamestr" == "FreeBSD" ]]; then
+ export RUSTFLAGS='-Clink-arg=-Wl,-rpath=$ORIGIN/../lib '$RUSTFLAGS
+elif [[ "$unamestr" == 'Darwin' ]]; then
+ export RUSTFLAGS='-Csplit-debuginfo=unpacked -Clink-arg=-Wl,-rpath,@loader_path/../lib -Zosx-rpath-install-name '$RUSTFLAGS
+ dylib_ext='dylib'
+else
- echo "Unsupported os"
++ echo "Unsupported os $unamestr"
+ exit 1
+fi
+if [[ "$CHANNEL" == "release" ]]; then
- cargo build $oldbe --release
++ cargo build --release
+else
- cargo build $oldbe
++ cargo build
+fi
+
+source scripts/ext_config.sh
+
+rm -rf "$target_dir"
+mkdir "$target_dir"
+mkdir "$target_dir"/bin "$target_dir"/lib
+ln target/$CHANNEL/cg_clif{,_build_sysroot} "$target_dir"/bin
+ln target/$CHANNEL/*rustc_codegen_cranelift* "$target_dir"/lib
+ln rust-toolchain scripts/config.sh scripts/cargo.sh "$target_dir"
+
+mkdir -p "$target_dir/lib/rustlib/$TARGET_TRIPLE/lib/"
+if [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
+ cp $(rustc --print sysroot)/lib/rustlib/$TARGET_TRIPLE/lib/*.o "$target_dir/lib/rustlib/$TARGET_TRIPLE/lib/"
+fi
+
+case "$build_sysroot" in
+ "none")
+ ;;
+ "llvm")
+ cp -r $(rustc --print sysroot)/lib/rustlib/$TARGET_TRIPLE/lib "$target_dir/lib/rustlib/$TARGET_TRIPLE/"
+ ;;
+ "clif")
+ echo "[BUILD] sysroot"
+ dir=$(pwd)
+ cd "$target_dir"
+ time "$dir/build_sysroot/build_sysroot.sh"
+ cp lib/rustlib/*/lib/libstd-* lib/
+ ;;
+ *)
+ echo "Unknown sysroot kind \`$build_sysroot\`."
+ echo "The allowed values are:"
+ echo " none A sysroot that doesn't contain the standard library"
+ echo " llvm Copy the sysroot from rustc compiled by cg_llvm"
+ echo " clif Build a new sysroot using cg_clif"
+ exit 1
+esac
--- /dev/null
- [[package]]
- name = "alloc_system"
- version = "0.0.0"
- dependencies = [
- "compiler_builtins",
- "core",
- "libc",
- ]
-
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
++version = 3
++
+[[package]]
+name = "addr2line"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
- version = "1.0.66"
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "cc"
- checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
++version = "1.0.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.84"
++checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.39"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
- checksum = "1cca32fa0182e8c0989459524dc356b8f2b5c10f1b9eb521b7d182c03cf8c5ff"
++version = "0.2.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "alloc_system",
++checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "term"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+ "term",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
--- /dev/null
- alloc_system = { path = "./alloc_system" }
-
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
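+# The "no-asm" feature selects the pure-Rust fallbacks in compiler-builtins
+# (cg_clif cannot compile the asm-based implementations).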
+compiler_builtins = { version = "0.1.39", default-features = false, features = ["no-asm"] }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+compiler_builtins = { path = "./compiler-builtins" }
+
+[profile.dev]
+lto = "off"
+
+[profile.release]
+debug = true
+incremental = true
+lto = "off"
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
+
+set -e
+
+source ./config.sh
+
+dir=$(pwd)
+
+# Use rustc with cg_clif as a hotpluggable backend instead of the custom cg_clif driver, so that
+# build scripts are still compiled using cg_llvm.
+export RUSTC=$dir"/bin/cg_clif_build_sysroot"
+export RUSTFLAGS=$RUSTFLAGS" --clif"
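+# Cargo only applies RUSTFLAGS to artifacts built for the requested --target,
+# so the host build scripts below are compiled without --clif.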
+
+cd "$(dirname "$0")"
+
+# Cleanup for previous run
+# v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+
+# We expect the target dir in the default location. Guard against the user changing it.
+export CARGO_TARGET_DIR=target
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Zforce-unstable-if-unmarked -Cpanic=abort"
+export __CARGO_DEFAULT_LIB_METADATA="cg_clif"
+if [[ "$1" != "--debug" ]]; then
+ sysroot_channel='release'
+ # FIXME Enable incremental again once rust-lang/rust#74946 is fixed
+ CARGO_INCREMENTAL=0 RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=2" cargo build --target "$TARGET_TRIPLE" --release
+else
+ sysroot_channel='debug'
+ cargo build --target "$TARGET_TRIPLE"
+fi
+
+# Copy files to sysroot
+ln "target/$TARGET_TRIPLE/$sysroot_channel/deps/"* "$dir/lib/rustlib/$TARGET_TRIPLE/lib/"
+rm "$dir/lib/rustlib/$TARGET_TRIPLE/lib/"*.{rmeta,d}
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+set -e
+cd "$(dirname "$0")"
+
+SRC_DIR="$(dirname "$(rustup which rustc)")/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e "$SRC_DIR" ]; then
+ echo "Please install rust-src component"
+ exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -a "$SRC_DIR/library" $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" "$file"
+git apply ../../patches/"$file"
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+git clone https://github.com/rust-lang/compiler-builtins.git || echo "rust-lang/compiler-builtins has already been cloned"
+pushd compiler-builtins
+git checkout -- .
+git checkout 0.1.39
- git apply ../../crate_patches/0001-compiler-builtins-Remove-rotate_left-from-Int.patch
++git apply ../../crate_patches/000*-compiler-builtins-*.patch
+popd
+
+echo "Successfully prepared sysroot source for building"
--- /dev/null
- #!/bin/bash --verbose
++#!/usr/bin/env bash
+set -e
+
+rm -rf target/ build/ build_sysroot/{sysroot_src/,target/,compiler-builtins/} perf.data{,.old}
+rm -rf rand/ regex/ simple-raytracer/
--- /dev/null
--- /dev/null
++From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Thu, 18 Feb 2021 18:30:55 +0100
++Subject: [PATCH] Disable 128bit atomic operations
++
++Cranelift doesn't support them yet
++---
++ src/mem/mod.rs | 12 ------------
++ 1 file changed, 12 deletions(-)
++
++diff --git a/src/mem/mod.rs b/src/mem/mod.rs
++index 107762c..2d1ae10 100644
++--- a/src/mem/mod.rs
+++++ b/src/mem/mod.rs
++@@ -137,10 +137,6 @@ intrinsics! {
++ pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
++ memcpy_element_unordered_atomic(dest, src, bytes);
++ }
++- #[cfg(target_has_atomic_load_store = "128")]
++- pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++- memcpy_element_unordered_atomic(dest, src, bytes);
++- }
++
++ #[cfg(target_has_atomic_load_store = "8")]
++ pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
++@@ -158,10 +154,6 @@ intrinsics! {
++ pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
++ memmove_element_unordered_atomic(dest, src, bytes);
++ }
++- #[cfg(target_has_atomic_load_store = "128")]
++- pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++- memmove_element_unordered_atomic(dest, src, bytes);
++- }
++
++ #[cfg(target_has_atomic_load_store = "8")]
++ pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
++@@ -179,8 +171,4 @@ intrinsics! {
++ pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
++ memset_element_unordered_atomic(s, c, bytes);
++ }
++- #[cfg(target_has_atomic_load_store = "128")]
++- pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
++- memset_element_unordered_atomic(s, c, bytes);
++- }
++ }
++--
++2.26.2.7.g19db9cfb68
++
--- /dev/null
- #![feature(start, box_syntax, alloc_system, core_intrinsics, alloc_prelude, alloc_error_handler)]
++#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::prelude::v1::*;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern "C" {
+ fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+ core::intrinsics::abort();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let world: Box<&str> = box "Hello World!\0";
+ unsafe {
+ puts(*world as *const str as *const u8);
+ }
+
+ 0
+}
--- /dev/null
--- /dev/null
++// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++#![no_std]
++#![feature(allocator_api, rustc_private)]
++#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
++
++// The minimum alignment guaranteed by the architecture. This value is used to
++// add fast paths for low alignment values.
++#[cfg(all(any(target_arch = "x86",
++ target_arch = "arm",
++ target_arch = "mips",
++ target_arch = "powerpc",
++ target_arch = "powerpc64")))]
++const MIN_ALIGN: usize = 8;
++#[cfg(all(any(target_arch = "x86_64",
++ target_arch = "aarch64",
++ target_arch = "mips64",
++ target_arch = "s390x",
++ target_arch = "sparc64")))]
++const MIN_ALIGN: usize = 16;
++
++pub struct System;
++#[cfg(any(windows, unix, target_os = "redox"))]
++mod realloc_fallback {
++ use core::alloc::{GlobalAlloc, Layout};
++ use core::cmp;
++ use core::ptr;
++ impl super::System {
++ pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
++ new_size: usize) -> *mut u8 {
++ // Docs for GlobalAlloc::realloc require this to be valid:
++ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
++ let new_ptr = GlobalAlloc::alloc(self, new_layout);
++ if !new_ptr.is_null() {
++ let size = cmp::min(old_layout.size(), new_size);
++ ptr::copy_nonoverlapping(ptr, new_ptr, size);
++ GlobalAlloc::dealloc(self, ptr, old_layout);
++ }
++ new_ptr
++ }
++ }
++}
++#[cfg(any(unix, target_os = "redox"))]
++mod platform {
++ extern crate libc;
++ use core::ptr;
++ use MIN_ALIGN;
++ use System;
++ use core::alloc::{GlobalAlloc, Layout};
++ unsafe impl GlobalAlloc for System {
++ #[inline]
++ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
++ libc::malloc(layout.size()) as *mut u8
++ } else {
++ #[cfg(target_os = "macos")]
++ {
++ if layout.align() > (1 << 31) {
++ return ptr::null_mut()
++ }
++ }
++ aligned_malloc(&layout)
++ }
++ }
++ #[inline]
++ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
++ libc::calloc(layout.size(), 1) as *mut u8
++ } else {
++ let ptr = self.alloc(layout.clone());
++ if !ptr.is_null() {
++ ptr::write_bytes(ptr, 0, layout.size());
++ }
++ ptr
++ }
++ }
++ #[inline]
++ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
++ libc::free(ptr as *mut libc::c_void)
++ }
++ #[inline]
++ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
++ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
++ } else {
++ self.realloc_fallback(ptr, layout, new_size)
++ }
++ }
++ }
++ #[cfg(any(target_os = "android",
++ target_os = "hermit",
++ target_os = "redox",
++ target_os = "solaris"))]
++ #[inline]
++ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
++ // On android we currently target API level 9 which unfortunately
++ // doesn't have the `posix_memalign` API used below. Instead we use
++ // `memalign`, but this unfortunately has the property on some systems
++ // where the memory returned cannot be deallocated by `free`!
++ //
++ // Upon closer inspection, however, this appears to work just fine with
++ // Android, so for this platform we should be fine to call `memalign`
++ // (which is present in API level 9). Some helpful references could
++ // possibly be chromium using memalign [1], attempts at documenting that
++ // memalign + free is ok [2] [3], or the current source of chromium
++ // which still uses memalign on android [4].
++ //
++ // [1]: https://codereview.chromium.org/10796020/
++ // [2]: https://code.google.com/p/android/issues/detail?id=35391
++ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
++ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
++ // /memory/aligned_memory.cc
++ libc::memalign(layout.align(), layout.size()) as *mut u8
++ }
++ #[cfg(not(any(target_os = "android",
++ target_os = "hermit",
++ target_os = "redox",
++ target_os = "solaris")))]
++ #[inline]
++ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
++ let mut out = ptr::null_mut();
++ let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
++ if ret != 0 {
++ ptr::null_mut()
++ } else {
++ out as *mut u8
++ }
++ }
++}
++#[cfg(windows)]
++#[allow(nonstandard_style)]
++mod platform {
++ use MIN_ALIGN;
++ use System;
++ use core::alloc::{GlobalAlloc, Layout};
++ type LPVOID = *mut u8;
++ type HANDLE = LPVOID;
++ type SIZE_T = usize;
++ type DWORD = u32;
++ type BOOL = i32;
++ extern "system" {
++ fn GetProcessHeap() -> HANDLE;
++ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
++ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
++ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
++ fn GetLastError() -> DWORD;
++ }
++ #[repr(C)]
++ struct Header(*mut u8);
++ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
++ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
++ &mut *(ptr as *mut Header).offset(-1)
++ }
++ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
++ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
++ *get_header(aligned) = Header(ptr);
++ aligned
++ }
++ #[inline]
++ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
++ let ptr = if layout.align() <= MIN_ALIGN {
++ HeapAlloc(GetProcessHeap(), flags, layout.size())
++ } else {
++ let size = layout.size() + layout.align();
++ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
++ if ptr.is_null() {
++ ptr
++ } else {
++ align_ptr(ptr, layout.align())
++ }
++ };
++ ptr as *mut u8
++ }
++ unsafe impl GlobalAlloc for System {
++ #[inline]
++ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
++ allocate_with_flags(layout, 0)
++ }
++ #[inline]
++ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
++ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
++ }
++ #[inline]
++ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
++ if layout.align() <= MIN_ALIGN {
++ let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
++ debug_assert!(err != 0, "Failed to free heap memory: {}",
++ GetLastError());
++ } else {
++ let header = get_header(ptr);
++ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
++ debug_assert!(err != 0, "Failed to free heap memory: {}",
++ GetLastError());
++ }
++ }
++ #[inline]
++ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN {
++ HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
++ } else {
++ self.realloc_fallback(ptr, layout, new_size)
++ }
++ }
++ }
++}
--- /dev/null
- #![feature(no_core, arbitrary_self_types, box_syntax)]
+// Adapted from rustc run-pass test suite
+
- #![feature(start, lang_items)]
- #![no_core]
-
- extern crate mini_core;
-
- use mini_core::*;
-
- macro_rules! assert_eq {
- ($l:expr, $r: expr) => {
- if $l != $r {
- panic(stringify!($l != $r));
- }
- }
- }
++#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
- #[start]
- fn main(_: isize, _: *const *const u8) -> isize {
- let pw = Ptr(box Wrapper(5)) as Ptr<Wrapper<dyn Trait>>;
++use std::{
++ ops::{Deref, CoerceUnsized, DispatchFromDyn},
++ marker::Unsize,
++};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
- let wp = Wrapper(Ptr(box 6)) as Wrapper<Ptr<dyn Trait>>;
++fn main() {
++ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
- let wpw = Wrapper(Ptr(box Wrapper(7))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
++ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
-
- 0
++ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
--- /dev/null
+#![feature(
+ no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+ untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+ thread_local,
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u128 {
+ fn eq(&self, other: &u128) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u128) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+impl <T: PartialEq> PartialEq for Option<T> {
+ fn eq(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs == *rhs,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs != *rhs,
+ (None, None) => false,
+ _ => true,
+ }
+ }
+}
+
++#[lang = "shl"]
++pub trait Shl<RHS = Self> {
++ type Output;
++
++ #[must_use]
++ fn shl(self, rhs: RHS) -> Self::Output;
++}
++
++impl Shl for u128 {
++ type Output = u128;
++
++ fn shl(self, rhs: u128) -> u128 {
++ self << rhs
++ }
++}
++
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+ -self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\n\0" as *const str as *const i8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(*mut T);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+ fn drop(&mut self) {
+ // drop is currently performed by compiler.
+ }
+}
+
+impl<T> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+ libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+ pub fn needs_drop<T>() -> bool;
+ pub fn bitreverse<T>(x: T) -> T;
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+}
+
+pub mod libc {
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+ extern "C" {
+ pub fn puts(s: *const i8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
--- /dev/null
+#![feature(
+ no_core, start, lang_items, box_syntax, never_type, linkage,
+ extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const i8) {
+ puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ *NUM_REF as i32
+ }
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const i8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const i8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const i8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ if argc == 3 {
+ unsafe { puts(*argv as *const i8); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ }
+
+ main().report();
+ 0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+struct Unique<T: ?Sized> {
+ pointer: *const T,
+ _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
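+/// Returns a value of type T with every byte set to zero.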
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+fn main() {
+ take_unique(Unique {
+ pointer: 0 as *const (),
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ call_return_u128_pair();
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const i8 = hello as *const [u8] as *const i8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const i8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+
+ Unique {
+ pointer: 0 as *const &str,
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
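+    // A non-capturing closure coerces to a plain fn pointer.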
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ #[cfg(not(jit))]
+ {
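+        // With extern_weak linkage the symbol may be undefined at link time; its address is then null.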
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ unsafe { assert_eq!(ABC as usize, 0); }
+ }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
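+    // Float-to-int casts saturate at the bounds of the target type.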
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
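++    // 128-bit shift by a non-constant amount; this uses the Shl impl added to mini_core.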
++ let amount = 0;
++ assert_eq!(1u128 << amount, 1);
++
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ let repeat = [Some(42); 2];
+ assert_eq!(repeat[0], Some(42));
+ assert_eq!(repeat[1], Some(42));
+
+ from_decimal_string();
+
+ #[cfg(not(jit))]
+ test_tls();
+
+ #[cfg(all(not(jit), target_os = "linux"))]
+ unsafe {
+ global_asm_test();
+ }
+}
+
+#[cfg(all(not(jit), target_os = "linux"))]
+extern "C" {
+ fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_os = "linux"))]
+global_asm! {
+ "
+ .global global_asm_test
+ global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const i8);
+ }
+}
+
+// Copied from ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
+
+fn from_decimal_string() {
+ loop {
+ let multiplier = 1;
+
+ take_multiplier_ref(&multiplier);
+
+ if multiplier == 1 {
+ break;
+ }
+
+ unreachable();
+ }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+ panic("unreachable")
+}
--- /dev/null
- diff --git a/library/core/tests/num/ops.rs b/library/core/tests/num/ops.rs
- index 9979cc8..d5d1d83 100644
- --- a/library/core/tests/num/ops.rs
- +++ b/library/core/tests/num/ops.rs
- @@ -238,7 +238,7 @@ macro_rules! test_shift_assign {
- }
- };
- }
- -test_shift!(test_shl_defined, Shl::shl);
- -test_shift_assign!(test_shl_assign_defined, ShlAssign::shl_assign);
- -test_shift!(test_shr_defined, Shr::shr);
- -test_shift_assign!(test_shr_assign_defined, ShrAssign::shr_assign);
- +//test_shift!(test_shl_defined, Shl::shl);
- +//test_shift_assign!(test_shl_assign_defined, ShlAssign::shl_assign);
- +//test_shift!(test_shr_defined, Shr::shr);
- +//test_shift_assign!(test_shr_assign_defined, ShrAssign::shr_assign);
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs | 2 ++
+ library/core/tests/slice.rs | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2018"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+ mod dragon;
+ mod grisu;
+ }
+-mod random;
+
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
+index 0475aeb..9558198 100644
+--- a/library/core/tests/num/int_macros.rs
++++ b/library/core/tests/num/int_macros.rs
+@@ -88,6 +88,7 @@ mod tests {
+ assert_eq!(x.trailing_ones(), 0);
+ }
+
++ /*
+ #[test]
+ fn test_rotate() {
+ assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+@@ -112,6 +113,7 @@ mod tests {
+ assert_eq!(B.rotate_left(128), B);
+ assert_eq!(C.rotate_left(128), C);
+ }
++ */
+
+ #[test]
+ fn test_swap_bytes() {
+diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
+index 04ed14f..a6e372e 100644
+--- a/library/core/tests/num/uint_macros.rs
++++ b/library/core/tests/num/uint_macros.rs
+@@ -52,6 +52,7 @@ mod tests {
+ assert_eq!(x.trailing_ones(), 0);
+ }
+
++ /*
+ #[test]
+ fn test_rotate() {
+ assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
+@@ -76,6 +77,7 @@ mod tests {
+ assert_eq!(B.rotate_left(128), B);
+ assert_eq!(C.rotate_left(128), C);
+ }
++ */
+
+ #[test]
+ fn test_swap_bytes() {
+diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
+index 1a6be3a..42dbd59 100644
+--- a/library/core/tests/ptr.rs
++++ b/library/core/tests/ptr.rs
+@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
+ assert!(ys == zs);
+ }
+
++/*
+ #[test]
+ #[allow(warnings)]
+ // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
+@@ -289,6 +290,7 @@ fn write_unaligned_drop() {
+ }
+ DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+ }
++*/
+
+ #[test]
+ fn align_offset_zst() {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+ }
+ }
+
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
--- /dev/null
--- /dev/null
++From 894e07dfec2624ba539129b1c1d63e1d7d812bda Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Thu, 18 Feb 2021 18:45:28 +0100
++Subject: [PATCH] Disable 128bit atomic operations
++
++Cranelift doesn't support them yet
++---
++ library/core/src/sync/atomic.rs | 38 ---------------------------------
++ library/core/tests/atomic.rs | 4 ----
++ library/std/src/panic.rs | 6 ------
++ 3 files changed, 48 deletions(-)
++
++diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
++index 81c9e1d..65c9503 100644
++--- a/library/core/src/sync/atomic.rs
+++++ b/library/core/src/sync/atomic.rs
++@@ -2228,44 +2228,6 @@ atomic_int! {
++ "AtomicU64::new(0)",
++ u64 AtomicU64 ATOMIC_U64_INIT
++ }
++-#[cfg(target_has_atomic_load_store = "128")]
++-atomic_int! {
++- cfg(target_has_atomic = "128"),
++- cfg(target_has_atomic_equal_alignment = "128"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- "i128",
++- "#![feature(integer_atomics)]\n\n",
++- atomic_min, atomic_max,
++- 16,
++- "AtomicI128::new(0)",
++- i128 AtomicI128 ATOMIC_I128_INIT
++-}
++-#[cfg(target_has_atomic_load_store = "128")]
++-atomic_int! {
++- cfg(target_has_atomic = "128"),
++- cfg(target_has_atomic_equal_alignment = "128"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
++- unstable(feature = "integer_atomics", issue = "32976"),
++- "u128",
++- "#![feature(integer_atomics)]\n\n",
++- atomic_umin, atomic_umax,
++- 16,
++- "AtomicU128::new(0)",
++- u128 AtomicU128 ATOMIC_U128_INIT
++-}
++
++ macro_rules! atomic_int_ptr_sized {
++ ( $($target_pointer_width:literal $align:literal)* ) => { $(
++diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
++index 2d1e449..cb6da5d 100644
++--- a/library/core/tests/atomic.rs
+++++ b/library/core/tests/atomic.rs
++@@ -145,10 +145,6 @@ fn atomic_alignment() {
++ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
++ #[cfg(target_has_atomic = "64")]
++ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
++- #[cfg(target_has_atomic = "128")]
++- assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
++- #[cfg(target_has_atomic = "128")]
++- assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
++ #[cfg(target_has_atomic = "ptr")]
++ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
++ #[cfg(target_has_atomic = "ptr")]
++diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
++index 89a822a..779fd88 100644
++--- a/library/std/src/panic.rs
+++++ b/library/std/src/panic.rs
++@@ -279,9 +279,6 @@ impl RefUnwindSafe for atomic::AtomicI32 {}
++ #[cfg(target_has_atomic_load_store = "64")]
++ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
++ impl RefUnwindSafe for atomic::AtomicI64 {}
++-#[cfg(target_has_atomic_load_store = "128")]
++-#[unstable(feature = "integer_atomics", issue = "32976")]
++-impl RefUnwindSafe for atomic::AtomicI128 {}
++
++ #[cfg(target_has_atomic_load_store = "ptr")]
++ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
++@@ -298,9 +295,6 @@ impl RefUnwindSafe for atomic::AtomicU32 {}
++ #[cfg(target_has_atomic_load_store = "64")]
++ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
++ impl RefUnwindSafe for atomic::AtomicU64 {}
++-#[cfg(target_has_atomic_load_store = "128")]
++-#[unstable(feature = "integer_atomics", issue = "32976")]
++-impl RefUnwindSafe for atomic::AtomicU128 {}
++
++ #[cfg(target_has_atomic_load_store = "8")]
++ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
++--
++2.26.2.7.g19db9cfb68
++
--- /dev/null
- #!/bin/bash --verbose
++#!/usr/bin/env bash
+set -e
+
+rustup component add rust-src rustc-dev llvm-tools-preview
+./build_sysroot/prepare_sysroot_src.sh
+cargo install hyperfine || echo "Skipping hyperfine install"
+
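+# Clone pinned revisions of the crates exercised by the extended test suite.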
+git clone https://github.com/rust-random/rand.git || echo "rust-random/rand has already been cloned"
+pushd rand
+git checkout -- .
+git checkout 0f933f9c7176e53b2a3c7952ded484e1783f0bf1
+git am ../crate_patches/*-rand-*.patch
+popd
+
+git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
+pushd regex
+git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1
+popd
+
+git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
+pushd simple-raytracer
+git checkout -- .
+git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
+
+# build with cg_llvm for perf comparison
+unset CARGO_TARGET_DIR
+cargo build
+mv target/debug/main raytracer_cg_llvm
+popd
--- /dev/null
- nightly-2021-01-30
++nightly-2021-03-05
--- /dev/null
--- /dev/null
++# Matches rustfmt.toml of rustc
++version = "Two"
++use_small_heuristics = "Max"
++merge_derives = false
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+
+dir=$(dirname "$0")
+source "$dir/config.sh"
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat "$dir/rust-toolchain")
+
+cmd=$1
+shift || true
+
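+# "jit" and "lazy-jit" compile the crate and run it in-process instead of producing a binary.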
+if [[ "$cmd" = "jit" ]]; then
+cargo "+${TOOLCHAIN}" rustc "$@" -- -Cllvm-args=mode=jit -Cprefer-dynamic
+elif [[ "$cmd" = "lazy-jit" ]]; then
+cargo "+${TOOLCHAIN}" rustc "$@" -- -Cllvm-args=mode=jit-lazy -Cprefer-dynamic
+else
+cargo "+${TOOLCHAIN}" "$cmd" "$@"
+fi
--- /dev/null
- if [[ "$unamestr" == 'Linux' ]]; then
+# Note to people running shellcheck: this file should only be sourced, not executed directly.
+
+set -e
+
+unamestr=$(uname)
- # FIXME remove once the atomic shim is gone
++if [[ "$unamestr" == 'Linux' || "$unamestr" == 'FreeBSD' ]]; then
+ dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+ dylib_ext='dylib'
+else
+    echo "Unsupported OS"
+ exit 1
+fi
+
+if echo "$RUSTC_WRAPPER" | grep sccache; then
+echo
+echo -e "\x1b[1;93m=== Warning: Unset RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
+echo
+export RUSTC_WRAPPER=
+fi
+
+dir=$(cd "$(dirname "${BASH_SOURCE[0]}")"; pwd)
+
+export RUSTC=$dir"/bin/cg_clif"
+
+export RUSTDOCFLAGS=$linker' -Cpanic=abort -Zpanic-abort-tests '\
+'-Zcodegen-backend='$dir'/lib/librustc_codegen_cranelift.'$dylib_ext' --sysroot '$dir
+
++# FIXME fix `#[linkage = "extern_weak"]` without this
+if [[ "$unamestr" == 'Darwin' ]]; then
+ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+export LD_LIBRARY_PATH="$(rustc --print sysroot)/lib:"$dir"/lib"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+
+set -e
+
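+# prepare: switch to today's nightly and rebuild; commit: record the toolchain bump; push/pull: sync with rust-lang/rust via git subtree.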
+case $1 in
+ "prepare")
+ TOOLCHAIN=$(date +%Y-%m-%d)
+
+ echo "=> Installing new nightly"
+ rustup toolchain install --profile minimal "nightly-${TOOLCHAIN}" # Sanity check to see if the nightly exists
+ echo "nightly-${TOOLCHAIN}" > rust-toolchain
+ rustup component add rustfmt || true
+
+ echo "=> Uninstalling all old nighlies"
+ for nightly in $(rustup toolchain list | grep nightly | grep -v "$TOOLCHAIN" | grep -v nightly-x86_64); do
+ rustup toolchain uninstall "$nightly"
+ done
+
+ ./clean_all.sh
+ ./prepare.sh
+
+ (cd build_sysroot && cargo update)
+
+ ;;
+ "commit")
+ git add rust-toolchain build_sysroot/Cargo.lock
+ git commit -m "Rustup to $(rustc -V)"
+ ;;
+ "push")
+ cg_clif=$(pwd)
+ pushd ../rust
+ git pull origin master
+ branch=sync_cg_clif-$(date +%Y-%m-%d)
+ git checkout -b "$branch"
+ git subtree pull --prefix=compiler/rustc_codegen_cranelift/ https://github.com/bjorn3/rustc_codegen_cranelift.git master
+ git push -u my "$branch"
+
+ # immediately merge the merge commit into cg_clif to prevent merge conflicts when syncing
+ # from rust-lang/rust later
+ git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+ popd
+ git merge sync_from_rust
+ ;;
+ "pull")
+ cg_clif=$(pwd)
+ pushd ../rust
+ git pull origin master
+ rust_vers="$(git rev-parse HEAD)"
+ git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+ popd
+ git merge sync_from_rust -m "Sync from rust $rust_vers"
+ git branch -d sync_from_rust
+ ;;
+ *)
+ echo "Unknown command '$1'"
+ echo "Usage: ./rustup.sh prepare|commit"
+ ;;
+esac
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/../"
+
+./build.sh
+source build/config.sh
+
+echo "[TEST] Bootstrap of rustc"
+git clone https://github.com/rust-lang/rust.git || true
+pushd rust
+git fetch
+git checkout -- .
+git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
+
+git apply - <<EOF
- diff --git a/.gitmodules b/.gitmodules
- index 984113151de..c1e9d960d56 100644
- --- a/.gitmodules
- +++ b/.gitmodules
- @@ -34,10 +34,6 @@
- [submodule "src/doc/edition-guide"]
- path = src/doc/edition-guide
- url = https://github.com/rust-lang/edition-guide.git
- -[submodule "src/llvm-project"]
- - path = src/llvm-project
- - url = https://github.com/rust-lang/llvm-project.git
- - branch = rustc/11.0-2020-10-12
- [submodule "src/doc/embedded-book"]
- path = src/doc/embedded-book
- url = https://github.com/rust-embedded/book.git
++diff --git a/Cargo.toml b/Cargo.toml
++index 5bd1147cad5..10d68a2ff14 100644
++--- a/Cargo.toml
+++++ b/Cargo.toml
++@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
++ rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
++ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
++
+++compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
+++
++ [patch."https://github.com/rust-lang/rust-clippy"]
++ clippy_lints = { path = "src/tools/clippy/clippy_lints" }
+diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
+index 23e689fcae7..5f077b765b6 100644
+--- a/compiler/rustc_data_structures/Cargo.toml
++++ b/compiler/rustc_data_structures/Cargo.toml
+@@ -32,7 +32,6 @@ tempfile = "3.0.5"
+
+ [dependencies.parking_lot]
+ version = "0.11"
+-features = ["nightly"]
+
+ [target.'cfg(windows)'.dependencies]
+ winapi = { version = "0.3", features = ["fileapi", "psapi"] }
++diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
++index d95b5b7f17f..00b6f0e3635 100644
++--- a/library/alloc/Cargo.toml
+++++ b/library/alloc/Cargo.toml
++@@ -8,7 +8,7 @@ edition = "2018"
++
++ [dependencies]
++ core = { path = "../core" }
++-compiler_builtins = { version = "0.1.39", features = ['rustc-dep-of-std'] }
+++compiler_builtins = { version = "0.1.39", features = ['rustc-dep-of-std', 'no-asm'] }
++
++ [dev-dependencies]
++ rand = "0.7"
+EOF
+
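+# Configure x.py to use the prebuilt cg_clif as rustc and cranelift as the only codegen backend.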
+cat > config.toml <<EOF
+[llvm]
+ninja = false
+
+[build]
+rustc = "$(pwd)/../build/bin/cg_clif"
+cargo = "$(rustup which cargo)"
+full-bootstrap = true
+local-rebuild = true
+
+[rust]
+codegen-backends = ["cranelift"]
+EOF
+
+rm -r compiler/rustc_codegen_cranelift/{Cargo.*,src}
+cp ../Cargo.* compiler/rustc_codegen_cranelift/
+cp -r ../src compiler/rustc_codegen_cranelift/src
+
+./x.py build --stage 1 library/std
+popd
--- /dev/null
- #!/bin/bash
++#!/usr/bin/env bash
+
+set -e
+
+source build/config.sh
+source scripts/ext_config.sh
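+# $RUSTC points at the cg_clif wrapper (set by build/config.sh); add shared flags and the output dir.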
+MY_RUSTC="$RUSTC $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
+
+function no_sysroot_tests() {
+ echo "[BUILD] mini_core"
+ $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
+
+ echo "[BUILD] example"
+ $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] mini_core_hello_world"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+
+ echo "[JIT-lazy] mini_core_hello_world"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] mini_core_hello_world (skipped)"
+ fi
+
+ echo "[AOT] mini_core_hello_world"
+ $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+ # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
++}
+
++function base_sysroot_tests() {
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
- }
+
- function base_sysroot_tests() {
++ echo "[AOT] alloc_system"
++ $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
++
+ echo "[AOT] alloc_example"
+ $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/alloc_example
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] std_example"
+ $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+
+ echo "[JIT-lazy] std_example"
+ $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --cfg lazy_jit --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] std_example (skipped)"
+ fi
+
+ echo "[AOT] dst_field_align"
+ # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+ $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+ echo "[AOT] std_example"
+ $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/std_example arg
+
+ echo "[AOT] subslice-patterns-const-eval"
+ $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+ echo "[AOT] track-caller-attribute"
+ $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/track-caller-attribute
+
+ echo "[AOT] mod_bench"
+ $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mod_bench
+
+ pushd rand
+ rm -r ./target || true
+ ../build/cargo.sh test --workspace
+ popd
+}
+
+function extended_sysroot_tests() {
+ pushd simple-raytracer
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
+ "RUSTC=rustc RUSTFLAGS='' cargo build" \
+ "../build/cargo.sh build"
+
+ echo "[BENCH RUN] ebobby/simple-raytracer"
+ cp ./target/debug/main ./raytracer_cg_clif
+ hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
+ else
+ echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+ echo "[COMPILE] ebobby/simple-raytracer"
+ ../cargo.sh build
+ echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+ fi
+ popd
+
+ pushd build_sysroot/sysroot_src/library/core/tests
+ echo "[TEST] libcore"
+ rm -r ./target || true
+ ../../../../../build/cargo.sh test
+ popd
+
+ pushd regex
+ echo "[TEST] rust-lang/regex example shootout-regex-dna"
+ ../build/cargo.sh clean
+ # Make sure `[codegen mono items] start` doesn't poison the diff
+ ../build/cargo.sh build --example shootout-regex-dna
+ cat examples/regexdna-input.txt | ../build/cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
+ diff -u res.txt examples/regexdna-output.txt
+
+ echo "[TEST] rust-lang/regex tests"
+ ../build/cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+ popd
+}
+
+case "$1" in
+ "no_sysroot")
+ no_sysroot_tests
+ ;;
+ "base_sysroot")
+ base_sysroot_tests
+ ;;
+ "extended_sysroot")
+ extended_sysroot_tests
+ ;;
+ *)
+ echo "unknown test suite"
+ ;;
+esac
--- /dev/null
- pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ fx.add_global_comment(
+ "kind loc.idx param pass mode ty".to_string(),
+ );
+}
+
+pub(super) fn add_arg_comment<'tcx>(
- params
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(",")
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ kind: &str,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ params: &[Value],
+ arg_abi_mode: PassMode,
+ arg_layout: TyAndLayout<'tcx>,
+) {
+ let local = if let Some(local) = local {
+ Cow::Owned(format!("{:?}", local))
+ } else {
+ Cow::Borrowed("???")
+ };
+ let local_field = if let Some(local_field) = local_field {
+ Cow::Owned(format!(".{}", local_field))
+ } else {
+ Cow::Borrowed("")
+ };
+
+ let params = match params {
+ [] => Cow::Borrowed("-"),
+ [param] => Cow::Owned(format!("= {:?}", param)),
+ [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+ params => Cow::Owned(format!(
+ "= {}",
- pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, impl Module>) {
++ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+ )),
+ };
+
+ let pass_mode = format!("{:?}", arg_abi_mode);
+ fx.add_global_comment(format!(
+ "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+ kind = kind,
+ local = local,
+ local_field = local_field,
+ params = params,
+ pass_mode = pass_mode,
+ ty = arg_layout.ty,
+ ));
+}
+
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ fx.add_global_comment(String::new());
+ fx.add_global_comment(
+ "kind local ty size align (abi,pref)".to_string(),
+ );
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
- let rustc_target::abi::Layout {
- size,
- align,
- abi: _,
- variants: _,
- fields: _,
- largest_niche: _,
- } = layout;
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ local: Local,
+) {
+ let TyAndLayout { ty, layout } = place.layout();
- (
- "ssa",
- Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
- )
++ let rustc_target::abi::Layout { size, align, abi: _, variants: _, fields: _, largest_niche: _ } =
++ layout;
+
+ let (kind, extra) = match *place.inner() {
+ CPlaceInner::Var(place_local, var) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var={}", var.index())))
+ }
+ CPlaceInner::VarPair(place_local, var1, var2) => {
+ assert_eq!(local, place_local);
- (crate::pointer::PointerBase::Addr(addr), offset) => (
- "reuse",
- format!("storage={}{}{}", addr, offset, meta).into(),
- ),
- (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
- "stack",
- format!("storage={}{}{}", stack_slot, offset, meta).into(),
- ),
- (crate::pointer::PointerBase::Dangling(align), offset) => (
- "zst",
- format!("align={},offset={}", align.bytes(), offset).into(),
- ),
++ ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+ }
+ CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+ CPlaceInner::Addr(ptr, meta) => {
+ let meta = if let Some(meta) = meta {
+ Cow::Owned(format!(",meta={}", meta))
+ } else {
+ Cow::Borrowed("")
+ };
+ match ptr.base_and_offset() {
- if extra.is_empty() {
- ""
- } else {
- " "
- },
++ (crate::pointer::PointerBase::Addr(addr), offset) => {
++ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
++ }
++ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
++ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
++ }
++ (crate::pointer::PointerBase::Dangling(align), offset) => {
++ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
++ }
+ }
+ }
+ };
+
+ fx.add_global_comment(format!(
+ "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+ kind,
+ format!("{:?}", local),
+ format!("{:?}", ty),
+ size.bytes(),
+ align.abi.bytes(),
+ align.pref.bytes(),
++ if extra.is_empty() { "" } else { " " },
+ extra,
+ ));
+}
--- /dev/null
- | Conv::AvrNonBlockingInterrupt => {
- todo!("{:?}", fn_abi.conv)
- }
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+#[cfg(debug_assertions)]
+mod comments;
+mod pass_mode;
+mod returning;
+
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::AbiParam;
+use smallvec::smallvec;
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
+
+fn clif_sig_from_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
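+    // Map the rustc calling convention to the closest Cranelift equivalent.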
+ let call_conv = match fn_abi.conv {
+ Conv::Rust | Conv::C => CallConv::triple_default(triple),
+ Conv::X86_64SysV => CallConv::SystemV,
+ Conv::X86_64Win64 => CallConv::WindowsFastcall,
+ Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
+ | Conv::Msp430Intr
+ | Conv::PtxKernel
+ | Conv::X86Fastcall
+ | Conv::X86Intr
+ | Conv::X86Stdcall
+ | Conv::X86ThisCall
+ | Conv::X86VectorCall
+ | Conv::AmdGpuKernel
+ | Conv::AvrInterrupt
- let inputs = fn_abi
- .args
- .iter()
- .map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter())
- .flatten();
++ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+ };
- Signature {
- params,
- returns,
- call_conv,
- }
++ let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
+
+ let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+    // Sometimes the first param is a pointer to the place where the return value needs to be stored.
+ let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
- clif_sig_from_fn_abi(
- tcx,
- triple,
- &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]),
- )
++ Signature { params, returns, call_conv }
+}
+
+pub(crate) fn get_function_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ inst: Instance<'tcx>,
+) -> Signature {
+ assert!(!inst.substs.needs_infer());
- module: &mut impl Module,
++ clif_sig_from_fn_abi(tcx, triple, &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]))
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
- module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap()
++ module: &mut dyn Module,
+ inst: Instance<'tcx>,
+) -> FuncId {
+ let name = tcx.symbol_name(inst).name.to_string();
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
- impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
++ module.declare_function(&name, Linkage::Import, &sig).unwrap()
+}
+
- let func_id = import_function(self.tcx, &mut self.cx.module, inst);
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
++impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ /// Instance must be monomorphized
+ pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
- let sig = Signature {
- params,
- returns,
- call_conv: CallConv::triple_default(self.triple()),
- };
- let func_id = self
- .cx
- .module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap();
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
++ let func_id = import_function(self.tcx, self.cx.module, inst);
++ let func_ref = self.cx.module.declare_func_in_func(func_id, &mut self.bcx.func);
+
+ #[cfg(debug_assertions)]
+ self.add_comment(func_ref, format!("{:?}", inst));
+
+ func_ref
+ }
+
+ pub(crate) fn lib_call(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> &[Value] {
- (
- AbiParam::new(self.clif_type(arg.layout().ty).unwrap()),
- arg.load_scalar(self),
- )
++ let sig = Signature { params, returns, call_conv: CallConv::triple_default(self.triple()) };
++ let func_id = self.cx.module.declare_function(&name, Linkage::Import, &sig).unwrap();
++ let func_ref = self.cx.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ let call_inst = self.bcx.ins().call(func_ref, args);
+ #[cfg(debug_assertions)]
+ {
+ self.add_comment(call_inst, format!("easy_call {}", name));
+ }
+ let results = self.bcx.inst_results(call_inst);
+ assert!(results.len() <= 2, "{}", results.len());
+ results
+ }
+
+ pub(crate) fn easy_call(
+ &mut self,
+ name: &str,
+ args: &[CValue<'tcx>],
+ return_ty: Ty<'tcx>,
+ ) -> CValue<'tcx> {
+ let (input_tys, args): (Vec<_>, Vec<_>) = args
+ .iter()
+ .map(|arg| {
- tup.types()
- .map(|ty| AbiParam::new(self.clif_type(ty).unwrap()))
- .collect()
++ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
+ })
+ .unzip();
+ let return_layout = self.layout_of(return_ty);
+ let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ tup.types().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
+ } else {
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
+ };
+ let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+ match *ret_vals {
+ [] => CValue::by_ref(
+ Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+ return_layout,
+ ),
+ [val] => CValue::by_val(val, return_layout),
+ [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+ _ => unreachable!(),
+ }
+ }
+}
+
+/// Make a [`CPlace`] capable of holding value of the specified type.
+fn make_local_place<'tcx>(
- pub(crate) fn codegen_fn_prelude<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- start_block: Block,
- ) {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ is_ssa: bool,
+) -> CPlace<'tcx> {
+ let place = if is_ssa {
+ if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ CPlace::new_var_pair(fx, local, layout)
+ } else {
+ CPlace::new_var(fx, local, layout)
+ }
+ } else {
+ CPlace::new_stack_slot(fx, layout)
+ };
+
+ #[cfg(debug_assertions)]
+ self::comments::add_local_place_comments(fx, place, local);
+
+ place
+}
+
- let mut block_params_iter = fx
- .bcx
- .func
- .dfg
- .block_params(start_block)
- .to_vec()
- .into_iter();
++pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
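+    // Expose the function's parameters as Cranelift block parameters on the entry block.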
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
+ let ssa_analyzed = crate::analyze::analyze(fx);
+
+ #[cfg(debug_assertions)]
+ self::comments::add_args_header_comment(fx);
+
- let internally_mutable = !val.layout().ty.is_freeze(
- fx.tcx.at(local_decl.source_info.span),
- ParamEnv::reveal_all(),
- );
++ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
+ assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+ // None means pass_mode == NoPass
+ enum ArgKind<'tcx> {
+ Normal(Option<CValue<'tcx>>),
+ Spread(Vec<Option<CValue<'tcx>>>),
+ }
+
+ let fn_abi = fx.fn_abi.take().unwrap();
+ let mut arg_abis_iter = fn_abi.args.iter();
+
+ let func_params = fx
+ .mir
+ .args_iter()
+ .map(|local| {
+ let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
+
+ let tupled_arg_tys = match arg_ty.kind() {
+ ty::Tuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+ };
+
+ let mut params = Vec::new();
+ for (i, _arg_ty) in tupled_arg_tys.types().enumerate() {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
+ params.push(param);
+ }
+
+ (local, ArgKind::Spread(params), arg_ty)
+ } else {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
+ (local, ArgKind::Normal(param), arg_ty)
+ }
+ })
+ .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+ assert!(fx.caller_location.is_none());
+ if fx.instance.def.requires_caller_location(fx.tcx) {
+ // Store caller location for `#[track_caller]`.
+ let arg_abi = arg_abis_iter.next().unwrap();
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
+ }
+
+ assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
+ fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
+
+ #[cfg(debug_assertions)]
+ self::comments::add_locals_header_comment(fx);
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ // While this is normally an optimization to prevent an unnecessary copy when an argument is
+ // not mutated by the current function, this is necessary to support unsized arguments.
+ if let ArgKind::Normal(Some(val)) = arg_kind {
+ if let Some((addr, meta)) = val.try_to_ptr() {
+ let local_decl = &fx.mir.local_decls[local];
+ // v this ! is important
- place
- .place_field(fx, mir::Field::new(i))
- .write_cvalue(fx, param);
++ let internally_mutable = !val
++ .layout()
++ .ty
++ .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
+ if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
+                // We won't mutate this argument, so it is fine to borrow the backing storage
+ // of this argument, to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ #[cfg(debug_assertions)]
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
+ }
+ }
+ }
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+
+ match arg_kind {
+ ArgKind::Normal(param) => {
+ if let Some(param) = param {
+ place.write_cvalue(fx, param);
+ }
+ }
+ ArgKind::Spread(params) => {
+ for (i, param) in params.into_iter().enumerate() {
+ if let Some(param) = param {
- fx.bcx
- .ins()
- .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
++ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ }
+ }
+ }
+ }
+ }
+
+ for local in fx.mir.vars_and_temps_iter() {
+ let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+ }
+
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
+
+pub(crate) fn codegen_terminator_call<'tcx>(
- let fn_sig = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ span: Span,
+ current_block: Block,
+ func: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ destination: Option<(Place<'tcx>, BasicBlock)>,
+) {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- FnAbi::of_fn_ptr(
- &RevealAllLayoutCx(fx.tcx),
- fn_ty.fn_sig(fx.tcx),
- &extra_args,
- )
++ let fn_sig =
++ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
+
+    // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ crate::intrinsics::codegen_llvm_intrinsic_call(
+ fx,
+ &fx.tcx.symbol_name(instance).name,
+ substs,
+ args,
+ destination,
+ );
+ return;
+ }
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) => {
+ crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
+ return;
+ }
+ InstanceDef::DropGlue(_, None) => {
+ // empty drop glue - a nop.
+ let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = extra_args
+ .iter()
+ .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
+ .collect::<Vec<_>>();
+ let fn_abi = if let Some(instance) = instance {
+ FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
+ } else {
- .map(|inst| {
- fx.tcx
- .codegen_fn_attrs(inst.def_id())
- .flags
- .contains(CodegenFnAttrFlags::COLD)
- })
++ FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
+ };
+
+ let is_cold = instance
- args.iter()
- .map(|arg| codegen_operand(fx, arg))
- .collect::<Vec<_>>()
++ .map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
+ .unwrap_or(false);
+ if is_cold {
+ fx.cold_blocks.insert(current_block);
+ }
+
+ // Unpack arguments tuple for closures
+ let args = if fn_sig.abi == Abi::RustCall {
+ assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+ let self_arg = codegen_operand(fx, &args[0]);
+ let pack_arg = codegen_operand(fx, &args[1]);
+
+ let tupled_arguments = match pack_arg.layout().ty.kind() {
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
+ _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+ };
+
+ let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+ args.push(self_arg);
+ for i in 0..tupled_arguments.len() {
+ args.push(pack_arg.value_field(fx, mir::Field::new(i)));
+ }
+ args
+ } else {
- Some(Instance {
- def: InstanceDef::Virtual(_, idx),
- ..
- }) => {
++ args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
+ };
+
+ // | indirect call target
+ // | | the first argument to be passed
+ // v v
+ let (func_ref, first_arg) = match instance {
+ // Trait object call
- if instance
- .map(|inst| inst.def.requires_caller_location(fx.tcx))
- .unwrap_or(false)
- {
++ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ #[cfg(debug_assertions)]
+ {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(
+ nop_inst,
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0],),
+ );
+ }
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
+ (Some(method), smallvec![ptr])
+ }
+
+ // Normal call
+ Some(_) => (
+ None,
+ args.get(0)
+ .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
+ .unwrap_or(smallvec![]),
+ ),
+
+ // Indirect call
+ None => {
+ #[cfg(debug_assertions)]
+ {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(nop_inst, "indirect call");
+ }
+ let func = codegen_operand(fx, func).load_scalar(fx);
+ (
+ Some(func),
+ args.get(0)
+ .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
+ .unwrap_or(smallvec![]),
+ )
+ }
+ };
+
+ let ret_place = destination.map(|(place, _)| place);
+ let (call_inst, call_args) = self::returning::codegen_with_call_return_arg(
+ fx,
+ &fn_abi.ret,
+ ret_place,
+ |fx, return_ptr| {
+ let regular_args_count = args.len();
+ let mut call_args: Vec<Value> = return_ptr
+ .into_iter()
+ .chain(first_arg.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(1)
+ .map(|(i, arg)| adjust_arg_for_abi(fx, arg, &fn_abi.args[i]).into_iter())
+ .flatten(),
+ )
+ .collect::<Vec<_>>();
+
- fx.tcx.sess.span_fatal(
- span,
- &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
- );
++ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(span);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[regular_args_count])
+ .into_iter(),
+ );
+ assert_eq!(fn_abi.args.len(), regular_args_count + 1);
+ } else {
+ assert_eq!(fn_abi.args.len(), regular_args_count);
+ }
+
+ let call_inst = if let Some(func_ref) = func_ref {
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
+ } else {
+ let func_ref =
+ fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
+ fx.bcx.ins().call(func_ref, &call_args)
+ };
+
+ (call_inst, call_args)
+ },
+ );
+
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if fn_sig.abi != Abi::C {
- fx.tcx
- .sess
- .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
++ fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to upperbound on float args once floats are supported
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging function returned");
+ }
+}
+
+pub(crate) fn codegen_drop<'tcx>(
- TypeAndMut {
- ty,
- mutbl: crate::rustc_hir::Mutability::Mut,
- },
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ span: Span,
+ drop_place: CPlace<'tcx>,
+) {
+ let ty = drop_place.layout().ty;
+ let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
+ // we don't actually need to drop anything
+ } else {
+ match ty.kind() {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+ let ptr = ptr.get_addr(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), virtual_drop, &[]);
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+ }
+ _ => {
+ assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
+
+ let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), drop_instance, &[]);
+
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ &ty::RegionKind::ReErased,
++ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
+ )),
+ );
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);
+
+ let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+ if drop_instance.def.requires_caller_location(fx.tcx) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(span);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1]).into_iter(),
+ );
+ }
+
+ let func_ref = fx.get_function_ref(drop_instance);
+ fx.bcx.ins().call(func_ref, &call_args);
+ }
+ }
+ }
+}
--- /dev/null
- .map(|&kind| {
- reg_to_abi_param(Reg {
- kind,
- size: cast.prefix_chunk_size,
- })
- })
+//! Argument passing
+
+use crate::prelude::*;
+use crate::value_and_place::assert_assignable;
+
+use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
+};
+use smallvec::{smallvec, SmallVec};
+
+pub(super) trait ArgAbiExt<'tcx> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
+}
+
+fn reg_to_abi_param(reg: Reg) -> AbiParam {
+ let clif_ty = match (reg.kind, reg.size.bytes()) {
+ (RegKind::Integer, 1) => types::I8,
+ (RegKind::Integer, 2) => types::I16,
+ (RegKind::Integer, 4) => types::I32,
+ (RegKind::Integer, 8) => types::I64,
+ (RegKind::Integer, 16) => types::I128,
+ (RegKind::Float, 4) => types::F32,
+ (RegKind::Float, 8) => types::F64,
+ (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ _ => unreachable!("{:?}", reg),
+ };
+ AbiParam::new(clif_ty)
+}
+
+fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
+ match arg_attrs.arg_ext {
+ RustcArgExtension::None => {}
+ RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
+ RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
+ }
+ param
+}
+
+fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
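+ // Split `cast.rest` into whole units plus the number of trailing bytes that
+ // don't fill a full unit.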
+ let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
+ cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
+ )
+ };
+
+ if cast.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if cast.rest.total <= cast.rest.unit.size {
+ let clif_ty = match (cast.rest.unit.kind, cast.rest.unit.size.bytes()) {
+ (RegKind::Integer, 1) => types::I8,
+ (RegKind::Integer, 2) => types::I16,
+ (RegKind::Integer, 3..=4) => types::I32,
+ (RegKind::Integer, 5..=8) => types::I64,
+ (RegKind::Integer, 9..=16) => types::I128,
+ (RegKind::Float, 4) => types::F32,
+ (RegKind::Float, 8) => types::F64,
+ (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ _ => unreachable!("{:?}", cast.rest.unit),
+ };
+ return smallvec![AbiParam::new(clif_ty)];
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args = cast
+ .prefix
+ .iter()
+ .flatten()
- Abi::Scalar(scalar) => {
- smallvec![apply_arg_attrs_to_abi_param(
- AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
- attrs
- )]
- }
++ .map(|&kind| reg_to_abi_param(Reg { kind, size: cast.prefix_chunk_size }))
+ .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
+ .collect::<SmallVec<_>>();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(cast.rest.unit.kind, RegKind::Integer);
+ args.push(reg_to_abi_param(Reg {
+ kind: RegKind::Integer,
+ size: Size::from_bytes(rem_bytes),
+ }));
+ }
+
+ args
+}
+
+impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
+ match self.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(attrs) => match &self.layout.abi {
- PassMode::Indirect {
- attrs,
- extra_attrs: None,
- on_stack,
- } => {
++ Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
++ AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
++ attrs
++ )],
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ smallvec![AbiParam::new(vector_ty)]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a.clone());
+ let b = scalar_to_clif_type(tcx, b.clone());
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
+ apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
+ ]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => cast_target_to_abi_params(cast),
- smallvec![apply_arg_attrs_to_abi_param(
- AbiParam::new(pointer_ty(tcx)),
- attrs
- )]
++ PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+ if on_stack {
+ let size = u32::try_from(self.layout.size.bytes()).unwrap();
+ smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size)),
+ attrs
+ )]
+ } else {
- PassMode::Indirect {
- attrs,
- extra_attrs: Some(extra_attrs),
- on_stack,
- } => {
++ smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
+ }
+ }
- Abi::Scalar(scalar) => (
- None,
- vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))],
- ),
++ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+ ]
+ }
+ }
+ }
+
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
+ match self.mode {
+ PassMode::Ignore => (None, vec![]),
+ PassMode::Direct(_) => match &self.layout.abi {
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack,
- } => {
++ Abi::Scalar(scalar) => {
++ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
++ }
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ (None, vec![AbiParam::new(vector_ty)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(_, _) => match &self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a.clone());
+ let b = scalar_to_clif_type(tcx, b.clone());
+ (None, vec![AbiParam::new(a), AbiParam::new(b)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
- (
- Some(AbiParam::special(
- pointer_ty(tcx),
- ArgumentPurpose::StructReturn,
- )),
- vec![],
- )
++ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+ assert!(!on_stack);
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
++ (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
++ }
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
++ unreachable!("unsized return value")
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ }
+ }
+}
+
+pub(super) fn to_casted_value<'tcx>(
- let val = ptr
- .offset_i64(fx, offset)
- .load(fx, param.value_type, MemFlags::new());
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ cast: CastTarget,
+) -> SmallVec<[Value; 2]> {
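+ // Spill the value to the stack, then load it back piece by piece as the
+ // abi params of the cast target.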
+ let (ptr, meta) = arg.force_stack(fx);
+ assert!(meta.is_none());
+ let mut offset = 0;
+ cast_target_to_abi_params(cast)
+ .into_iter()
+ .map(|param| {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
+ offset += i64::from(param.value_type.bytes());
+ val
+ })
+ .collect()
+}
+
+pub(super) fn from_casted_value<'tcx>(
- let abi_param_size: u32 = abi_params
- .iter()
- .map(|param| param.value_type.bytes())
- .sum();
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ block_params: &[Value],
+ layout: TyAndLayout<'tcx>,
+ cast: CastTarget,
+) -> CValue<'tcx> {
+ let abi_params = cast_target_to_abi_params(cast);
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
+ let layout_size = u32::try_from(layout.size.bytes()).unwrap();
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ // Stack slot size may be bigger for, for example, `[u8; 3]`, which is packed into an `i32`.
+ // It may also be smaller, for example when the type is a wrapper around an integer with a
+ // larger alignment than the integer.
+ size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
+ offset: None,
+ });
+ let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+ let mut offset = 0;
+ let mut block_params_iter = block_params.iter().copied();
+ for param in abi_params {
+ let val = block_params_iter.next().unwrap();
+ ptr.offset_i64(fx, offset).store(fx, val, MemFlags::new());
+ offset += i64::from(param.value_type.bytes());
+ }
+ assert_eq!(block_params_iter.next(), None, "Leftover block param");
+ CValue::by_ref(ptr, layout)
+}
+
+/// Get a set of values to be passed as function arguments.
+pub(super) fn adjust_arg_for_abi<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+) -> SmallVec<[Value; 2]> {
+ assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
+ match arg_abi.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
+ PassMode::Pair(_, _) => {
+ let (a, b) = arg.load_scalar_pair(fx);
+ smallvec![a, b]
+ }
+ PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+ PassMode::Indirect { .. } => match arg.force_stack(fx) {
+ (ptr, None) => smallvec![ptr.get_addr(fx)],
+ (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+ },
+ }
+}
+
+/// Create a [`CValue`] containing the value of a function parameter, adding clif function parameters
+/// as necessary.
+pub(super) fn cvalue_for_param<'tcx>(
- assert_eq!(
- fx.bcx.func.dfg.value_type(block_param),
- abi_param.value_type
- );
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] local: Option<mir::Local>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] local_field: Option<usize>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> Option<CValue<'tcx>> {
+ let block_params = arg_abi
+ .get_abi_param(fx.tcx)
+ .into_iter()
+ .map(|abi_param| {
+ let block_param = block_params_iter.next().unwrap();
- Some(CValue::by_val_pair(
- block_params[0],
- block_params[1],
- arg_abi.layout,
- ))
++ assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
+ block_param
+ })
+ .collect::<SmallVec<[_; 2]>>();
+
+ #[cfg(debug_assertions)]
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "arg",
+ local,
+ local_field,
+ &block_params,
+ arg_abi.mode,
+ arg_abi.layout,
+ );
+
+ match arg_abi.mode {
+ PassMode::Ignore => None,
+ PassMode::Direct(_) => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_val(block_params[0], arg_abi.layout))
+ }
+ PassMode::Pair(_, _) => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
++ Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
+ }
+ PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
- Some(CValue::by_ref(
- Pointer::new(block_params[0]),
- arg_abi.layout,
- ))
++ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => {
++ Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
+ }
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_ref_unsized(
+ Pointer::new(block_params[0]),
+ block_params[1],
+ arg_abi.layout,
+ ))
+ }
+ }
+}
--- /dev/null
- fx: &FunctionCx<'_, 'tcx, impl Module>,
+//! Return value handling
+
+use crate::prelude::*;
+
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use smallvec::{smallvec, SmallVec};
+
+/// Returns whether the given type can be returned into an ssa var or needs to be returned on the stack.
+pub(crate) fn can_return_to_ssa_var<'tcx>(
- let fn_sig = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
++ fx: &FunctionCx<'_, '_, 'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &[mir::Operand<'tcx>],
+) -> bool {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- FnAbi::of_fn_ptr(
- &RevealAllLayoutCx(fx.tcx),
- fn_ty.fn_sig(fx.tcx),
- &extra_args,
- )
++ let fn_sig =
++ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) | InstanceDef::DropGlue(_, _) => {
+ return true;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
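+ // Any arguments past the declared inputs are the extra arguments of a C-variadic call.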
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = extra_args
+ .iter()
+ .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
+ .collect::<Vec<_>>();
+ let fn_abi = if let Some(instance) = instance {
+ FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
+ } else {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
+ };
+ match fn_abi.ret.mode {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => true,
+ // FIXME Make it possible to return Cast and Indirect to an ssa var.
+ PassMode::Cast(_) | PassMode::Indirect { .. } => false,
+ }
+}
+
+/// Return a place where the return value of the current function can be written to. If necessary,
+/// this adds an extra parameter pointing to where the return value needs to be stored.
+pub(super) fn codegen_return_param<'tcx>(
- PassMode::Ignore => (
- CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout),
- smallvec![],
- ),
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> CPlace<'tcx> {
+ let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
++ PassMode::Ignore => (CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout), smallvec![]),
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+ (
+ super::make_local_place(
+ fx,
+ RETURN_PLACE,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ is_ssa,
+ ),
+ smallvec![],
+ )
+ }
- CPlace::for_ptr(
- Pointer::new(ret_param),
- fx.fn_abi.as_ref().unwrap().ret.layout,
- ),
++ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ let ret_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(ret_param), pointer_ty(fx.tcx));
+ (
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
++ CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
+ smallvec![ret_param],
+ )
+ }
- pub(super) fn codegen_with_call_return_arg<'tcx, M: Module, T>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
++ unreachable!("unsized return value")
++ }
+ };
+
+ #[cfg(not(debug_assertions))]
+ let _ = ret_param;
+
+ #[cfg(debug_assertions)]
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "ret",
+ Some(RETURN_PLACE),
+ None,
+ &ret_param,
+ fx.fn_abi.as_ref().unwrap().ret.mode,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ );
+
+ ret_place
+}
+
+/// Invokes the closure, passing a value representing the return pointer if one is needed. When the
+/// closure returns, the call's return value(s), if any, are written to the correct place.
- f: impl FnOnce(&mut FunctionCx<'_, 'tcx, M>, Option<Value>) -> (Inst, T),
++pub(super) fn codegen_with_call_return_arg<'tcx, T>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ret_place: Option<CPlace<'tcx>>,
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => match ret_place {
++ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> (Inst, T),
+) -> (Inst, T) {
+ let return_ptr = match ret_arg_abi.mode {
+ PassMode::Ignore => None,
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
++ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
+ Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
+ None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
+ },
- PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {}
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
++ unreachable!("unsized return value")
++ }
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => None,
+ };
+
+ let (call_inst, meta) = f(fx, return_ptr);
+
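+ // Write the direct return value(s) of the call back to the destination place
+ // according to the return pass mode; indirect returns were already written
+ // through the return pointer.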
+ match ret_arg_abi.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(_) => {
+ if let Some(ret_place) = ret_place {
+ let ret_val = fx.bcx.inst_results(call_inst)[0];
+ ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
+ }
+ }
+ PassMode::Pair(_, _) => {
+ if let Some(ret_place) = ret_place {
+ let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+ let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+ ret_place.write_cvalue(
+ fx,
+ CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout),
+ );
+ }
+ }
+ PassMode::Cast(cast) => {
+ if let Some(ret_place) = ret_place {
+ let results = fx
+ .bcx
+ .inst_results(call_inst)
+ .iter()
+ .copied()
+ .collect::<SmallVec<[Value; 2]>>();
+ let result =
+ super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
+ ret_place.write_cvalue(fx, result);
+ }
+ }
- pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, impl Module>) {
++ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {}
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
++ unreachable!("unsized return value")
++ }
+ }
+
+ (call_inst, meta)
+}
+
+/// Codegen a return instruction with the right return value(s), if any.
- PassMode::Ignore
- | PassMode::Indirect {
- attrs: _,
- extra_attrs: None,
- on_stack: _,
- } => {
++pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
+ match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Indirect {
- attrs: _,
- extra_attrs: Some(_),
- on_stack: _,
- } => unreachable!("unsized return value"),
++ PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ fx.bcx.ins().return_(&[]);
+ }
++ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
++ unreachable!("unsized return value")
++ }
+ PassMode::Direct(_) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx).load_scalar(fx);
+ fx.bcx.ins().return_(&[ret_val]);
+ }
+ PassMode::Pair(_, _) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+ fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+ }
+ PassMode::Cast(cast) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx);
+ let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
+ fx.bcx.ins().return_(&ret_vals);
+ }
+ }
+}
--- /dev/null
- let func_id = module
- .declare_function(&caller_name, Linkage::Export, &sig)
- .unwrap();
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_span::symbol::sym;
+
+/// Returns whether an allocator shim was created.
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext<'_>,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind() {
+ codegen_inner(module, unwind_context, kind);
+ true
+ } else {
+ false
+ }
+}
+
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext<'_>,
+ kind: AllocatorKind,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
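+ // For every allocator method, emit a `__rust_*` shim that simply forwards to the
+ // implementation selected by the allocator kind.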
+ for method in ALLOCATOR_METHODS {
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
+
+ let caller_name = format!("__rust_{}", method.name);
+ let callee_name = kind.fn_name(method.name);
+ //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
+
- let callee_func_id = module
- .declare_function(&callee_name, Linkage::Import, &sig)
- .unwrap();
++ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
+
- .define_function(
- func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
++ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = arg_tys
+ .into_iter()
+ .map(|ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(callee_func_ref, &args);
+ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+ bcx.ins().return_(&results);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module
- let func_id = module
- .declare_function("__rust_alloc_error_handler", Linkage::Export, &sig)
- .unwrap();
++ .define_function(func_id, &mut ctx, &mut cranelift_codegen::binemit::NullTrapSink {})
+ .unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+ }
+
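+ // Emit the `__rust_alloc_error_handler` shim: it forwards to the handler for `oom`
+ // and then traps, as the handler is not expected to return.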
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
+
+ let callee_name = kind.fn_name(sym::oom);
+ //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);
+
- let callee_func_id = module
- .declare_function(&callee_name, Linkage::Import, &sig)
- .unwrap();
++ let func_id =
++ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
+
- .define_function(
- func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
++ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = (&[usize_ty, usize_ty])
+ .iter()
+ .map(|&ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ bcx.ins().call(callee_func_ref, &args);
+
+ bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module
++ .define_function(func_id, &mut ctx, &mut cranelift_codegen::binemit::NullTrapSink {})
+ .unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+}
--- /dev/null
- pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Module>) -> IndexVec<Local, SsaKind> {
+//! SSA analysis
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum SsaKind {
+ NotSsa,
+ Ssa,
+}
+
- TerminatorKind::Call {
- destination,
- func,
- args,
- ..
- } => {
++pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
+ let mut flag_map = fx
+ .mir
+ .local_decls
+ .iter()
+ .map(|local_decl| {
+ let ty = fx.monomorphize(local_decl.ty);
+ if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+ SsaKind::Ssa
+ } else {
+ SsaKind::NotSsa
+ }
+ })
+ .collect::<IndexVec<Local, SsaKind>>();
+
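+ // Demote a local to NotSsa once its address is taken or a call returns into it
+ // in a way that can't be handled as an ssa var.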
+ for bb in fx.mir.basic_blocks().iter() {
+ for stmt in bb.statements.iter() {
+ match &stmt.kind {
+ Assign(place_and_rval) => match &place_and_rval.1 {
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ not_ssa(&mut flag_map, place.local)
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+
+ match &bb.terminator().kind {
++ TerminatorKind::Call { destination, func, args, .. } => {
+ if let Some((dest_place, _dest_bb)) = destination {
+ if !crate::abi::can_return_to_ssa_var(fx, func, args) {
+ not_ssa(&mut flag_map, dest_place.local)
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ flag_map
+}
+
+fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
+ flag_map[local] = SsaKind::NotSsa;
+}
--- /dev/null
- FromArchive {
- archive_index: usize,
- entry_index: usize,
- },
+//! Creation of ar archives like for the lib and staticlib crate type
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_session::Session;
+
+use object::{Object, ObjectSymbol, SymbolKind};
+
+#[derive(Debug)]
+enum ArchiveEntry {
- update_symbols: bool,
++ FromArchive { archive_index: usize, entry_index: usize },
+ File(PathBuf),
+}
+
+pub(crate) struct ArArchiveBuilder<'a> {
+ sess: &'a Session,
+ dst: PathBuf,
+ lib_search_paths: Vec<PathBuf>,
+ use_gnu_style_archive: bool,
+ no_builtin_ranlib: bool,
+
+ src_archives: Vec<(PathBuf, ar::Archive<File>)>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive so that linkers don't get confused.
+ entries: Vec<(String, ArchiveEntry)>,
- ArchiveEntry::FromArchive {
- archive_index: 0,
- entry_index: i,
- },
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
+ use rustc_codegen_ssa::back::link::archive_search_paths;
+
+ let (src_archives, entries) = if let Some(input) = input {
+ let mut archive = ar::Archive::new(File::open(input).unwrap());
+ let mut entries = Vec::new();
+
+ let mut i = 0;
+ while let Some(entry) = archive.next_entry() {
+ let entry = entry.unwrap();
+ entries.push((
+ String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
- update_symbols: false,
++ ArchiveEntry::FromArchive { archive_index: 0, entry_index: i },
+ ));
+ i += 1;
+ }
+
+ (vec![(input.to_owned(), archive)], entries)
+ } else {
+ (vec![], Vec::new())
+ };
+
+ ArArchiveBuilder {
+ sess,
+ dst: output.to_path_buf(),
+ lib_search_paths: archive_search_paths(sess),
+ use_gnu_style_archive: sess.target.archive_format == "gnu",
+ // FIXME fix builtin ranlib on macOS
+ no_builtin_ranlib: sess.target.is_like_osx,
+
+ src_archives,
+ entries,
- self.add_archive(location.clone(), |_| false)
- .unwrap_or_else(|e| {
- panic!(
- "failed to add native library {}: {}",
- location.to_string_lossy(),
- e
- );
- });
+ }
+ }
+
+ fn src_files(&mut self) -> Vec<String> {
+ self.entries.iter().map(|(name, _)| name.clone()).collect()
+ }
+
+ fn remove_file(&mut self, name: &str) {
+ let index = self
+ .entries
+ .iter()
+ .position(|(entry_name, _)| entry_name == name)
+ .expect("Tried to remove file not existing in src archive");
+ self.entries.remove(index);
+ }
+
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ fn add_native_library(&mut self, name: rustc_span::symbol::Symbol) {
+ let location = find_library(name, &self.lib_search_paths, self.sess);
- fn update_symbols(&mut self) {
- self.update_symbols = true;
- }
++ self.add_archive(location.clone(), |_| false).unwrap_or_else(|e| {
++ panic!("failed to add native library {}: {}", location.to_string_lossy(), e);
++ });
+ }
+
+ fn add_rlib(
+ &mut self,
+ rlib: &Path,
+ name: &str,
+ lto: bool,
+ skip_objects: bool,
+ ) -> std::io::Result<()> {
+ let obj_start = name.to_owned();
+
+ self.add_archive(rlib.to_owned(), move |fname: &str| {
+ // Ignore metadata files, no matter the name.
+ if fname == METADATA_FILENAME {
+ return true;
+ }
+
+ // Don't include Rust objects if LTO is enabled
+ if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
+ return true;
+ }
+
+ // Otherwise, if this is *not* a Rust object and we're skipping
+ // objects, skip this file.
+ if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+ return true;
+ }
+
+ // ok, don't skip this
+ false
+ })
+ }
+
- ArchiveEntry::FromArchive {
- archive_index,
- entry_index,
- } => {
++ fn update_symbols(&mut self) {}
+
+ fn build(mut self) {
+ enum BuilderKind {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ }
+
+ let sess = self.sess;
+
+ let mut symbol_table = BTreeMap::new();
+
+ let mut entries = Vec::new();
+
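+ // Read the data of every entry and, unless an external ranlib will be run later,
+ // collect the defined symbols for the archive symbol table.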
+ for (entry_name, entry) in self.entries {
+ // FIXME only read the symbol table of the object files to avoid having to keep all
+ // object files in memory at once, or read them twice.
+ let data = match entry {
- entries
- .iter()
- .map(|(name, _)| name.as_bytes().to_vec())
- .collect(),
++ ArchiveEntry::FromArchive { archive_index, entry_index } => {
+ // FIXME read symbols from symtab
+ use std::io::Read;
+ let (ref _src_archive_path, ref mut src_archive) =
+ self.src_archives[archive_index];
+ let mut entry = src_archive.jump_to_entry(entry_index).unwrap();
+ let mut data = Vec::new();
+ entry.read_to_end(&mut data).unwrap();
+ data
+ }
+ ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error while reading object file during archive building: {}",
+ err
+ ));
+ }),
+ };
+
+ if !self.no_builtin_ranlib {
+ match object::File::parse(&data) {
+ Ok(object) => {
+ symbol_table.insert(
+ entry_name.as_bytes().to_vec(),
+ object
+ .symbols()
+ .filter_map(|symbol| {
+ if symbol.is_undefined()
+ || symbol.is_local()
+ || symbol.kind() != SymbolKind::Data
+ && symbol.kind() != SymbolKind::Text
+ && symbol.kind() != SymbolKind::Tls
+ {
+ None
+ } else {
+ symbol.name().map(|name| name.as_bytes().to_vec()).ok()
+ }
+ })
+ .collect::<Vec<_>>(),
+ );
+ }
+ Err(err) => {
+ let err = err.to_string();
+ if err == "Unknown file magic" {
+ // Not an object file; skip it.
+ } else {
+ sess.fatal(&format!(
+ "error parsing `{}` during archive creation: {}",
+ entry_name, err
+ ));
+ }
+ }
+ }
+ }
+
+ entries.push((entry_name, data));
+ }
+
+ let mut builder = if self.use_gnu_style_archive {
+ BuilderKind::Gnu(
+ ar::GnuBuilder::new(
+ File::create(&self.dst).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
- self.sess
- .fatal(&format!("Ranlib exited with code {:?}", status.code()));
++ entries.iter().map(|(name, _)| name.as_bytes().to_vec()).collect(),
+ ar::GnuSymbolTableFormat::Size32,
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ } else {
+ BuilderKind::Bsd(
+ ar::Builder::new(
+ File::create(&self.dst).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ };
+
+ // Add all files
+ for (entry_name, data) in entries.into_iter() {
+ let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ if self.no_builtin_ranlib {
+ let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
+
+ // Run ranlib to be able to link the archive
+ let status = std::process::Command::new(ranlib)
+ .arg(self.dst)
+ .status()
+ .expect("Couldn't run ranlib");
+
+ if !status.success() {
- self.entries.push((
- file_name,
- ArchiveEntry::FromArchive {
- archive_index,
- entry_index: i,
- },
- ));
++ self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+ }
+ }
+}
+
+impl<'a> ArArchiveBuilder<'a> {
+ fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
+ where
+ F: FnMut(&str) -> bool + 'static,
+ {
+ let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
+ let archive_index = self.src_archives.len();
+
+ let mut i = 0;
+ while let Some(entry) = archive.next_entry() {
+ let entry = entry?;
+ let file_name = String::from_utf8(entry.header().identifier().to_vec())
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
++ self.entries
++ .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
+ }
+ i += 1;
+ }
+
+ self.src_archives.push((archive_path, archive));
+ Ok(())
+ }
+}
--- /dev/null
- use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
+//! Abstraction around the object writing crate
+
+use std::convert::{TryFrom, TryInto};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_session::Session;
+
+use cranelift_module::FuncId;
+
+use object::write::*;
- let segment = self
- .segment_name(object::write::StandardSegment::Data)
- .to_vec();
++use object::{RelocationEncoding, SectionKind, SymbolFlags};
+
+use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
+pub(crate) trait WriteMetadata {
+ fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool);
+}
+
+impl WriteMetadata for object::write::Object {
+ fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
- if id == SectionId::EhFrame {
- SectionKind::ReadOnlyData
- } else {
- SectionKind::Debug
- },
++ let segment = self.segment_name(object::write::StandardSegment::Data).to_vec();
+ let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
+ let offset = self.append_section_data(section_id, &data, 1);
+ // For MachO and probably PE this is necessary to prevent the linker from throwing away the
+ // .rustc section. For ELF this isn't necessary, but it also does no harm.
+ self.add_symbol(object::write::Symbol {
+ name: symbol_name.into_bytes(),
+ value: offset,
+ size: data.len() as u64,
+ kind: object::SymbolKind::Data,
+ scope: object::SymbolScope::Dynamic,
+ weak: false,
+ section: SymbolSection::Section(section_id),
+ flags: SymbolFlags::None,
+ });
+ }
+}
+
+pub(crate) trait WriteDebugInfo {
+ type SectionId: Copy;
+
+ fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ );
+}
+
+impl WriteDebugInfo for ObjectProduct {
+ type SectionId = (object::write::SectionId, object::write::SymbolId);
+
+ fn add_debug_section(
+ &mut self,
+ id: SectionId,
+ data: Vec<u8>,
+ ) -> (object::write::SectionId, object::write::SymbolId) {
+ let name = if self.object.format() == object::BinaryFormat::MachO {
+ id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
+ } else {
+ id.name().to_string()
+ }
+ .into_bytes();
+
+ let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
+ // FIXME use SHT_X86_64_UNWIND for .eh_frame
+ let section_id = self.object.add_section(
+ segment,
+ name,
- // FIXME remove once atomic instructions are implemented in Cranelift.
- pub(crate) trait AddConstructor {
- fn add_constructor(&mut self, func_id: FuncId);
- }
-
- impl AddConstructor for ObjectProduct {
- fn add_constructor(&mut self, func_id: FuncId) {
- let symbol = self.function_symbol(func_id);
- let segment = self
- .object
- .segment_name(object::write::StandardSegment::Data);
- let init_array_section =
- self.object
- .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
- let address_size = self
- .object
- .architecture()
- .address_size()
- .expect("address_size must be known")
- .bytes();
- self.object.append_section_data(
- init_array_section,
- &std::iter::repeat(0)
- .take(address_size.into())
- .collect::<Vec<u8>>(),
- 8,
- );
- self.object
- .add_relocation(
- init_array_section,
- object::write::Relocation {
- offset: 0,
- size: address_size * 8,
- kind: RelocationKind::Absolute,
- encoding: RelocationEncoding::Generic,
- symbol,
- addend: 0,
- },
- )
- .unwrap();
- }
- }
-
++ if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
+ );
+ self.object
+ .section_mut(section_id)
+ .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
+ let symbol_id = self.object.section_symbol(section_id);
+ (section_id, symbol_id)
+ }
+
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ ) {
+ let (symbol, symbol_offset) = match reloc.name {
+ DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
+ DebugRelocName::Symbol(id) => {
+ let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
+ self.object
+ .symbol_section_and_offset(symbol_id)
+ .expect("Debug reloc for undef sym???")
+ }
+ };
+ self.object
+ .add_relocation(
+ from.0,
+ Relocation {
+ offset: u64::from(reloc.offset),
+ symbol,
+ kind: reloc.kind,
+ encoding: RelocationEncoding::Generic,
+ size: reloc.size * 8,
+ addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+ },
+ )
+ .unwrap();
+ }
+}
+
- architecture => sess.fatal(&format!(
- "target architecture {:?} is unsupported",
- architecture,
- )),
+pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
+ let triple = crate::build_isa(sess).triple().clone();
+
+ let binary_format = match triple.binary_format {
+ target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
+ target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
+ target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
+ binary_format => sess.fatal(&format!("binary format {} is unsupported", binary_format)),
+ };
+ let architecture = match triple.architecture {
+ target_lexicon::Architecture::X86_32(_) => object::Architecture::I386,
+ target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
+ target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
+ target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
++ architecture => {
++ sess.fatal(&format!("target architecture {:?} is unsupported", architecture,))
++ }
+ };
+ let endian = match triple.endianness().unwrap() {
+ target_lexicon::Endianness::Little => object::Endianness::Little,
+ target_lexicon::Endianness::Big => object::Endianness::Big,
+ };
+
+ let mut metadata_object = object::write::Object::new(binary_format, architecture, endian);
+ metadata_object.add_file_symbol(name.as_bytes().to_vec());
+ f(&mut metadata_object);
+ metadata_object.write().unwrap()
+}
+
+pub(crate) fn make_module(sess: &Session, name: String) -> ObjectModule {
+ let mut builder = ObjectBuilder::new(
+ crate::build_isa(sess),
+ name + ".o",
+ cranelift_module::default_libcall_names(),
+ )
+ .unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm, binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
--- /dev/null
- cx: &mut crate::CodegenCx<'tcx, impl Module>,
+//! Codegen of a single function
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::FnAbi;
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_fn<'tcx>(
- let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
- .map(|_| bcx.create_block())
- .collect();
++ cx: &mut crate::CodegenCx<'_, 'tcx>,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+) {
+ let tcx = cx.tcx;
+
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+
+ // Declare function
+ let name = tcx.symbol_name(instance).name.to_string();
+ let sig = get_function_sig(tcx, cx.module.isa().triple(), instance);
+ let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap();
+
+ cx.cached_context.clear();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+ func.name = ExternalName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
- let arg_uninhabited = fx.mir.args_iter().any(|arg| {
- fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
- .abi
- .is_uninhabited()
- });
++ let block_map: IndexVec<BasicBlock, Block> =
++ (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let pointer_type = cx.module.target_config().pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let mut fx = FunctionCx {
+ cx,
+ tcx,
+ pointer_type,
+
+ instance,
+ mir,
+ fn_abi: Some(FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+ cold_blocks: EntitySet::new(),
+
+ clif_comments,
+ source_info_set: indexmap::IndexSet::new(),
+ next_ssa_var: 0,
+
+ inline_asm_index: 0,
+ };
+
- if arg_uninhabited {
- fx.bcx
- .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
++ let arg_uninhabited = fx
++ .mir
++ .args_iter()
++ .any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+
- tcx.sess.time("codegen prelude", || {
- crate::abi::codegen_fn_prelude(&mut fx, start_block)
- });
++ if !crate::constant::check_constants(&mut fx) {
++ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
++ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
++ crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
++ } else if arg_uninhabited {
++ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+ } else {
+ tcx.sess.time("codegen clif ir", || {
- .define_function(
- func_id,
- context,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- )
++ tcx.sess
++ .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+ codegen_fn_content(&mut fx);
+ });
+ }
+
+ // Recover all necessary data from fx here, as accessing func afterwards will prevent any further use of fx.
+ let instance = fx.instance;
+ let mut clif_comments = fx.clif_comments;
+ let source_info_set = fx.source_info_set;
+ let local_map = fx.local_map;
+ let cold_blocks = fx.cold_blocks;
+
+ // Store function in context
+ let context = &mut cx.cached_context;
+ context.func = func;
+
+ crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &context.func);
+
+ // Perform rust specific optimizations
+ tcx.sess.time("optimize clif ir", || {
+ crate::optimize::optimize_function(
+ tcx,
+ instance,
+ context,
+ &cold_blocks,
+ &mut clif_comments,
+ );
+ });
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(cx.module.isa()).unwrap();
+ context.dce(cx.module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
+
+ // Define function
+ let module = &mut cx.module;
+ tcx.sess.time("define function", || {
+ module
- if let Some(mach_compile_result) = &context.mach_compile_result {
- if let Some(disasm) = &mach_compile_result.disasm {
- crate::pretty_clif::write_ir_file(
- tcx,
- &format!("{}.vcode", tcx.symbol_name(instance).name),
- |file| file.write_all(disasm.as_bytes()),
- )
- }
++ .define_function(func_id, context, &mut cranelift_codegen::binemit::NullTrapSink {})
+ .unwrap()
+ });
+
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "opt",
+ Some(cx.module.isa()),
+ instance,
+ &context,
+ &clif_comments,
+ );
+
- tcx.sess
- .fatal(&format!("cranelift verify error:\n{}", pretty_error));
++ if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
++ crate::pretty_clif::write_ir_file(
++ tcx,
++ &format!("{}.vcode", tcx.symbol_name(instance).name),
++ |file| file.write_all(disasm.as_bytes()),
++ )
+ }
+
+ // Define debuginfo for function
+ let isa = cx.module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ tcx.sess.time("generate debug info", || {
+ if let Some(debug_context) = debug_context {
+ debug_context.define_function(
+ instance,
+ func_id,
+ &name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
+ }
+ unwind_context.add_function(func_id, &context, isa);
+ });
+
+ // Clear context to make it usable for the next function
+ context.clear();
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ None,
+ Some(Box::new(writer)),
+ err,
+ );
- fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Module>) {
- crate::constant::check_constants(fx);
-
++ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
- bb_data
- .terminator()
- .kind
- .fmt_head(&mut terminator_head)
- .unwrap();
++fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
+ for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME once unwinding is supported uncomment next lines
+ // // Unwinding is unlikely to happen, so mark cleanup blocks as cold.
+ // fx.cold_blocks.insert(block);
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ #[cfg(debug_assertions)]
+ {
+ let mut terminator_head = "\n".to_string();
- TerminatorKind::Assert {
- cond,
- expected,
- msg,
- target,
- cleanup: _,
- } => {
++ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ fx.set_debug_loc(bb_data.terminator().source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
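+ // If the target block only runs StorageDead statements before returning,
+ // emit the return directly instead of jumping to it.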
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
- TerminatorKind::SwitchInt {
- discr,
- switch_ty,
- targets,
- } => {
++ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.cold_blocks.insert(failure);
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx
+ .get_caller_location(bb_data.terminator().source_info.span)
+ .load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ bb_data.terminator().source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
+ }
+ }
+ }
+
- TerminatorKind::Drop {
- place,
- target,
- unwind: _,
- } => {
++ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
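+ // A switch on a bool, or with a single target testing against 0, can be lowered
+ // to a conditional branch instead of a jump table.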
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let discr =
+ crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ *fn_span,
+ block,
+ func,
+ args,
+ *destination,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ } => {
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ bb_data.terminator().source_info.span,
+ template,
+ operands,
+ *options,
+ );
+
+ match *destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ crate::trap::trap_unreachable(
+ fx,
+ "[corruption] Returned from noreturn inline asm",
+ );
+ }
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+ }
+ TerminatorKind::Unreachable => {
+ trap_unreachable(fx, "[corruption] Hit unreachable code.");
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
+
+fn codegen_stmt<'tcx>(
- StatementKind::SetDiscriminant {
- place,
- variant_index,
- } => {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ #[cfg(false_debug_assertions)]
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+
+ match &stmt.kind {
- fn is_fat_ptr<'tcx>(
- fx: &FunctionCx<'_, 'tcx, impl Module>,
- ty: Ty<'tcx>,
- ) -> bool {
++ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs, ref rhs) => {
+ let lhs = codegen_operand(fx, lhs);
+ let rhs = codegen_operand(fx, rhs);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs, ref rhs) => {
+ let lhs = codegen_operand(fx, lhs);
+ let rhs = codegen_operand(fx, rhs);
+
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
- .map(
- |ty::TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- }| {
- has_ptr_meta(fx.tcx, pointee_ty)
- },
- )
++ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
- use rustc_target::abi::{Int, TagEncoding, Variants};
-
- match operand.layout().variants {
- Variants::Single { index } => {
- let discr = operand
- .layout()
- .ty
- .discriminant_for_variant(fx.tcx, index)
- .unwrap();
- let discr = if discr.ty.is_signed() {
- fx.layout_of(discr.ty).size.sign_extend(discr.val)
- } else {
- discr.val
- };
- let discr = discr.into();
-
- let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
- lval.write_cvalue(fx, discr);
- }
- Variants::Multiple {
- ref tag,
- tag_field,
- tag_encoding: TagEncoding::Direct,
- variants: _,
- } => {
- let cast_to = fx.clif_type(dest_layout.ty).unwrap();
-
- // Read the tag/niche-encoded discriminant from memory.
- let encoded_discr =
- operand.value_field(fx, mir::Field::new(tag_field));
- let encoded_discr = encoded_discr.load_scalar(fx);
-
- // Decode the discriminant (specifically if it's niche-encoded).
- let signed = match tag.value {
- Int(_, signed) => signed,
- _ => false,
- };
- let val = clif_intcast(fx, encoded_discr, cast_to, signed);
- let val = CValue::by_val(val, dest_layout);
- lval.write_cvalue(fx, val);
- }
- Variants::Multiple { .. } => unreachable!(),
- }
++ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
++ has_ptr_meta(fx.tcx, pointee_ty)
++ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
+ // enum -> discriminant value
+ assert!(adt_def.is_enum());
+ match to_ty.kind() {
+ ty::Uint(_) | ty::Int(_) => {}
+ _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
+ }
++ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+
- fx.bcx
- .call_memset(fx.cx.module.target_config(), addr, val, times);
++ let discriminant = crate::discriminant::codegen_get_discriminant(
++ fx,
++ operand,
++ fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
++ )
++ .load_scalar(fx);
++
++ let res = crate::cast::clif_intcast(
++ fx,
++ discriminant,
++ to_clif_ty,
++ to_ty.is_signed(),
++ );
++ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
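+            // Illustration, not part of the original change: `*const [u8] as *const u8`
+            // takes the fat-ptr -> thin-ptr branch above (the length metadata is
+            // dropped), while for a fieldless `enum E { A, B }` the cast `E::B as u8`
+            // takes the enum -> discriminant branch.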
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ let discr =
+ crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+ lval.write_cvalue(fx, discr);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .val
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
- let llalign = fx
- .bcx
- .ins()
- .iconst(usize_type, layout.align.abi.bytes() as i64);
++ fx.bcx.call_memset(fx.cx.module.target_config(), addr, val, times);
+ } else {
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
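+            // Illustration, not part of the original change: `[0u8; 4096]` hits the
+            // memset fast path above because `u8` lowers to clif `I8`, whereas
+            // `[0u32; 16]` falls back to the explicit loop, writing one element per
+            // iteration through the `index` block parameter.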
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::NullaryOp(NullOp::Box, content_ty) => {
+ let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
+ let content_ty = fx.monomorphize(content_ty);
+ let layout = fx.layout_of(content_ty);
+ let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
- let def_id = match fx
- .tcx
- .lang_items()
- .require(rustc_hir::LangItem::ExchangeMalloc)
- {
- Ok(id) => id,
- Err(s) => {
- fx.tcx
- .sess
- .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
- }
- };
++ let llalign = fx.bcx.ins().iconst(usize_type, layout.align.abi.bytes() as i64);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+
+ // Allocate space:
- assert!(lval
- .layout()
- .ty
- .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
++ let def_id =
++ match fx.tcx.lang_items().require(rustc_hir::LangItem::ExchangeMalloc) {
++ Ok(id) => id,
++ Err(s) => {
++ fx.tcx
++ .sess
++ .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
++ }
++ };
+ let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
+ let ptr = fx.bcx.inst_results(call)[0];
+ lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
+ }
+ Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
- let LlvmInlineAsm {
- asm,
- outputs,
- inputs,
- } = &**asm;
++ assert!(
++ lval.layout()
++ .ty
++ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
++ );
+ let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
+ let val =
+ CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::LlvmInlineAsm(asm) => {
+ use rustc_span::symbol::Symbol;
- assert_eq!(
- input_names,
- &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
- );
++ let LlvmInlineAsm { asm, outputs, inputs } = &**asm;
+ let rustc_hir::LlvmInlineAsmInner {
+ asm: asm_code, // Name
+ outputs: output_names, // Vec<LlvmInlineAsmOutput>
+ inputs: input_names, // Vec<Name>
+ clobbers, // Vec<Name>
+ volatile, // bool
+ alignstack, // bool
+ dialect: _,
+ asm_str_style: _,
+ } = asm;
+ match asm_code.as_str().trim() {
+ "" => {
+ // Black box
+ }
+ "mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => {
- for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
- .iter()
- .enumerate()
- {
++ assert_eq!(input_names, &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]);
+ assert_eq!(output_names.len(), 4);
- _ if fx
- .tcx
- .symbol_name(fx.instance)
- .name
- .starts_with("___chkstk") =>
- {
++ for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"]).iter().enumerate() {
+ assert_eq!(&output_names[i].constraint.as_str(), c);
+ assert!(!output_names[i].is_rw);
+ assert!(!output_names[i].is_indirect);
+ }
+
+ assert_eq!(clobbers, &[]);
+
+ assert!(!volatile);
+ assert!(!alignstack);
+
+ assert_eq!(inputs.len(), 2);
+ let leaf = codegen_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
+ let subleaf = codegen_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
+
+ let (eax, ebx, ecx, edx) =
+ crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
+
+ assert_eq!(outputs.len(), 4);
+ codegen_place(fx, outputs[0])
+ .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[1])
+ .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[2])
+ .write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[3])
+ .write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ }
+ "xgetbv" => {
+ assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
+
+ assert_eq!(output_names.len(), 2);
+ for (i, c) in (&["={eax}", "={edx}"]).iter().enumerate() {
+ assert_eq!(&output_names[i].constraint.as_str(), c);
+ assert!(!output_names[i].is_rw);
+ assert!(!output_names[i].is_indirect);
+ }
+
+ assert_eq!(clobbers, &[]);
+
+ assert!(!volatile);
+ assert!(!alignstack);
+
+ crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
+ }
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
- fn codegen_array_len<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- place: CPlace<'tcx>,
- ) -> Value {
++ _ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ }
+ _ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ }
+ // Used in sys::windows::abort_internal
+ "int $$0x29" => {
+ crate::trap::trap_unimplemented(fx, "Windows abort");
+ }
+ _ => fx
+ .tcx
+ .sess
+ .span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
+ }
+ }
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ }
+}
+
- let len = fx
- .monomorphize(len)
- .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
++fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
- ty::Slice(_elem_ty) => place
- .to_ptr_maybe_unsized()
- .1
- .expect("Length metadata for slice place"),
++ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ ty::Slice(_elem_ty) => {
++ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
++ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
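+// Illustration, not part of the original change: for a place of type `[u8; 4]`
+// this returns the constant 4; for a `[u8]` place it returns the dynamic length
+// stored in the fat pointer's (ptr, len) metadata.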
+
+pub(crate) fn codegen_place<'tcx>(
- PlaceElem::ConstantIndex {
- offset,
- min_length: _,
- from_end,
- } => {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
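+            // Illustration, not part of the original change: a binding like
+            // `let [first, rest @ ..] = slice` produces for `rest` a Subslice
+            // projection with from = 1, to = 0, from_end = true, which the slice
+            // branch above lowers to ptr + 1 element and len - 1.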
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
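+// Illustration, not part of the original change: a place like `(*x).field[i]`
+// is lowered projection by projection: Deref, then Field, then Index, each step
+// refining `cplace` before the final CPlace is returned.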
+
+pub(crate) fn codegen_operand<'tcx>(
- pub(crate) fn codegen_panic<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- msg_str: &str,
- span: Span,
- ) {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ }
+}
+
- let msg_len = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
++pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
+ let location = fx.get_caller_location(span).load_scalar(fx);
+
+ let msg_ptr = fx.anonymous_str("assert", msg_str);
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
- let def_id = fx
- .tcx
- .lang_items()
- .require(lang_item)
- .unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
++ let def_id =
++ fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ fx.lib_call(
+ &*symbol_name,
+ vec![
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ args,
+ );
+
+ crate::trap::trap_unreachable(fx, "panic lang item returned");
+}
--- /dev/null
- std::env::current_exe()
- .unwrap()
- .parent()
- .unwrap()
- .parent()
- .unwrap()
- .to_owned()
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+#[derive(Default)]
+pub struct CraneliftPassesCallbacks {
+ time_passes: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+ fn config(&mut self, config: &mut interface::Config) {
+ // If a --prints=... option has been given, we don't print the "total"
+ // time because it will mess up the --prints output. See #64339.
+ self.time_passes = config.opts.prints.is_empty()
+ && (config.opts.debugging_opts.time_passes || config.opts.debugging_opts.time);
+
+ config.opts.cg.panic = Some(PanicStrategy::Abort);
+ config.opts.debugging_opts.panic_abort_tests = true;
+ config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
++ std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned()
+ }));
+ }
+}
+
+fn main() {
+ let start_time = std::time::Instant::now();
+ let start_rss = get_resident_set_size();
+ rustc_driver::init_rustc_env_logger();
+ let mut callbacks = CraneliftPassesCallbacks::default();
+ rustc_driver::install_ice_hook();
+ let exit_code = rustc_driver::catch_with_exit_code(|| {
+ let args = std::env::args_os()
+ .enumerate()
+ .map(|(i, arg)| {
+ arg.into_string().unwrap_or_else(|arg| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+ )
+ })
+ })
+ .collect::<Vec<_>>();
+ let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+ run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+ Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+ })));
+ run_compiler.run()
+ });
+
+ if callbacks.time_passes {
+ let end_rss = get_resident_set_size();
+ print_time_passes_entry("total", start_time.elapsed(), start_rss, end_rss);
+ }
+
+ std::process::exit(exit_code)
+}
--- /dev/null
- config.opts.maybe_sysroot = Some(
- std::env::current_exe()
- .unwrap()
- .parent()
- .unwrap()
- .parent()
- .unwrap()
- .to_owned(),
- );
+//! This binary differs from cg_clif.rs in two ways: it defaults to cg_llvm instead of cg_clif
+//! (requiring `--clif` to select cg_clif), and it doesn't have JIT support.
+//! This is necessary because with Cargo, `RUSTC` applies to both target crates and host crates.
+//! The host crates must be built with cg_llvm as we are currently building a sysroot for cg_clif.
+//! `RUSTFLAGS` however is only applied to target crates, so `--clif` would only be passed to the
+//! target crates.
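+//!
+//! Hypothetical usage sketch (assumed, not taken from this change): the sysroot build
+//! would set `RUSTC` to this binary so host crates compile with cg_llvm, and pass
+//! `--clif` through `RUSTFLAGS` so that only the target crates are compiled with cg_clif.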
+
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use std::path::PathBuf;
+
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+fn find_sysroot() -> String {
+ // Taken from https://github.com/Manishearth/rust-clippy/pull/911.
+ let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
+ let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
+ match (home, toolchain) {
+ (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
+ _ => option_env!("RUST_SYSROOT")
+ .expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
+ .to_owned(),
+ }
+}
+
+pub struct CraneliftPassesCallbacks {
+ use_clif: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+ fn config(&mut self, config: &mut interface::Config) {
+ if !self.use_clif {
+ config.opts.maybe_sysroot = Some(PathBuf::from(find_sysroot()));
+ return;
+ }
+
+ config.opts.cg.panic = Some(PanicStrategy::Abort);
+ config.opts.debugging_opts.panic_abort_tests = true;
++ config.opts.maybe_sysroot =
++ Some(std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned());
+ }
+}
+
+fn main() {
+ rustc_driver::init_rustc_env_logger();
+ rustc_driver::install_ice_hook();
+ let exit_code = rustc_driver::catch_with_exit_code(|| {
+ let mut use_clif = false;
+
+ let args = std::env::args_os()
+ .enumerate()
+ .map(|(i, arg)| {
+ arg.into_string().unwrap_or_else(|arg| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+ )
+ })
+ })
+ .filter(|arg| {
+ if arg == "--clif" {
+ use_clif = true;
+ false
+ } else {
+ true
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let mut callbacks = CraneliftPassesCallbacks { use_clif };
+
+ let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+ if use_clif {
+ run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+ Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+ })));
+ }
+ run_compiler.run()
+ });
+ std::process::exit(exit_code)
+}
--- /dev/null
- fx: &mut FunctionCx<'_, '_, impl Module>,
+//! Various number casting functions
+
+use crate::prelude::*;
+
+pub(crate) fn clif_intcast(
- if to == types::I64 {
- lsb
- } else {
- fx.bcx.ins().ireduce(to, lsb)
- }
++ fx: &mut FunctionCx<'_, '_, '_>,
+ val: Value,
+ to: Type,
+ signed: bool,
+) -> Value {
+ let from = fx.bcx.func.dfg.value_type(val);
+ match (from, to) {
+ // equal
+ (_, _) if from == to => val,
+
+ // extend
+ (_, types::I128) => {
+ let lo = if from == types::I64 {
+ val
+ } else if signed {
+ fx.bcx.ins().sextend(types::I64, val)
+ } else {
+ fx.bcx.ins().uextend(types::I64, val)
+ };
+ let hi = if signed {
+ fx.bcx.ins().sshr_imm(lo, 63)
+ } else {
+ fx.bcx.ins().iconst(types::I64, 0)
+ };
+ fx.bcx.ins().iconcat(lo, hi)
+ }
+ (_, _) if to.wider_or_equal(from) => {
+ if signed {
+ fx.bcx.ins().sextend(to, val)
+ } else {
+ fx.bcx.ins().uextend(to, val)
+ }
+ }
+
+ // reduce
+ (types::I128, _) => {
+ let (lsb, _msb) = fx.bcx.ins().isplit(val);
- fx: &mut FunctionCx<'_, '_, impl Module>,
++ if to == types::I64 { lsb } else { fx.bcx.ins().ireduce(to, lsb) }
+ }
+ (_, _) => fx.bcx.ins().ireduce(to, val),
+ }
+}
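+// Worked example, for illustration only: extending -2i64 to i128 takes the
+// `(_, types::I128)` arm above: `lo` is the value itself, `sshr_imm(lo, 63)`
+// replicates the sign bit so `hi` becomes all ones, and `iconcat(lo, hi)`
+// assembles the 128-bit pattern 0xFFFF...FFFE.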
+
+pub(crate) fn clif_int_or_float_cast(
- let from_rust_ty = if from_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
++ fx: &mut FunctionCx<'_, '_, '_>,
+ from: Value,
+ from_signed: bool,
+ to_ty: Type,
+ to_signed: bool,
+) -> Value {
+ let from_ty = fx.bcx.func.dfg.value_type(from);
+
+ if from_ty.is_int() && to_ty.is_int() {
+ // int-like -> int-like
+ clif_intcast(
+ fx,
+ from,
+ to_ty,
+            // This is correct: either from_signed == to_signed (making this trivially
+            // correct), or from_clif_ty == to_clif_ty, in which case the cast is a no-op.
+ from_signed,
+ )
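+        // Illustration, not part of the original change: passing `from_signed` here
+        // matches Rust `as` semantics, where a widening cast extends by the *source*
+        // sign: `u8 as i16` zero-extends and `i8 as u16` sign-extends
+        // (-1i8 as u16 == 0xFFFF).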
+ } else if from_ty.is_int() && to_ty.is_float() {
+ if from_ty == types::I128 {
+ // _______ss__f_
+            // __float  tisf: i128 -> f32
+            // __float  tidf: i128 -> f64
+ // __floatuntisf: u128 -> f32
+ // __floatuntidf: u128 -> f64
+
+ let name = format!(
+ "__float{sign}ti{flt}f",
+ sign = if from_signed { "" } else { "un" },
+ flt = match to_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+ _ => unreachable!("{:?}", to_ty),
+ },
+ );
+
- .easy_call(
- &name,
- &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
- to_rust_ty,
- )
++ let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ let to_rust_ty = match to_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
+ return fx
- let to_rust_ty = if to_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
++ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx);
+ }
+
+ // int-like -> float
+ if from_signed {
+ fx.bcx.ins().fcvt_from_sint(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_from_uint(to_ty, from)
+ }
+ } else if from_ty.is_float() && to_ty.is_int() {
+ if to_ty == types::I128 {
+ // _____sssf___
+            // __fix   sfti: f32 -> i128
+            // __fix   dfti: f64 -> i128
+ // __fixunssfti: f32 -> u128
+ // __fixunsdfti: f64 -> u128
+
+ let name = format!(
+ "__fix{sign}{flt}fti",
+ sign = if to_signed { "" } else { "uns" },
+ flt = match from_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+                _ => unreachable!("{:?}", from_ty),
+ },
+ );
+
+ let from_rust_ty = match from_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
- .easy_call(
- &name,
- &[CValue::by_val(from, fx.layout_of(from_rust_ty))],
- to_rust_ty,
- )
++ let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ return fx
++ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx);
+ }
+
+ // float -> int-like
+ if to_ty == types::I8 || to_ty == types::I16 {
+ // FIXME implement fcvt_to_*int_sat.i8/i16
+ let val = if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
+ };
+ let (min, max) = match (to_ty, to_signed) {
+ (types::I8, false) => (0, i64::from(u8::MAX)),
+ (types::I16, false) => (0, i64::from(u16::MAX)),
+ (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
+ (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
+ _ => unreachable!(),
+ };
+ let min_val = fx.bcx.ins().iconst(types::I32, min);
+ let max_val = fx.bcx.ins().iconst(types::I32, max);
+
+ let val = if to_signed {
+ let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
+ let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
+ fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
+ } else {
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
+ fx.bcx.ins().select(has_overflow, max_val, val)
+ };
+ fx.bcx.ins().ireduce(to_ty, val)
+ } else if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
+ }
+ } else if from_ty.is_float() && to_ty.is_float() {
+ // float -> float
+ match (from_ty, to_ty) {
+ (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
+ (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
+ _ => from,
+ }
+ } else {
+ unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
+ }
+}
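+// Worked example, for illustration only: `300.0f32 as u8` takes the I8/I16
+// workaround above: fcvt_to_uint_sat.i32 yields 300, the UnsignedGreaterThan
+// check against max = 255 selects `max_val`, and ireduce produces 255, matching
+// Rust's saturating float-to-int cast semantics.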
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Replaces 128-bit operators with lang item calls where necessary
+
+use cranelift_codegen::ir::ArgumentPurpose;
+
+use crate::prelude::*;
+
+pub(crate) fn maybe_codegen<'tcx>(
- if lhs.layout().ty != fx.tcx.types.u128 && lhs.layout().ty != fx.tcx.types.i128 {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ checked: bool,
+ lhs: CValue<'tcx>,
+ rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
- let val_ty = if is_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
++ if lhs.layout().ty != fx.tcx.types.u128
++ && lhs.layout().ty != fx.tcx.types.i128
++ && rhs.layout().ty != fx.tcx.types.u128
++ && rhs.layout().ty != fx.tcx.types.i128
++ {
+ return None;
+ }
+
+ let lhs_val = lhs.load_scalar(fx);
+ let rhs_val = rhs.load_scalar(fx);
+
+ let is_signed = type_sign(lhs.layout().ty);
+
+ match bin_op {
+ BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Add | BinOp::Sub if !checked => None,
+ BinOp::Mul if !checked => {
- let args = [
- out_place.to_ptr().get_addr(fx),
- lhs.load_scalar(fx),
- rhs.load_scalar(fx),
- ];
++ let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+ Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+ }
+ BinOp::Add | BinOp::Sub | BinOp::Mul => {
+ assert!(checked);
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
+ let param_types = vec![
+ AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ ];
- // Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit
- // integer into its lsb and msb.
- // https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
- if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
- let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
- let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
- let val = match (bin_op, is_signed) {
- (BinOp::Shr, false) => {
- let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
- Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.u128)))
- }
- (BinOp::Shr, true) => {
- let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
- let all_ones = fx.bcx.ins().iconst(types::I64, u64::MAX as i64);
- let all_sign_bits = fx.bcx.ins().select(sign, all_zeros, all_ones);
-
- let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
- Some(CValue::by_val(val, fx.layout_of(fx.tcx.types.i128)))
- }
- (BinOp::Shl, _) => {
- let val_ty = if is_signed {
- fx.tcx.types.i128
- } else {
- fx.tcx.types.u128
- };
- let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
- Some(CValue::by_val(val, fx.layout_of(val_ty)))
- }
- _ => None,
- };
- if let Some(val) = val {
- if let Some(is_overflow) = is_overflow {
- let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
- let val = val.load_scalar(fx);
- return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
++ let args = [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)];
+ let name = match (bin_op, is_signed) {
+ (BinOp::Add, false) => "__rust_u128_addo",
+ (BinOp::Add, true) => "__rust_i128_addo",
+ (BinOp::Sub, false) => "__rust_u128_subo",
+ (BinOp::Sub, true) => "__rust_i128_subo",
+ (BinOp::Mul, false) => "__rust_u128_mulo",
+ (BinOp::Mul, true) => "__rust_i128_mulo",
+ _ => unreachable!(),
+ };
+ fx.lib_call(name, param_types, vec![], &args);
+ Some(out_place.to_cvalue(fx))
+ }
+ BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+ BinOp::Div => {
+ assert!(!checked);
+ if is_signed {
+ Some(fx.easy_call("__divti3", &[lhs, rhs], fx.tcx.types.i128))
+ } else {
+ Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.tcx.types.u128))
+ }
+ }
+ BinOp::Rem => {
+ assert!(!checked);
+ if is_signed {
+ Some(fx.easy_call("__modti3", &[lhs, rhs], fx.tcx.types.i128))
+ } else {
+ Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.tcx.types.u128))
+ }
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Shl | BinOp::Shr => {
+ let is_overflow = if checked {
+ // rhs >= 128
+
+ // FIXME support non 128bit rhs
+ /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
+ let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
+ let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
+ let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
+ let is_overflow = fx.bcx.ins().bconst(types::B1, false);
+
+ Some(fx.bcx.ins().bint(types::I8, is_overflow))
+ } else {
+ None
+ };
+
- return Some(val);
++ let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
++ let val = match bin_op {
++ BinOp::Shl => fx.bcx.ins().ishl(lhs_val, truncated_rhs),
++ BinOp::Shr => {
++ if is_signed {
++ fx.bcx.ins().sshr(lhs_val, truncated_rhs)
+ } else {
- }
-
- let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
- let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.tcx.types.u32));
- let val = match (bin_op, is_signed) {
- (BinOp::Shl, false) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
- }
- (BinOp::Shl, true) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
- }
- (BinOp::Shr, false) => {
- fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.tcx.types.u128)
- }
- (BinOp::Shr, true) => {
- fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.tcx.types.i128)
- }
- (_, _) => unreachable!(),
++ fx.bcx.ins().ushr(lhs_val, truncated_rhs)
+ }
+ }
- let val = val.load_scalar(fx);
++ _ => unreachable!(),
+ };
+ if let Some(is_overflow) = is_overflow {
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
- Some(val)
+ Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
+ } else {
++ Some(CValue::by_val(val, lhs.layout()))
+ }
+ }
+ }
+}
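+// Illustration, not part of the original change: an unchecked `u128` multiply
+// lowers to an `__multi3` libcall, while a checked `u128` add becomes
+// `__rust_u128_addo` invoked through `lib_call` with a StructReturn pointer to
+// a stack slot that receives the `(u128, bool)` result pair.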
--- /dev/null
- use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
-
+use rustc_index::vec::IndexVec;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
- ty::RawPtr(TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- })
- | ty::Ref(_, pointee_ty, _) => {
+use crate::prelude::*;
+
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+ match tcx.data_layout.pointer_size.bits() {
+ 16 => types::I16,
+ 32 => types::I32,
+ 64 => types::I64,
+ bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+}
+
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+ match scalar.value {
+ Primitive::Int(int, _sign) => match int {
+ Integer::I8 => types::I8,
+ Integer::I16 => types::I16,
+ Integer::I32 => types::I32,
+ Integer::I64 => types::I64,
+ Integer::I128 => types::I128,
+ },
+ Primitive::F32 => types::F32,
+ Primitive::F64 => types::F64,
+ Primitive::Pointer => pointer_ty(tcx),
+ }
+}
+
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+ Some(match ty.kind() {
+ ty::Bool => types::I8,
+ ty::Uint(size) => match size {
+ UintTy::U8 => types::I8,
+ UintTy::U16 => types::I16,
+ UintTy::U32 => types::I32,
+ UintTy::U64 => types::I64,
+ UintTy::U128 => types::I128,
+ UintTy::Usize => pointer_ty(tcx),
+ },
+ ty::Int(size) => match size {
+ IntTy::I8 => types::I8,
+ IntTy::I16 => types::I16,
+ IntTy::I32 => types::I32,
+ IntTy::I64 => types::I64,
+ IntTy::I128 => types::I128,
+ IntTy::Isize => pointer_ty(tcx),
+ },
+ ty::Char => types::I32,
+ ty::Float(size) => match size {
+ FloatTy::F32 => types::F32,
+ FloatTy::F64 => types::F64,
+ },
+ ty::FnPtr(_) => pointer_ty(tcx),
- ty::RawPtr(TypeAndMut {
- ty: pointee_ty,
- mutbl: _,
- })
- | ty::Ref(_, pointee_ty, _) => {
++ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, pointee_ty) {
+ return None;
+ } else {
+ pointer_ty(tcx)
+ }
+ }
+ ty::Adt(adt_def, _) if adt_def.repr.simd() => {
+ let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+ {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+ _ => return None,
+ }
+ }
+ ty::Param(_) => bug!("ty param {:?}", ty),
+ _ => return None,
+ })
+}
+
+fn clif_pair_type_from_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+ Some(match ty.kind() {
+ ty::Tuple(substs) if substs.len() == 2 => {
+ let mut types = substs.types();
+ let a = clif_type_from_ty(tcx, types.next().unwrap())?;
+ let b = clif_type_from_ty(tcx, types.next().unwrap())?;
+ if a.is_vector() || b.is_vector() {
+ return None;
+ }
+ (a, b)
+ }
- let ptr_ty = tcx.mk_ptr(TypeAndMut {
- ty,
- mutbl: rustc_hir::Mutability::Not,
- });
- match &tcx
- .layout_of(ParamEnv::reveal_all().and(ptr_ty))
- .unwrap()
- .abi
- {
++ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, pointee_ty) {
+ (pointer_ty(tcx), pointer_ty(tcx))
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ })
+}
+
+/// Is a pointer to this type a fat ptr?
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
- fx: &mut FunctionCx<'_, '_, impl Module>,
++ let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
++ match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+ Abi::Scalar(_) => false,
+ Abi::ScalarPair(_, _) => true,
+ abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+ }
+}
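+// Illustration, not part of the original change: `has_ptr_meta` is true for
+// `&[u8]` and `&dyn Trait` (their pointers are a ScalarPair of data plus
+// len/vtable) and false for `&u8` or `&[u8; 4]`, whose pointers are a single
+// Scalar.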
+
+pub(crate) fn codegen_icmp_imm(
- fn resolve_normal_value_imm(func: &Function, val: Value) -> Option<i64> {
- if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
- if let InstructionData::UnaryImm {
- opcode: Opcode::Iconst,
- imm,
- } = func.dfg[inst]
- {
- Some(imm.into())
- } else {
- None
- }
- } else {
- None
- }
- }
-
- fn resolve_128bit_value_imm(func: &Function, val: Value) -> Option<u128> {
- let (lsb, msb) = if let ValueDef::Result(inst, 0 /*param*/) = func.dfg.value_def(val) {
- if let InstructionData::Binary {
- opcode: Opcode::Iconcat,
- args: [lsb, msb],
- } = func.dfg[inst]
- {
- (lsb, msb)
- } else {
- return None;
- }
- } else {
- return None;
- };
-
- let lsb = u128::from(resolve_normal_value_imm(func, lsb)? as u64);
- let msb = u128::from(resolve_normal_value_imm(func, msb)? as u64);
-
- Some(msb << 64 | lsb)
- }
-
- pub(crate) fn resolve_value_imm(func: &Function, val: Value) -> Option<u128> {
- if func.dfg.value_type(val) == types::I128 {
- resolve_128bit_value_imm(func, val)
- } else {
- resolve_normal_value_imm(func, val).map(|imm| u128::from(imm as u64))
- }
- }
-
++ fx: &mut FunctionCx<'_, '_, '_>,
+ intcc: IntCC,
+ lhs: Value,
+ rhs: i128,
+) -> Value {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ if lhs_ty == types::I128 {
+ // FIXME legalize `icmp_imm.i128` in Cranelift
+
+ let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+ let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
+ match intcc {
+ IntCC::Equal => {
+ let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ fx.bcx.ins().band(lsb_eq, msb_eq)
+ }
+ IntCC::NotEqual => {
+ let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+ let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+ fx.bcx.ins().bor(lsb_ne, msb_ne)
+ }
+ _ => {
+ // if msb_eq {
+ // lsb_cc
+ // } else {
+ // msb_cc
+ // }
+
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+ let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+ fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+ }
+ }
+ } else {
+ let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
+ fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+ }
+}
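+// Worked example, for illustration only: comparing a 128-bit `lhs` against the
+// immediate 1 with IntCC::Equal splits both sides into 64-bit halves, giving
+// rhs_lsb = 1 and rhs_msb = 0; the result is (lhs_lsb == 1) & (lhs_msb == 0),
+// computed with two icmp_imm instructions and a band.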
+
- pub(crate) struct FunctionCx<'clif, 'tcx, M: Module> {
- pub(crate) cx: &'clif mut crate::CodegenCx<'tcx, M>,
+pub(crate) fn type_min_max_value(
+ bcx: &mut FunctionBuilder<'_>,
+ ty: Type,
+ signed: bool,
+) -> (Value, Value) {
+ assert!(ty.is_int());
+
+ if ty == types::I128 {
+ if signed {
+ let min = i128::MIN as u128;
+ let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+ let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+ let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+ let max = i128::MAX as u128;
+ let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+ let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+ let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+ return (min, max);
+ } else {
+ let min_half = bcx.ins().iconst(types::I64, 0);
+ let min = bcx.ins().iconcat(min_half, min_half);
+
+ let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+ let max = bcx.ins().iconcat(max_half, max_half);
+
+ return (min, max);
+ }
+ }
+
+ let min = match (ty, signed) {
+ (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+ 0i64
+ }
+ (types::I8, true) => i64::from(i8::MIN),
+ (types::I16, true) => i64::from(i16::MIN),
+ (types::I32, true) => i64::from(i32::MIN),
+ (types::I64, true) => i64::MIN,
+ _ => unreachable!(),
+ };
+
+ let max = match (ty, signed) {
+ (types::I8, false) => i64::from(u8::MAX),
+ (types::I16, false) => i64::from(u16::MAX),
+ (types::I32, false) => i64::from(u32::MAX),
+ (types::I64, false) => u64::MAX as i64,
+ (types::I8, true) => i64::from(i8::MAX),
+ (types::I16, true) => i64::from(i16::MAX),
+ (types::I32, true) => i64::from(i32::MAX),
+ (types::I64, true) => i64::MAX,
+ _ => unreachable!(),
+ };
+
+ let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+ (min, max)
+}
+
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+ ty::Int(..) => true,
+ ty::Float(..) => false, // `signed` is unused for floats
+ _ => panic!("{}", ty),
+ }
+}
+
- impl<'tcx, M: Module> LayoutOf for FunctionCx<'_, 'tcx, M> {
++pub(crate) struct FunctionCx<'m, 'clif, 'tcx> {
++ pub(crate) cx: &'clif mut crate::CodegenCx<'m, 'tcx>,
+ pub(crate) tcx: TyCtxt<'tcx>,
+ pub(crate) pointer_type: Type, // Cached from module
+
+ pub(crate) instance: Instance<'tcx>,
+ pub(crate) mir: &'tcx Body<'tcx>,
+ pub(crate) fn_abi: Option<FnAbi<'tcx, Ty<'tcx>>>,
+
+ pub(crate) bcx: FunctionBuilder<'clif>,
+ pub(crate) block_map: IndexVec<BasicBlock, Block>,
+ pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+ /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+ pub(crate) caller_location: Option<CValue<'tcx>>,
+
+ /// See [`crate::optimize::code_layout`] for more information.
+ pub(crate) cold_blocks: EntitySet<Block>,
+
+ pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+ pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+ /// This should only be accessed by `CPlace::new_var`.
+ pub(crate) next_ssa_var: u32,
+
+ pub(crate) inline_asm_index: u32,
+}
+
- impl<'tcx, M: Module> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, M> {
++impl<'tcx> LayoutOf for FunctionCx<'_, '_, 'tcx> {
+ type Ty = Ty<'tcx>;
+ type TyAndLayout = TyAndLayout<'tcx>;
+
+ fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
+ RevealAllLayoutCx(self.tcx).layout_of(ty)
+ }
+}
+
- impl<'tcx, M: Module> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, M> {
++impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
- impl<'tcx, M: Module> layout::HasParamEnv<'tcx> for FunctionCx<'_, 'tcx, M> {
++impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
- impl<'tcx, M: Module> HasTargetSpec for FunctionCx<'_, 'tcx, M> {
++impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
- impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
++impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
- .declare_data(
- &format!("__{}_{:08x}", prefix, msg_hash),
- Linkage::Local,
- false,
- false,
- )
++impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ pub(crate) fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+
+ pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+ clif_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+ clif_pair_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+ *self.block_map.get(bb).unwrap()
+ }
+
+ pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+ *self.local_map.get(local).unwrap_or_else(|| {
+ panic!("Local {:?} doesn't exist", local);
+ })
+ }
+
+ pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+ let (index, _) = self.source_info_set.insert_full(source_info);
+ self.bcx.set_srcloc(SourceLoc::new(index as u32));
+ }
+
+ pub(crate) fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
+ if let Some(loc) = self.caller_location {
+ // `#[track_caller]` is used; return caller location instead of current location.
+ return loc;
+ }
+
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = self.tcx.const_caller_location((
+ rustc_span::symbol::Symbol::intern(&caller.file.name.to_string()),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ crate::constant::codegen_const_value(self, const_loc, self.tcx.caller_location_ty())
+ }
+
+ pub(crate) fn triple(&self) -> &target_lexicon::Triple {
+ self.cx.module.isa().triple()
+ }
+
+ pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::{Hash, Hasher};
+
+ let mut hasher = DefaultHasher::new();
+ msg.hash(&mut hasher);
+ let msg_hash = hasher.finish();
+ let mut data_ctx = DataContext::new();
+ data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+ let msg_id = self
+ .cx
+ .module
- self.0
- .layout_of(ParamEnv::reveal_all().and(&ty))
- .unwrap_or_else(|e| {
- if let layout::LayoutError::SizeOverflow(_) = e {
- self.0.sess.fatal(&e.to_string())
- } else {
- bug!("failed to get layout for `{}`: {}", ty, e)
- }
- })
++ .declare_data(&format!("__{}_{:08x}", prefix, msg_hash), Linkage::Local, false, false)
+ .unwrap();
+
+ // Ignore DuplicateDefinition error, as the data will be the same
+ let _ = self.cx.module.define_data(msg_id, &data_ctx);
+
+ let local_msg_id = self.cx.module.declare_data_in_func(msg_id, self.bcx.func);
+ #[cfg(debug_assertions)]
+ {
+ self.add_comment(local_msg_id, msg);
+ }
+ self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+ }
+}
+
+pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
+
+impl<'tcx> LayoutOf for RevealAllLayoutCx<'tcx> {
+ type Ty = Ty<'tcx>;
+ type TyAndLayout = TyAndLayout<'tcx>;
+
+ fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
+ assert!(!ty.still_further_specializable());
++ self.0.layout_of(ParamEnv::reveal_all().and(&ty)).unwrap_or_else(|e| {
++ if let layout::LayoutError::SizeOverflow(_) = e {
++ self.0.sess.fatal(&e.to_string())
++ } else {
++ bug!("failed to get layout for `{}`: {}", ty, e)
++ }
++ })
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.0
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.0.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.0.sess.target
+ }
+}
--- /dev/null
- pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut impl Module) {
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_span::DUMMY_SP;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::ErrorReported;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
+};
+use rustc_middle::ty::{Const, ConstKind};
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+#[derive(Default)]
+pub(crate) struct ConstantCx {
+ todo: Vec<TodoItem>,
+ done: FxHashSet<DataId>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+ Alloc(AllocId),
+ Static(DefId),
+}
+
+impl ConstantCx {
- pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) {
++ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
+ //println!("todo {:?}", self.todo);
+ define_all_allocs(tcx, module, &mut self);
+ //println!("done {:?}", self.done);
+ self.done.clear();
+ }
+}
+
- fx.tcx
- .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
++pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
++ let mut all_constants_ok = true;
+ for constant in &fx.mir.required_consts {
+ let const_ = fx.monomorphize(constant.literal);
+ match const_.val {
+ ConstKind::Value(_) => {}
+ ConstKind::Unevaluated(def, ref substs, promoted) => {
+ if let Err(err) =
- fx.tcx
- .sess
- .span_err(constant.span, "erroneous constant encountered");
++ fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ {
++ all_constants_ok = false;
+ match err {
+ ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(
+ constant.span,
+                        "codegen encountered polymorphic constant: {:?}",
+ err
+ );
+ }
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ }
+ }
++ all_constants_ok
+}
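+// Illustration, not part of the original change: if evaluating a required
+// constant fails, the error is reported here up front; `codegen_constant`
+// below can therefore treat an Err from `const_eval_resolve` as a bug
+// ("erroneous constant not captured by required_consts") instead of having to
+// emit a runtime trap.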
+
+pub(crate) fn codegen_static(constants_cx: &mut ConstantCx, def_id: DefId) {
+ constants_cx.todo.push(TodoItem::Static(def_id));
+}
+
+pub(crate) fn codegen_tls_ref<'tcx>(
- let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ CValue::by_val(tls_ptr, layout)
+}
+
+fn codegen_static_ref<'tcx>(
- let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ assert!(!layout.is_unsized(), "unsized statics aren't supported");
+ assert!(
+ matches!(
+ fx.bcx.func.global_values[local_data_id],
+ GlobalValueData::Symbol { tls: false, .. }
+ ),
+ "tls static referenced without Rvalue::ThreadLocalRef"
+ );
+ CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
+pub(crate) fn codegen_constant<'tcx>(
- match fx
- .tcx
- .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
- {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+ let const_ = fx.monomorphize(constant.literal);
+ let const_val = match const_.val {
+ ConstKind::Value(const_val) => const_val,
+ ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
+ assert!(substs.is_empty());
+ assert!(promoted.is_none());
+
+ return codegen_static_ref(
+ fx,
+ def.did,
+ fx.layout_of(fx.monomorphize(&constant.literal.ty)),
+ )
+ .to_cvalue(fx);
+ }
+ ConstKind::Unevaluated(def, ref substs, promoted) => {
- fx.tcx
- .sess
- .span_err(constant.span, "erroneous constant encountered");
- return crate::trap::trap_unreachable_ret_value(
- fx,
- fx.layout_of(const_.ty),
- "erroneous constant encountered",
- );
++ match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
+ Ok(const_val) => const_val,
+ Err(_) => {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ span_bug!(constant.span, "erroneous constant not captured by required_consts");
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ };
+
+ codegen_const_value(fx, const_val, const_.ty)
+}
+
+pub(crate) fn codegen_const_value<'tcx>(
- std::iter::repeat(0)
- .take(size.bytes_usize())
- .collect::<Vec<u8>>(),
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ const_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+ let layout = fx.layout_of(ty);
+ assert!(!layout.is_unsized(), "sized const value");
+
+ if layout.is_zst() {
+ return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+ }
+
+ match const_val {
+ ConstValue::Scalar(x) => {
+ if fx.clif_type(layout.ty).is_none() {
+ let (size, align) = (layout.size, layout.align.pref);
+ let mut alloc = Allocation::from_bytes(
- let data_id = data_id_for_alloc_id(
- &mut fx.cx.module,
- ptr.alloc_id,
- alloc.mutability,
- );
++ std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(),
+ align,
+ );
+ let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
+ alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
+ let alloc = fx.tcx.intern_const_alloc(alloc);
+ return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
+ }
+
+ match x {
+ Scalar::Int(int) => CValue::const_val(fx, layout, int),
+ Scalar::Ptr(ptr) => {
+ let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
+ let base_addr = match alloc_kind {
+ Some(GlobalAlloc::Memory(alloc)) => {
+ fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
- crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
++ let data_id =
++ data_id_for_alloc_id(fx.cx.module, ptr.alloc_id, alloc.mutability);
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ Some(GlobalAlloc::Function(instance)) => {
+ let func_id =
- let data_id =
- data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
++ crate::abi::import_function(fx.tcx, fx.cx.module, instance);
+ let local_func_id =
+ fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+ }
+ Some(GlobalAlloc::Static(def_id)) => {
+ assert!(fx.tcx.is_static(def_id));
- fx.bcx
- .ins()
- .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
++ let data_id = data_id_for_static(fx.tcx, fx.cx.module, def_id, false);
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ None => bug!("missing allocation {:?}", ptr.alloc_id),
+ };
+ let val = if ptr.offset.bytes() != 0 {
- let len = fx.bcx.ins().iconst(
- fx.pointer_type,
- i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
- );
++ fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
+ } else {
+ base_addr
+ };
+ CValue::by_val(val, layout)
+ }
+ }
+ }
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
+ ConstValue::Slice { data, start, end } => {
+ let ptr = pointer_for_allocation(fx, data)
+ .offset_i64(fx, i64::try_from(start).unwrap())
+ .get_addr(fx);
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let len = fx
++ .bcx
++ .ins()
++ .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ CValue::by_val_pair(ptr, len, layout)
+ }
+ }
+}
+
+fn pointer_for_allocation<'tcx>(
- let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.mutability);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ alloc: &'tcx Allocation,
+) -> crate::pointer::Pointer {
+ let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
- module: &mut impl Module,
++ let data_id = data_id_for_alloc_id(fx.cx.module, alloc_id, alloc.mutability);
+
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ crate::pointer::Pointer::new(global_ptr)
+}
+
+fn data_id_for_alloc_id(
- module: &mut impl Module,
++ module: &mut dyn Module,
+ alloc_id: AllocId,
+ mutability: rustc_hir::Mutability,
+) -> DataId {
+ module
+ .declare_data(
+ &format!(".L__alloc_{:x}", alloc_id.0),
+ Linkage::Local,
+ mutability == rustc_hir::Mutability::Mut,
+ false,
+ )
+ .unwrap()
+}
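+
+// The `.L` prefix is the conventional assembler-local label prefix, so together
+// with `Linkage::Local` these anonymous allocation symbols stay private to the
+// object file and cannot clash between codegen units or crates.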
+
+fn data_id_for_static(
+ tcx: TyCtxt<'_>,
- let align = tcx
- .layout_of(ParamEnv::reveal_all().and(ty))
- .unwrap()
- .align
- .pref
- .bytes();
++ module: &mut dyn Module,
+ def_id: DefId,
+ definition: bool,
+) -> DataId {
+ let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+ let linkage = if definition {
+ crate::linkage::get_static_linkage(tcx, def_id)
+ } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
+ Linkage::Preemptible
+ } else {
+ Linkage::Import
+ };
+
+ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+ let symbol_name = tcx.symbol_name(instance).name;
+ let ty = instance.ty(tcx, ParamEnv::reveal_all());
+ let is_mutable = if tcx.is_mutable_static(def_id) {
+ true
+ } else {
+ !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ };
- let ref_data_id = module
- .declare_data(&ref_name, Linkage::Local, false, false)
- .unwrap();
++ let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
+
+ let attrs = tcx.codegen_fn_attrs(def_id);
+
+ let data_id = module
+ .declare_data(
+ &*symbol_name,
+ linkage,
+ is_mutable,
+ attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+ )
+ .unwrap();
+
+ if rlinkage.is_some() {
+ // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+
+ let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
- data_ctx.define(
- std::iter::repeat(0)
- .take(pointer_ty(tcx).bytes() as usize)
- .collect(),
- );
++ let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(align);
+ let data = module.declare_data_in_data(data_id, &mut data_ctx);
- fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut impl Module, cx: &mut ConstantCx) {
++ data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+ data_ctx.write_data_addr(0, data, 0);
+ match module.define_data(ref_data_id, &data_ctx) {
+ // Every time the static is referenced there will be another definition of this global,
+ // so duplicate definitions are expected and allowed.
+ Err(ModuleError::DuplicateDefinition(_)) => {}
+ res => res.unwrap(),
+ }
+ ref_data_id
+ } else {
+ data_id
+ }
+}
+
- let section_name = tcx
- .codegen_fn_attrs(def_id)
- .link_section
- .map(|s| s.as_str());
++fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
+ while let Some(todo_item) = cx.todo.pop() {
+ let (data_id, alloc, section_name) = match todo_item {
+ TodoItem::Alloc(alloc_id) => {
+ //println!("alloc_id {}", alloc_id);
+ let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
+ GlobalAlloc::Memory(alloc) => alloc,
+ GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
+ };
+ let data_id = data_id_for_alloc_id(module, alloc_id, alloc.mutability);
+ (data_id, alloc, None)
+ }
+ TodoItem::Static(def_id) => {
+ //println!("static {:?}", def_id);
+
- let bytes = alloc
- .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
- .to_vec();
++ let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
+
+ let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+ let data_id = data_id_for_static(tcx, module, def_id, true);
+ (data_id, alloc, section_name)
+ }
+ };
+
+ //("data_id {}", data_id);
+ if cx.done.contains(&data_id) {
+ continue;
+ }
+
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(alloc.align.bytes());
+
+ if let Some(section_name) = section_name {
+ // FIXME set correct segment for Mach-O files
+ data_ctx.set_segment_section("", &*section_name);
+ }
+
- if tcx
- .codegen_fn_attrs(def_id)
- .flags
- .contains(CodegenFnAttrFlags::THREAD_LOCAL)
++ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+ data_ctx.define(bytes.into_boxed_slice());
+
+ for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
+ let addend = {
+ let endianness = tcx.data_layout.endian;
+ let offset = offset.bytes() as usize;
+ let ptr_size = tcx.data_layout.pointer_size;
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
+ read_target_uint(endianness, bytes).unwrap()
+ };
+
+ let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
+ let data_id = match reloc_target_alloc {
+ GlobalAlloc::Function(instance) => {
+ assert_eq!(addend, 0);
+ let func_id = crate::abi::import_function(tcx, module, instance);
+ let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+ continue;
+ }
+ GlobalAlloc::Memory(target_alloc) => {
+ cx.todo.push(TodoItem::Alloc(reloc));
+ data_id_for_alloc_id(module, reloc, target_alloc.mutability)
+ }
+ GlobalAlloc::Static(def_id) => {
- fx: &FunctionCx<'_, 'tcx, impl Module>,
++ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
+ }
+
+ // Don't push a `TodoItem::Static` here, as it will cause statics used by
+ // multiple crates to be duplicated between them. It isn't necessary anyway,
+ // as it will get pushed by `codegen_static` when necessary.
+ data_id_for_static(tcx, module, def_id, false)
+ }
+ };
+
+ let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+ }
+
+ // FIXME don't duplicate definitions in lazy jit mode
+ let _ = module.define_data(data_id, &data_ctx);
+ cx.done.insert(data_id);
+ }
+
+ assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
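+
+// `define_all_allocs` drains `cx.todo` to a fixpoint: defining one allocation
+// can discover relocations to further allocations, which are pushed back onto
+// the worklist, while `cx.done` keeps the same `DataId` from being defined
+// twice. For example, a static holding a `&[u8]` enqueues a `TodoItem::Alloc`
+// for the byte allocation its slice relocation points at.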
+
+pub(crate) fn mir_operand_get_const_val<'tcx>(
- Operand::Constant(const_) => Some(
- fx.monomorphize(const_.literal)
- .eval(fx.tcx, ParamEnv::reveal_all()),
- ),
++ fx: &FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> Option<&'tcx Const<'tcx>> {
+ match operand {
+ Operand::Copy(_) | Operand::Move(_) => None,
++ Operand::Constant(const_) => {
++ Some(fx.monomorphize(const_.literal).eval(fx.tcx, ParamEnv::reveal_all()))
++ }
+ }
+}
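+
+// Only `Operand::Constant` can yield a compile-time value; `Copy` and `Move`
+// read places whose contents are generally unknown until runtime. Here
+// `monomorphize` substitutes any generic parameters and `eval` folds the
+// result to a concrete constant.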
--- /dev/null
- root.set(
- gimli::DW_AT_ranges,
- AttributeValue::RangeListRef(unit_range_list_id),
- );
+//! Write the debuginfo into an object file.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use crate::backend::WriteDebugInfo;
+
+use super::DebugContext;
+
+impl DebugContext<'_> {
+ pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
+ let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+ let root = self.dwarf.unit.root();
+ let root = self.dwarf.unit.get_mut(root);
- WriterRelocate {
- relocs: Vec::new(),
- writer: EndianVec::new(endian),
- }
++ root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
+
+ let mut sections = Sections::new(WriterRelocate::new(self.endian));
+ self.dwarf.write(&mut sections).unwrap();
+
+ let mut section_map = FxHashMap::default();
+ let _: Result<()> = sections.for_each_mut(|id, section| {
+ if !section.writer.slice().is_empty() {
+ let section_id = product.add_debug_section(id, section.writer.take());
+ section_map.insert(id, section_id);
+ }
+ Ok(())
+ });
+
+ let _: Result<()> = sections.for_each(|id, section| {
+ if let Some(section_id) = section_map.get(&id) {
+ for reloc in &section.relocs {
+ product.add_debug_reloc(&section_map, section_id, reloc);
+ }
+ }
+ Ok(())
+ });
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+ pub(crate) offset: u32,
+ pub(crate) size: u8,
+ pub(crate) name: DebugRelocName,
+ pub(crate) addend: i64,
+ pub(crate) kind: object::RelocationKind,
+}
+
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+ Section(SectionId),
+ Symbol(usize),
+}
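+
+// `Section` relocations point into another DWARF section, while `Symbol`
+// relocations point at a function; the `usize` payload is the `FuncId` (see
+// `define_function`, which uses `func_id.as_u32() as usize` as the symbol).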
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+ pub(super) relocs: Vec<DebugReloc>,
+ pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+ pub(super) fn new(endian: RunTimeEndian) -> Self {
- self.writer
- .write_udata_at(reloc.offset as usize, val, reloc.size)
- .unwrap();
++ WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
+ }
+
+ /// Apply the collected relocations so the encoded data is usable for JIT execution.
+ #[cfg(feature = "jit")]
+ pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
+ use std::convert::TryInto;
+
+ for reloc in self.relocs.drain(..) {
+ match reloc.name {
+ super::DebugRelocName::Section(_) => unreachable!(),
+ super::DebugRelocName::Symbol(sym) => {
+ let addr = jit_module.get_finalized_function(
+ cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
+ );
+ let val = (addr as u64 as i64 + reloc.addend) as u64;
++ self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
+ }
+ }
+ }
+ self.writer.into_vec()
+ }
+}
+
+impl Writer for WriterRelocate {
+ type Endian = RunTimeEndian;
+
+ fn endian(&self) -> Self::Endian {
+ self.writer.endian()
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+
+ fn write(&mut self, bytes: &[u8]) -> Result<()> {
+ self.writer.write(bytes)
+ }
+
+ fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+ self.writer.write_at(offset, bytes)
+ }
+
+ fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+ match address {
+ Address::Constant(val) => self.write_udata(val, size),
+ Address::Symbol { symbol, addend } => {
+ let offset = self.len() as u64;
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend: addend as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+ }
+ }
+
+ fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+ let offset = self.len() as u32;
+ self.relocs.push(DebugReloc {
+ offset,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+
+ fn write_offset_at(
+ &mut self,
+ offset: usize,
+ val: usize,
+ section: SectionId,
+ size: u8,
+ ) -> Result<()> {
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata_at(offset, 0, size)
+ }
+
+ fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+ match address {
+ // Address::Constant arm copied from gimli
+ Address::Constant(val) => {
+ // Indirect doesn't matter here.
+ let val = match eh_pe.application() {
+ gimli::DW_EH_PE_absptr => val,
+ gimli::DW_EH_PE_pcrel => {
+ // TODO: better handling of sign
+ let offset = self.len() as u64;
+ offset.wrapping_sub(val)
+ }
+ _ => {
+ return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+ }
+ };
+ self.write_eh_pointer_data(val, eh_pe.format(), size)
+ }
+ Address::Symbol { symbol, addend } => match eh_pe.application() {
+ gimli::DW_EH_PE_pcrel => {
+ let size = match eh_pe.format() {
+ gimli::DW_EH_PE_sdata4 => 4,
+ _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ };
+ self.relocs.push(DebugReloc {
+ offset: self.len() as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend,
+ kind: object::RelocationKind::Relative,
+ });
+ self.write_udata(0, size)
+ }
+ _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ },
+ }
+ }
+}
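+
+// The recurring pattern in this impl: whenever a value is only known at link
+// time (or JIT finalization time), record a `DebugReloc` at the current offset
+// and emit a zero placeholder of the matching size; applying the relocation
+// later patches the placeholder in place.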
--- /dev/null
- Some(FileInfo {
- timestamp: 0,
- size: 0,
- md5: buf,
- })
+//! Line info generation (`.debug_line`)
+
+use std::ffi::OsStr;
+use std::path::{Component, Path};
+
+use crate::prelude::*;
+
+use rustc_span::{
+ FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::machinst::MachSrcLoc;
+
+use gimli::write::{
+ Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+ UnitEntryId,
+};
+
+// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
+fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
+ let mut iter = path.components();
+ let file_name = match iter.next_back() {
+ Some(Component::Normal(p)) => p,
+ component => {
+ panic!(
+ "Path component {:?} of path {} is an invalid filename",
+ component,
+ path.display()
+ );
+ }
+ };
+ let parent = iter.as_path();
+ (parent, file_name)
+}
+
+// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
+fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
+ #[cfg(unix)]
+ {
+ use std::os::unix::ffi::OsStrExt;
+ return path.as_bytes();
+ }
+ #[cfg(not(unix))]
+ {
+ return path.to_str().unwrap().as_bytes();
+ }
+}
+
+pub(crate) const MD5_LEN: usize = 16;
+
+pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+ if hash.kind == SourceFileHashAlgorithm::Md5 {
+ let mut buf = [0u8; MD5_LEN];
+ buf.copy_from_slice(hash.hash_bytes());
- entry.set(
- gimli::DW_AT_decl_file,
- AttributeValue::FileIndex(Some(file_id)),
- );
- entry.set(
- gimli::DW_AT_decl_line,
- AttributeValue::Udata(loc.line as u64),
- );
++ Some(FileInfo { timestamp: 0, size: 0, md5: buf })
+ } else {
+ None
+ }
+}
+
+fn line_program_add_file(
+ line_program: &mut LineProgram,
+ line_strings: &mut LineStringTable,
+ file: &SourceFile,
+) -> FileId {
+ match &file.name {
+ FileName::Real(path) => {
+ let (dir_path, file_name) = split_path_dir_and_file(path.stable_name());
+ let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+ let file_name = osstr_as_utf8_bytes(file_name);
+
+ let dir_id = if !dir_name.is_empty() {
+ let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+ line_program.add_directory(dir_name)
+ } else {
+ line_program.default_directory()
+ };
+ let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+ let info = make_file_info(file.src_hash);
+
+ line_program.file_has_md5 &= info.is_some();
+ line_program.add_file(file_name, dir_id, info)
+ }
+ // FIXME give more appropriate file names
+ filename => {
+ let dir_id = line_program.default_directory();
+ let dummy_file_name = LineString::new(
+ filename.to_string().into_bytes(),
+ line_program.encoding(),
+ line_strings,
+ );
+ line_program.add_file(dummy_file_name, dir_id, None)
+ }
+ }
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
+ let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
+
+ let file_id = line_program_add_file(
+ &mut self.dwarf.unit.line_program,
+ &mut self.dwarf.line_strings,
+ &loc.file,
+ );
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+
- entry.set(
- gimli::DW_AT_decl_column,
- AttributeValue::Udata(loc.col.to_usize() as u64),
- );
++ entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
++ entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
+ // FIXME: probably omit this
- isa: &dyn cranelift_codegen::isa::TargetIsa,
++ entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
+ }
+
+ pub(super) fn create_debug_lines(
+ &mut self,
- let func = &context.func;
+ symbol: usize,
+ entry_id: UnitEntryId,
+ context: &Context,
+ function_span: Span,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ ) -> CodeOffset {
+ let tcx = self.tcx;
+ let line_program = &mut self.dwarf.unit.line_program;
- if let Some(ref mcr) = &context.mach_compile_result {
- for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
- line_program.row().address_offset = u64::from(start);
- if !loc.is_default() {
- let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
- create_row_for_span(line_program, source_info.span);
- } else {
- create_row_for_span(line_program, function_span);
- }
- func_end = end;
- }
-
- line_program.end_sequence(u64::from(func_end));
-
- func_end = mcr.buffer.total_size();
- } else {
- let encinfo = isa.encoding_info();
- let mut blocks = func.layout.blocks().collect::<Vec<_>>();
- blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
-
- for block in blocks {
- for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
- let srcloc = func.srclocs[inst];
- line_program.row().address_offset = u64::from(offset);
- if !srcloc.is_default() {
- let source_info =
- *source_info_set.get_index(srcloc.bits() as usize).unwrap();
- create_row_for_span(line_program, source_info.span);
- } else {
- create_row_for_span(line_program, function_span);
- }
- func_end = offset + size;
- }
+
+ let line_strings = &mut self.dwarf.line_strings;
+ let mut last_span = None;
+ let mut last_file = None;
+ let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
+ if let Some(last_span) = last_span {
+ if span == last_span {
+ line_program.generate_row();
+ return;
+ }
+ }
+ last_span = Some(span);
+
+ // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+ // To get good line-stepping behavior in the debugger, we overwrite the debug
+ // locations of macro expansions with that of the outermost expansion site
+ // (unless the crate is being compiled with `-Z debug-macros`).
+ let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
+ span
+ } else {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+ };
+
+ let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(span.lo());
+
+ (
+ file,
+ u64::try_from(line).unwrap() + 1,
+ u64::from((span.lo() - line_pos).to_u32()) + 1,
+ )
+ }
+ Err(file) => (file, 0, 0),
+ };
+
+ // line_program_add_file is very slow.
+ // Optimize for the common case of the current file not being changed.
+ let current_file_changed = if let Some(last_file) = &last_file {
+ // Even if the Lrc pointers are not equal, the files may still be equal; that
+ // is not a problem, as this is just an optimization.
+ !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
+ } else {
+ true
+ };
+ if current_file_changed {
+ let file_id = line_program_add_file(line_program, line_strings, &file);
+ line_program.row().file = file_id;
+ last_file = Some(file);
+ }
+
+ line_program.row().line = line;
+ line_program.row().column = col;
+ line_program.generate_row();
+ };
+
+ line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+
+ let mut func_end = 0;
+
- line_program.end_sequence(u64::from(func_end));
++ let mcr = context.mach_compile_result.as_ref().unwrap();
++ for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
++ line_program.row().address_offset = u64::from(start);
++ if !loc.is_default() {
++ let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
++ create_row_for_span(line_program, source_info.span);
++ } else {
++ create_row_for_span(line_program, function_span);
+ }
- entry.set(
- gimli::DW_AT_high_pc,
- AttributeValue::Udata(u64::from(func_end)),
- );
++ func_end = end;
+ }
+
++ line_program.end_sequence(u64::from(func_end));
++
++ let func_end = mcr.buffer.total_size();
++
+ assert_ne!(func_end, 0);
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
++ entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
+
+ self.emit_location(entry_id, function_span);
+
+ func_end
+ }
+}
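+
+// A line program sequence maps code offsets to source positions: begin_sequence
+// anchors the rows to the function's symbol, one row is generated per
+// `MachSrcLoc` (address offsets are relative to the function start), and
+// end_sequence closes the covered range. E.g. srclocs [(0..8, a), (8..20, b)]
+// produce rows at offsets 0 and 8 and a sequence end at 20.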
--- /dev/null
- use cranelift_codegen::ir::{StackSlots, ValueLabel, ValueLoc};
+//! Handling of everything related to debuginfo.
+
+mod emit;
+mod line_info;
+mod unwind;
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+
+use cranelift_codegen::entity::EntityRef;
- clif_types: FxHashMap<Type, UnitEntryId>,
++use cranelift_codegen::ir::{LabelValueLoc, StackSlots, ValueLabel, ValueLoc};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::ValueLocRange;
+
+use gimli::write::{
+ Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+ LocationList, Range, RangeList, UnitEntryId,
+};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+
+pub(crate) use emit::{DebugReloc, DebugRelocName};
+pub(crate) use unwind::UnwindContext;
+
+fn target_endian(tcx: TyCtxt<'_>) -> RunTimeEndian {
+ use rustc_target::abi::Endian;
+
+ match tcx.data_layout.endian {
+ Endian::Big => RunTimeEndian::Big,
+ Endian::Little => RunTimeEndian::Little,
+ }
+}
+
+pub(crate) struct DebugContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ endian: RunTimeEndian,
+
+ dwarf: DwarfUnit,
+ unit_range_list: RangeList,
+
- root.set(
- gimli::DW_AT_producer,
- AttributeValue::StringRef(dwarf.strings.add(producer)),
- );
- root.set(
- gimli::DW_AT_language,
- AttributeValue::Language(gimli::DW_LANG_Rust),
- );
+ types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+ let encoding = Encoding {
+ format: Format::Dwarf32,
+ // TODO: this should be configurable
+ // macOS doesn't seem to support DWARF versions higher than 3
+ // DWARF version 5 is required for the md5 file hash
+ version: if tcx.sess.target.is_like_osx {
+ 3
+ } else {
+ // FIXME change to version 5 once the gdb and lldb versions shipping with the
+ // latest Debian support it.
+ 4
+ },
+ address_size: isa.frontend_config().pointer_bytes(),
+ };
+
+ let mut dwarf = DwarfUnit::new(encoding);
+
+ // FIXME: how to get version when building out of tree?
+ // Normally this would use option_env!("CFG_VERSION").
+ let producer = format!("cg_clif (rustc {})", "unknown version");
+ let comp_dir = tcx.sess.working_dir.0.to_string_lossy().into_owned();
+ let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
+ Some(path) => {
+ let name = path.to_string_lossy().into_owned();
+ (name, None)
+ }
+ None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
+ };
+
+ let mut line_program = LineProgram::new(
+ encoding,
+ LineEncoding::default(),
+ LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
+ LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
+ file_info,
+ );
+ line_program.file_has_md5 = file_info.is_some();
+
+ dwarf.unit.line_program = line_program;
+
+ {
+ let name = dwarf.strings.add(name);
+ let comp_dir = dwarf.strings.add(comp_dir);
+
+ let root = dwarf.unit.root();
+ let root = dwarf.unit.get_mut(root);
- root.set(
- gimli::DW_AT_low_pc,
- AttributeValue::Address(Address::Constant(0)),
- );
++ root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
++ root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
+ root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+ root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
- clif_types: FxHashMap::default(),
++ root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
+ }
+
+ DebugContext {
+ tcx,
+
+ endian: target_endian(tcx),
+
+ dwarf,
+ unit_range_list: RangeList(Vec::new()),
+
- fn dwarf_ty_for_clif_ty(&mut self, ty: Type) -> UnitEntryId {
- if let Some(type_id) = self.clif_types.get(&ty) {
- return *type_id;
- }
-
- let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
-
- let primitive = |dwarf: &mut DwarfUnit, ate| {
- let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
- let type_entry = dwarf.unit.get_mut(type_id);
- type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
- type_id
- };
-
- let type_id = if ty.is_bool() {
- primitive(&mut self.dwarf, gimli::DW_ATE_boolean)
- } else if ty.is_int() {
- primitive(&mut self.dwarf, gimli::DW_ATE_address)
- } else if ty.is_float() {
- primitive(&mut self.dwarf, gimli::DW_ATE_float)
- } else {
- new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type)
- };
-
- let type_entry = self.dwarf.unit.get_mut(type_id);
- type_entry.set(
- gimli::DW_AT_name,
- AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()),
- );
- type_entry.set(
- gimli::DW_AT_byte_size,
- AttributeValue::Udata(u64::from(ty.bytes())),
- );
-
- type_id
- }
-
+ types: FxHashMap::default(),
+ }
+ }
+
- | ty::RawPtr(ty::TypeAndMut {
- ty: pointee_ty,
- mutbl: _mutbl,
- }) => {
+ fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
+ if let Some(type_id) = self.types.get(ty) {
+ return *type_id;
+ }
+
+ let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+ let primitive = |dwarf: &mut DwarfUnit, ate| {
+ let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+ let type_entry = dwarf.unit.get_mut(type_id);
+ type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+ type_id
+ };
+
+ let name = format!("{}", ty);
+ let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+
+ let type_id = match ty.kind() {
+ ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
+ ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
+ ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
+ ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
+ ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
+ ty::Ref(_, pointee_ty, _mutbl)
- &layout::LayoutCx {
- tcx: self.tcx,
- param_env: ParamEnv::reveal_all(),
- },
++ | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let pointee = self.dwarf_ty(pointee_ty);
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
+ type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
+
+ type_id
+ }
+ ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let variant = adt_def.non_enum_variant();
+
+ for (field_idx, field_def) in variant.fields.iter().enumerate() {
+ let field_offset = layout.fields.offset(field_idx);
+ let field_layout = layout
+ .field(
- type_entry.set(
- gimli::DW_AT_byte_size,
- AttributeValue::Udata(layout.size.bytes()),
- );
++ &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
+ field_idx,
+ )
+ .unwrap();
+
+ let field_type = self.dwarf_ty(field_layout.ty);
+
+ let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
+ let field_entry = self.dwarf.unit.get_mut(field_id);
+
+ field_entry.set(
+ gimli::DW_AT_name,
+ AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
+ );
+ field_entry.set(
+ gimli::DW_AT_data_member_location,
+ AttributeValue::Udata(field_offset.bytes()),
+ );
+ field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
+ }
+
+ type_id
+ }
+ _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
+ };
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- entry.set(
- gimli::DW_AT_linkage_name,
- AttributeValue::StringRef(name_id),
- );
++ type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
+
+ self.types.insert(ty, type_id);
+
+ type_id
+ }
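+
+ // Inserting the type id into `self.types` before recursing (see the pointer
+ // and struct arms above) is what breaks cycles: a self-referential type such
+ // as `struct Node { next: *const Node }` hits the cache on the recursive
+ // `dwarf_ty` call instead of recursing forever.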
+
+ fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
+ let dw_ty = self.dwarf_ty(ty);
+
+ let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+
+ var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+ var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+
+ var_id
+ }
+
+ pub(crate) fn define_function(
+ &mut self,
+ instance: Instance<'tcx>,
+ func_id: FuncId,
+ name: &str,
+ isa: &dyn TargetIsa,
+ context: &Context,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+ ) {
+ let symbol = func_id.as_u32() as usize;
+ let mir = self.tcx.instance_mir(instance.def);
+
+ // FIXME: add to appropriate scope instead of root
+ let scope = self.dwarf.unit.root();
+
+ let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ let name_id = self.dwarf.strings.add(name);
+ // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
+ entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
- let end =
- self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
++ entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
+
- if isa.get_mach_backend().is_some() {
- return; // Not yet implemented for the AArch64 backend.
- }
-
++ let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
+
+ self.unit_range_list.0.push(Range::StartLength {
+ begin: Address::Symbol { symbol, addend: 0 },
+ length: u64::from(end),
+ });
+
- // FIXME Remove once actual debuginfo for locals works.
- for (i, (param, &val)) in context
- .func
- .signature
- .params
- .iter()
- .zip(
- context
- .func
- .dfg
- .block_params(context.func.layout.entry_block().unwrap()),
- )
- .enumerate()
- {
- use cranelift_codegen::ir::ArgumentPurpose;
- let base_name = match param.purpose {
- ArgumentPurpose::Normal => "arg",
- ArgumentPurpose::StructArgument(_) => "struct_arg",
- ArgumentPurpose::StructReturn => "sret",
- ArgumentPurpose::Link
- | ArgumentPurpose::FramePointer
- | ArgumentPurpose::CalleeSaved => continue,
- ArgumentPurpose::VMContext
- | ArgumentPurpose::SignatureId
- | ArgumentPurpose::CallerTLS
- | ArgumentPurpose::CalleeTLS
- | ArgumentPurpose::StackLimit => unreachable!(),
- };
- let name = format!("{}{}", base_name, i);
-
- let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type);
- let loc =
- translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
-
- let arg_id = self
- .dwarf
- .unit
- .add(entry_id, gimli::DW_TAG_formal_parameter);
- let var_entry = self.dwarf.unit.get_mut(arg_id);
-
- var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
- var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
- var_entry.set(gimli::DW_AT_location, AttributeValue::Exprloc(loc));
- }
-
+ let func_entry = self.dwarf.unit.get_mut(entry_id);
+ // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
+ func_entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ // Using Udata for DW_AT_high_pc requires at least DWARF4
+ func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
+
- Place {
- local,
- projection: ty::List::empty(),
- },
+ // FIXME make it more reliable and implement scopes before re-enabling this.
+ if false {
+ let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
+
+ for (local, _local_decl) in mir.local_decls.iter_enumerated() {
+ let ty = self.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ mir.local_decls[local].ty,
+ );
+ let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
+
+ let location = place_location(
+ self,
+ isa,
+ symbol,
+ context,
+ &local_map,
+ &value_labels_ranges,
- end: Address::Symbol {
- symbol,
- addend: i64::from(value_loc_range.end),
- },
++ Place { local, projection: ty::List::empty() },
+ );
+
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+ var_entry.set(gimli::DW_AT_location, location);
+ }
+ }
+
+ // FIXME create locals for all entries in mir.var_debug_info
+ }
+}
+
+fn place_location<'tcx>(
+ debug_context: &mut DebugContext<'tcx>,
+ isa: &dyn TargetIsa,
+ symbol: usize,
+ context: &Context,
+ local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
+ #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+ ValueLabel,
+ Vec<ValueLocRange>,
+ >,
+ place: Place<'tcx>,
+) -> AttributeValue {
+ assert!(place.projection.is_empty()); // FIXME implement them
+
+ match local_map[place.local].inner() {
+ CPlaceInner::Var(_local, var) => {
+ let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
+ if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
+ let loc_list = LocationList(
+ value_loc_ranges
+ .iter()
+ .map(|value_loc_range| Location::StartEnd {
+ begin: Address::Symbol {
+ symbol,
+ addend: i64::from(value_loc_range.start),
+ },
- loc: ValueLoc,
++ end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
+ data: translate_loc(
+ isa,
+ value_loc_range.loc,
+ &context.func.stack_slots,
+ )
+ .unwrap(),
+ })
+ .collect(),
+ );
+ let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
+
+ AttributeValue::LocationListRef(loc_list_id)
+ } else {
+ // FIXME set value labels for unused locals
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::VarLane(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::Addr(_, _) => {
+ // FIXME implement this (used by arguments and returns)
+
+ AttributeValue::Exprloc(Expression::new())
+
+ // For PointerBase::Stack:
+ //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot), &context.func.stack_slots).unwrap())
+ }
+ }
+}
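+
+// A DWARF location list pairs address ranges with expressions, so a variable
+// that migrates between registers and stack slots still has a correct location
+// at every PC. An empty `Expression` means "no location known"; debuggers
+// report such locals as optimized out.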
+
+// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
+fn translate_loc(
+ isa: &dyn TargetIsa,
- ValueLoc::Reg(reg) => {
++ loc: LabelValueLoc,
+ stack_slots: &StackSlots,
+) -> Option<Expression> {
+ match loc {
- ValueLoc::Stack(ss) => {
++ LabelValueLoc::ValueLoc(ValueLoc::Reg(reg)) => {
+ let machine_reg = isa.map_dwarf_register(reg).unwrap();
+ let mut expr = Expression::new();
+ expr.op_reg(gimli::Register(machine_reg));
+ Some(expr)
+ }
- _ => None,
++ LabelValueLoc::ValueLoc(ValueLoc::Stack(ss)) => {
+ if let Some(ss_offset) = stack_slots[ss].offset {
+ let mut expr = Expression::new();
+ expr.op_breg(X86_64::RBP, i64::from(ss_offset) + 16);
+ Some(expr)
+ } else {
+ None
+ }
+ }
++ LabelValueLoc::ValueLoc(ValueLoc::Unassigned) => unreachable!(),
++ LabelValueLoc::Reg(reg) => {
++ let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
++ let mut expr = Expression::new();
++ expr.op_reg(gimli::Register(machine_reg));
++ Some(expr)
++ }
++ LabelValueLoc::SPOffset(offset) => {
++ let mut expr = Expression::new();
++ expr.op_breg(X86_64::RSP, offset);
++ Some(expr)
++ }
+ }
+}
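+
+// translate_loc lowers each value location to a one-operation DWARF expression:
+// DW_OP_reg<n> for register values and DW_OP_breg<n> (base register plus
+// offset) for stack values. The `+ 16` in the RBP case presumably skips the
+// saved frame pointer and return address that sit between RBP and the stack
+// slots on x86_64.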
--- /dev/null
- UnwindContext {
- tcx,
- frame_table,
- cie_id,
- }
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+
+use crate::backend::WriteDebugInfo;
+
+pub(crate) struct UnwindContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ frame_table: FrameTable,
+ cie_id: Option<CieId>,
+}
+
+impl<'tcx> UnwindContext<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
+ let mut frame_table = FrameTable::default();
+
+ let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
+ if pic_eh_frame {
+ cie.fde_address_encoding =
+ gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+ }
+ Some(frame_table.add_cie(cie))
+ } else {
+ None
+ };
+
- unwind_info.to_fde(Address::Symbol {
- symbol: func_id.as_u32() as usize,
- addend: 0,
- }),
++ UnwindContext { tcx, frame_table, cie_id }
+ }
+
+ pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+ let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+ unwind_info
+ } else {
+ return;
+ };
+
+ match unwind_info {
+ UnwindInfo::SystemV(unwind_info) => {
+ self.frame_table.add_fde(
+ self.cie_id.unwrap(),
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
- self.tcx,
- )));
++ unwind_info
++ .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
+ );
+ }
+ UnwindInfo::WindowsX64(_) => {
+ // FIXME implement this
+ }
+ unwind_info => unimplemented!("{:?}", unwind_info),
+ }
+ }
+
+ pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
- self.tcx,
- )));
++ let mut eh_frame =
++ EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if !eh_frame.0.writer.slice().is_empty() {
+ let id = eh_frame.id();
+ let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+ let mut section_map = FxHashMap::default();
+ section_map.insert(id, section_id);
+
+ for reloc in &eh_frame.0.relocs {
+ product.add_debug_reloc(&section_map, &section_id, reloc);
+ }
+ }
+ }
+
+ #[cfg(feature = "jit")]
+ pub(crate) unsafe fn register_jit(
+ self,
+ jit_module: &cranelift_jit::JITModule,
+ ) -> Option<UnwindRegistry> {
- Some(UnwindRegistry {
- _frame_table: eh_frame,
- registrations,
- })
++ let mut eh_frame =
++ EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if eh_frame.0.writer.slice().is_empty() {
+ return None;
+ }
+
+ let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
+
+ // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+ eh_frame.extend(&[0, 0, 0, 0]);
+
+ let mut registrations = Vec::new();
+
+ // =======================================================================
+ // Everything after this line up to the end of the file is loosely based on
+ // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+ #[cfg(target_os = "macos")]
+ {
+ // On macOS, `__register_frame` takes a pointer to a single FDE
+ let start = eh_frame.as_ptr();
+ let end = start.add(eh_frame.len());
+ let mut current = start;
+
+ // Walk all of the entries in the frame table and register them
+ while current < end {
+ let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+ // Skip over the CIE
+ if current != start {
+ __register_frame(current);
+ registrations.push(current as usize);
+ }
+
+ // Move to the next table entry (+4 because the 4-byte length field is not included in the length itself)
+ current = current.add(len + 4);
+ }
+ }
+ #[cfg(not(target_os = "macos"))]
+ {
+ // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+ let ptr = eh_frame.as_ptr();
+ __register_frame(ptr);
+ registrations.push(ptr as usize);
+ }
+
++ Some(UnwindRegistry { _frame_table: eh_frame, registrations })
+ }
+}
+
+/// Represents a registry of function unwind information for the System V ABI.
+pub(crate) struct UnwindRegistry {
+ _frame_table: Vec<u8>,
+ registrations: Vec<usize>,
+}
+
+extern "C" {
+ // libunwind import
+ fn __register_frame(fde: *const u8);
+ fn __deregister_frame(fde: *const u8);
+}
+
+impl Drop for UnwindRegistry {
+ fn drop(&mut self) {
+ unsafe {
+ // libgcc stores the frame entries as a linked list in decreasing sort order
+ // based on the PC value of the registered entry.
+ //
+ // As we store the registrations in increasing order, it would be O(N^2) to
+ // deregister in that order.
+ //
+ // To ensure that we just pop off the first element in the list upon every
+ // deregistration, walk our list of registrations backwards.
+ for fde in self.registrations.iter().rev() {
+ __deregister_frame(*fde as *const _);
+ }
+ }
+ }
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Handling of enum discriminants
+//!
+//! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
+
+use rustc_target::abi::{Int, TagEncoding, Variants};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_set_discriminant<'tcx>(
- let to = layout
- .ty
- .discriminant_for_variant(fx.tcx, variant_index)
- .unwrap()
- .val;
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ variant_index: VariantIdx,
+) {
+ let layout = place.layout();
+ if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+ return;
+ }
+ match layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
+ tag_encoding: TagEncoding::Direct,
+ variants: _,
+ } => {
+ let ptr = place.place_field(fx, mir::Field::new(tag_field));
- tag_encoding:
- TagEncoding::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- },
++ let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
+ let to = if ptr.layout().abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ ptr.layout().size.sign_extend(to) as i128,
+ ptr.layout().size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
+ };
+ let discr = CValue::const_val(fx, ptr.layout(), to);
+ ptr.write_cvalue(fx, discr);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ variants: _,
+ } => {
+ if variant_index != dataful_variant {
+ let niche = place.place_field(fx, mir::Field::new(tag_field));
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = ty::ScalarInt::try_from_uint(
+ u128::from(niche_value).wrapping_add(niche_start),
+ niche.layout().size,
+ )
+ .unwrap();
+ let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
+ niche.write_cvalue(fx, niche_llval);
+ }
+ }
+ }
+}
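+
+// With a `Direct` encoding the discriminant is stored verbatim in the tag
+// field. With a `Niche` encoding only the non-dataful variants store anything:
+// the variant index is remapped into the niche's value range, and the dataful
+// variant is implied by every value outside that range, so it needs no write.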
+
+pub(crate) fn codegen_get_discriminant<'tcx>(
- Variants::Multiple {
- tag,
- tag_field,
- tag_encoding,
- variants: _,
- } => (tag, *tag_field, tag_encoding),
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ value: CValue<'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let layout = value.layout();
+
+ if layout.abi == Abi::Uninhabited {
+ return trap_unreachable_ret_value(
+ fx,
+ dest_layout,
+ "[panic] Tried to get discriminant for uninhabited type.",
+ );
+ }
+
+ let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
+ Variants::Single { index } => {
+ let discr_val = layout
+ .ty
+ .discriminant_for_variant(fx.tcx, *index)
+ .map_or(u128::from(index.as_u32()), |discr| discr.val);
+ let discr_val = if dest_layout.abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ dest_layout.size.sign_extend(discr_val) as i128,
+ dest_layout.size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
+ };
+ return CValue::const_val(fx, dest_layout, discr_val);
+ }
- TagEncoding::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- } => {
++ Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
++ (tag, *tag_field, tag_encoding)
++ }
+ };
+
+ let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = value.value_field(fx, mir::Field::new(tag_field));
+ let tag = tag.load_scalar(fx);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.value {
+ Int(_, signed) => signed,
+ _ => false,
+ };
+ let val = clif_intcast(fx, tag, cast_to, signed);
+ CValue::by_val(val, dest_layout)
+ }
- fx.bcx
- .ins()
- .iadd_imm(tag, -i64::try_from(niche_start).unwrap())
++ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
+ let relative_discr = if niche_start == 0 {
+ tag
+ } else {
+ // FIXME handle niche_start > i64::MAX
- fx.bcx
- .ins()
- .iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
++ fx.bcx.ins().iadd_imm(tag, -i64::try_from(niche_start).unwrap())
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = {
+ codegen_icmp_imm(
+ fx,
+ IntCC::UnsignedLessThanOrEqual,
+ relative_discr,
+ i128::from(relative_max),
+ )
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ fx.bcx.ins().iconst(cast_to, 0)
+ } else {
+ clif_intcast(fx, relative_discr, cast_to, false)
+ };
- let dataful_variant = fx
- .bcx
- .ins()
- .iconst(cast_to, i64::from(dataful_variant.as_u32()));
++ fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+ };
+
++ let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
+ let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
+ CValue::by_val(discr, dest_layout)
+ }
+ }
+}
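+
+// Worked example (illustrative): for `Option<std::ptr::NonNull<u8>>` the
+// pointer itself is the tag, with niche_variants = 0..=0 (`None`),
+// niche_start = 0 and `Some` as the dataful variant. Then relative_discr ==
+// tag and is_niche <=> tag == 0, so the select above yields discriminant 0
+// (`None`) for a null pointer and 1 (`Some`) for everything else.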
--- /dev/null
- use cranelift_object::{ObjectModule, ObjectProduct};
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::linker::LinkerInfo;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+
- use crate::prelude::*;
-
- use crate::backend::AddConstructor;
++use cranelift_object::ObjectModule;
+
- map_product: impl FnOnce(ObjectProduct) -> ObjectProduct,
++use crate::{prelude::*, BackendConfig};
+
+fn new_module(tcx: TyCtxt<'_>, name: String) -> ObjectModule {
+ let module = crate::backend::make_module(tcx.sess, name);
+ assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
+ module
+}
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+fn emit_module(
+ tcx: TyCtxt<'_>,
+ name: String,
+ kind: ModuleKind,
+ module: ObjectModule,
+ debug: Option<DebugContext<'_>>,
+ unwind_context: UnwindContext<'_>,
- let product = map_product(product);
-
- let tmp_file = tcx
- .output_filenames(LOCAL_CRATE)
- .temp_path(OutputType::Object, Some(&name));
+) -> ModuleCodegenResult {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
- tcx.sess
- .fatal(&format!("error writing object file: {}", err));
++ let tmp_file = tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(&name));
+ let obj = product.object.write().unwrap();
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
- CompiledModule {
- name,
- kind,
- object: Some(tmp_file),
- dwarf_object: None,
- bytecode: None,
- },
++ tcx.sess.fatal(&format!("error writing object file: {}", err));
+ }
+
+ let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
+ None
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ tcx.sess,
+ &name,
+ &Some(tmp_file.clone()),
+ )
+ };
+
+ ModuleCodegenResult(
- fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodegenResult {
++ CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
+ work_product,
+ )
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+ work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+ let incr_comp_session_dir = tcx.sess.incr_comp_session_dir();
+ let mut object = None;
+ let work_product = cgu.work_product(tcx);
+ if let Some(saved_file) = &work_product.saved_file {
+ let obj_out = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Object, Some(&cgu.name().as_str()));
+ object = Some(obj_out.clone());
+ let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+ tcx.sess.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err
+ ));
+ }
+ }
+
+ work_products.insert(cgu.work_product_id(), work_product);
+
+ CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object,
+ dwarf_object: None,
+ bytecode: None,
+ }
+}
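+
+// Reuse hard-links (or copies) the object file produced by a previous
+// compilation session out of the incremental cache instead of re-codegenning
+// the CGU; `determine_cgu_reuse` in `run_aot` below decides when that is sound.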
+
- // Initialize the global atomic mutex using a constructor for proc-macros.
- // FIXME implement atomic instructions in Cranelift.
- let mut init_atomics_mutex_from_constructor = None;
- if tcx
- .sess
- .crate_types()
- .contains(&rustc_session::config::CrateType::ProcMacro)
- {
- if mono_items.iter().any(|(mono_item, _)| match mono_item {
- rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
- .symbol_name(Instance::mono(tcx, *def_id))
- .name
- .contains("__rustc_proc_macro_decls_"),
- _ => false,
- }) {
- init_atomics_mutex_from_constructor =
- Some(crate::atomic_shim::init_global_lock_constructor(
- &mut module,
- &format!("{}_init_atomics_mutex", cgu_name.as_str()),
- ));
- }
- }
-
++fn module_codegen(
++ tcx: TyCtxt<'_>,
++ (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
++) -> ModuleCodegenResult {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let mut module = new_module(tcx, cgu_name.as_str().to_string());
+
- module,
+ let mut cx = crate::CodegenCx::new(
+ tcx,
- true,
++ backend_config,
++ &mut module,
+ tcx.sess.opts.debuginfo != DebugInfo::None,
- cx.tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, inst, linkage)
- });
+ );
+ super::predefine_mono_items(&mut cx, &mono_items);
+ for (mono_item, (linkage, visibility)) in mono_items {
+ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+ match mono_item {
+ MonoItem::Fn(inst) => {
- let (mut module, global_asm, debug, mut unwind_context) =
++ cx.tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, inst, linkage));
+ }
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(&mut cx.constants_cx, def_id)
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
+ cx.global_asm.push_str(&*asm.as_str());
+ cx.global_asm.push_str("\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+ }
+ }
+ }
- crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);
++ let (global_asm, debug, mut unwind_context) =
+ tcx.sess.time("finalize CodegenCx", || cx.finalize());
- |mut product| {
- if let Some(func_id) = init_atomics_mutex_from_constructor {
- product.add_constructor(func_id);
- }
-
- product
- },
++ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
+
+ let codegen_result = emit_module(
+ tcx,
+ cgu.name().as_str().to_string(),
+ ModuleKind::Regular,
+ module,
+ debug,
+ unwind_context,
- tcx.sess
- .cgu_reuse_tracker
- .set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+ );
+
+ codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
+
+ codegen_result
+}
+
+pub(super) fn run_aot(
+ tcx: TyCtxt<'_>,
++ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+ let mut work_products = FxHashMap::default();
+
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(LOCAL_CRATE).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let modules = super::time(tcx, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
- cgu.name(),
++ tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
+ }
+
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
- |product| product,
++ (backend_config, cgu.name()),
+ module_codegen,
+ rustc_middle::dep_graph::hash_result,
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+
+ module
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let mut allocator_module = new_module(tcx, "allocator_shim".to_string());
+ let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let ModuleCodegenResult(module, work_product) = emit_module(
+ tcx,
+ "allocator_shim".to_string(),
+ ModuleKind::Allocator,
+ allocator_module,
+ None,
+ allocator_unwind_context,
- tcx.sess
- .fatal(&format!("error writing metadata object file: {}", err));
+ );
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+ Some(module)
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let obj = crate::backend::with_object(tcx.sess, &metadata_cgu_name, |object| {
+ crate::metadata::write_metadata(tcx, object);
+ });
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess
- .fatal("asm! and global_asm! are not yet supported on macOS and Windows");
++ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ Box::new((
+ CodegenResults {
+ crate_name: tcx.crate_name(LOCAL_CRATE),
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ windows_subsystem: None, // Windows is not yet supported
+ linker_info: LinkerInfo::new(tcx),
+ crate_info: CrateInfo::new(tcx),
+ },
+ work_products,
+ ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+ use std::io::Write;
+ use std::process::{Command, Stdio};
+
+ if global_asm.is_empty() {
+ return;
+ }
+
+ if cfg!(not(feature = "inline_asm"))
+ || tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ {
+ if global_asm.contains("__rust_probestack") {
+ return;
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
+ } else {
- .map(|line| {
- if let Some(index) = line.find("//") {
- &line[0..index]
- } else {
- line
- }
- })
++ tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ }
+ }
+
+ let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+ let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
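+    // The global asm is assembled into its own object file with the system
+    // assembler and then merged into this CGU's main object file below using a
+    // relocatable link (`ld -r`).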
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
- let output_object_file = tcx
- .output_filenames(LOCAL_CRATE)
- .temp_path(OutputType::Object, Some(cgu_name));
++ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
- child
- .stdin
- .take()
- .unwrap()
- .write_all(global_asm.as_bytes())
- .unwrap();
++ let output_object_file =
++ tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
- tcx.sess
- .fatal(&format!("Failed to assemble `{}`", global_asm));
++ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
- if tcx
- .dep_graph
- .previous_work_product(work_product_id)
- .is_none()
- {
++ tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+ }
+
+ // Link the global asm and main object file together
+ let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+ std::fs::rename(&output_object_file, &main_object_file).unwrap();
+ let status = Command::new(linker)
+ .arg("-r") // Create a new object file
+ .arg("-o")
+ .arg(output_object_file)
+ .arg(&main_object_file)
+ .arg(&global_asm_object_file)
+ .status()
+ .unwrap();
+ if !status.success() {
+ tcx.sess.fatal(&format!(
+ "Failed to link `{}` and `{}` together",
+ main_object_file.display(),
+ global_asm_object_file.display(),
+ ));
+ }
+
+ std::fs::remove_file(global_asm_object_file).unwrap();
+ std::fs::remove_file(main_object_file).unwrap();
+}
+
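+/// Inserts `postfix` before the file extension: e.g. `foo.o` with postfix
+/// `".asm"` becomes `foo.asm.o`.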
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
++ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
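+    //
+    // For example, if neither this CGU's MIR nor anything it depends on changed
+    // since the previous session, `try_mark_green` below succeeds and the cached
+    // object file is reused through `reuse_workproduct_for_cgu`.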
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+}
--- /dev/null
- use crate::prelude::*;
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
- pub(super) fn run_jit(tcx: TyCtxt<'_>, codegen_mode: CodegenMode) -> ! {
++use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
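+// The JIT-compiled code calls back into `__clif_jit_fn` below, which cannot be
+// passed any driver state, so the module and backend config are stashed in
+// thread-locals right before jumping into `main`.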
+thread_local! {
++ pub static BACKEND_CONFIG: RefCell<Option<BackendConfig>> = RefCell::new(None);
+ pub static CURRENT_MODULE: RefCell<Option<JITModule>> = RefCell::new(None);
+}
+
- #[cfg(unix)]
- unsafe {
- // When not using our custom driver rustc will open us without the RTLD_GLOBAL flag, so
- // __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourself again
- // as global.
- // FIXME remove once atomic_shim is gone
-
- let mut dl_info: libc::Dl_info = std::mem::zeroed();
- assert_ne!(
- libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
- 0
- );
- assert_ne!(
- libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
- std::ptr::null_mut(),
- );
- }
-
++pub(super) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
+ }
+
- let mut jit_builder = JITBuilder::with_isa(
- crate::build_isa(tcx.sess),
- cranelift_module::default_libcall_names(),
- );
- jit_builder.hotswap(matches!(codegen_mode, CodegenMode::JitLazy));
+ let imported_symbols = load_imported_symbols_for_jit(tcx);
+
- returns: vec![AbiParam::new(
- jit_module.target_config().pointer_type(), /*isize*/
- )],
++ let mut jit_builder =
++ JITBuilder::with_isa(crate::build_isa(tcx.sess), cranelift_module::default_libcall_names());
++ jit_builder.hotswap(matches!(backend_config.codegen_mode, CodegenMode::JitLazy));
+ jit_builder.symbols(imported_symbols);
+ let mut jit_module = JITModule::new(jit_builder);
+ assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
+
+ let sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
- let main_func_id = jit_module
- .declare_function("main", Linkage::Import, &sig)
- .unwrap();
++ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
+ };
- let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
++ let main_func_id = jit_module.declare_function("main", Linkage::Import, &sig).unwrap();
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+    let mono_items = cgus
+        .iter()
+        .flat_map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+        .collect::<FxHashMap<_, (_, _)>>()
+        .into_iter()
+        .collect::<Vec<(_, (_, _))>>();
+
- MonoItem::Fn(inst) => match codegen_mode {
++ let mut cx = crate::CodegenCx::new(tcx, backend_config, &mut jit_module, false);
+
+ super::time(tcx, "codegen mono items", || {
+ super::predefine_mono_items(&mut cx, &mono_items);
+ for (mono_item, (linkage, visibility)) in mono_items {
+ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+ match mono_item {
- cx.tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, inst, linkage)
- });
++ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
- let (mut jit_module, global_asm, _debug, mut unwind_context) =
++ cx.tcx
++ .sess
++ .time("codegen fn", || crate::base::codegen_fn(&mut cx, inst, linkage));
+ }
+ CodegenMode::JitLazy => codegen_shim(&mut cx, inst),
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(&mut cx.constants_cx, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
- crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
++ let (global_asm, _debug, mut unwind_context) =
+ tcx.sess.time("finalize CodegenCx", || cx.finalize());
+ jit_module.finalize_definitions();
+
+ if !global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
- println!("Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed");
++ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context);
+ crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+
+ let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_module) };
+
+ let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
+
- let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
++ println!(
++ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
++ );
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_main) };
+
+ let args = ::std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(args.split(' '))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
++ BACKEND_CONFIG.with(|tls_backend_config| {
++ assert!(tls_backend_config.borrow_mut().replace(backend_config).is_none())
++ });
+ CURRENT_MODULE
+ .with(|current_module| assert!(current_module.borrow_mut().replace(jit_module).is_none()));
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+
+ std::process::exit(ret);
+}
+
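+// Entry point for lazy compilation: the trampolines emitted by `codegen_shim`
+// call this function with a pointer to the `Instance`. The real function body
+// is codegen'ed in place of the trampoline and its finalized address returned.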
+#[no_mangle]
+extern "C" fn __clif_jit_fn(instance_ptr: *const Instance<'static>) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+        // `lift` is used to ensure the correct lifetime for `instance`.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ CURRENT_MODULE.with(|jit_module| {
+ let mut jit_module = jit_module.borrow_mut();
+ let jit_module = jit_module.as_mut().unwrap();
- let sig = crate::abi::get_function_sig(tcx, cx.module.isa().triple(), instance);
- let func_id = cx
- .module
- .declare_function(&name, Linkage::Export, &sig)
- .unwrap();
- cx.module.prepare_for_function_redefine(func_id).unwrap();
-
- tcx.sess.time("codegen fn", || {
- crate::base::codegen_fn(&mut cx, instance, Linkage::Export)
- });
-
- let (jit_module, global_asm, _debug_context, unwind_context) = cx.finalize();
++ let backend_config =
++ BACKEND_CONFIG.with(|backend_config| backend_config.borrow().clone().unwrap());
+
+ let name = tcx.symbol_name(instance).name.to_string();
- let mut err = tcx
- .sess
- .struct_err(&format!("Can't load static lib {}", name.as_str()));
++ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
++ let func_id = jit_module.declare_function(&name, Linkage::Export, &sig).unwrap();
++ jit_module.prepare_for_function_redefine(func_id).unwrap();
++
++ let mut cx = crate::CodegenCx::new(tcx, backend_config, jit_module, false);
++ tcx.sess
++ .time("codegen fn", || crate::base::codegen_fn(&mut cx, instance, Linkage::Export));
++
++ let (global_asm, _debug_context, unwind_context) = cx.finalize();
+ assert!(global_asm.is_empty());
+ jit_module.finalize_definitions();
+ std::mem::forget(unsafe { unwind_context.register_jit(&jit_module) });
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
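+// Collect `(name, address)` pairs for the symbols exported by the crate's dylib
+// dependencies so they can be registered with the `JITBuilder`; static rlibs
+// cannot be loaded in JIT mode.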
+fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let crate_info = CrateInfo::new(tcx);
+ let formats = tcx.dependency_formats(LOCAL_CRATE);
+ let data = &formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &(cnum, _) in &crate_info.used_crates_dynamic {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = tcx.crate_name(cnum);
- pub(super) fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx, impl Module>, inst: Instance<'tcx>) {
++ let mut err =
++ tcx.sess.struct_err(&format!("Can't load static lib {}", name.as_str()));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let mut imported_symbols = Vec::new();
+ for path in dylib_paths {
+ use object::{Object, ObjectSymbol};
+ let lib = libloading::Library::new(&path).unwrap();
+ let obj = std::fs::read(path).unwrap();
+ let obj = object::File::parse(&obj).unwrap();
+ imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+ let name = symbol.name().unwrap().to_string();
+ if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+ return None;
+ }
++ if name.starts_with("rust_metadata_") {
++                // In the case of cg_llvm the metadata lives in a section that the
++                // dynamic linker does not load, so the symbol could not be resolved.
++ return None;
++ }
+ let dlsym_name = if cfg!(target_os = "macos") {
+ // On macOS `dlsym` expects the name without leading `_`.
+ assert!(name.starts_with('_'), "{:?}", name);
+ &name[1..]
+ } else {
+ &name
+ };
+ let symbol: libloading::Symbol<'_, *const u8> =
+ unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+ Some((name, *symbol))
+ }));
+ std::mem::forget(lib)
+ }
+
+ tcx.sess.abort_if_errors();
+
+ imported_symbols
+}
+
- let func_id = cx
- .module
- .declare_function(&name, Linkage::Export, &sig)
- .unwrap();
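++// With `CodegenMode::JitLazy` each function is initially defined as a small
++// trampoline: it passes a pointer to its `Instance` to `__clif_jit_fn`, receives
++// the address of the freshly compiled body and calls it indirectly with the
++// original arguments, forwarding the results unchanged.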
++pub(super) fn codegen_shim<'tcx>(cx: &mut CodegenCx<'_, 'tcx>, inst: Instance<'tcx>) {
+ let tcx = cx.tcx;
+
+ let pointer_type = cx.module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name.to_string();
+ let sig = crate::abi::get_function_sig(tcx, cx.module.isa().triple(), inst);
- let jit_fn = cx
- .module
- .declare_func_in_func(jit_fn, trampoline_builder.func);
++ let func_id = cx.module.declare_function(&name, Linkage::Export, &sig).unwrap();
+
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = cx
+ .module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: cx.module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ let mut trampoline = Function::with_name_signature(ExternalName::default(), sig.clone());
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(&mut trampoline, &mut builder_ctx);
+
- let fn_args = trampoline_builder
- .func
- .dfg
- .block_params(entry_block)
- .to_vec();
++ let jit_fn = cx.module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
- let instance_ptr = trampoline_builder
- .ins()
- .iconst(pointer_type, instance_ptr as u64 as i64);
++ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+ trampoline_builder.switch_to_block(entry_block);
- let call_inst = trampoline_builder
- .ins()
- .call_indirect(sig_ref, jitted_fn, &fn_args);
++ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
++ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
+ cx.module
+ .define_function(
+ func_id,
+ &mut Context::for_function(trampoline),
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap();
+}
--- /dev/null
- config: crate::BackendConfig,
+//! Drivers are responsible for calling [`codegen_mono_item`] and performing any further actions
+//! like JIT executing or writing object files.
+
+use std::any::Any;
+
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+use crate::CodegenMode;
+
+mod aot;
+#[cfg(feature = "jit")]
+mod jit;
+
+pub(crate) fn codegen_crate(
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
- match config.codegen_mode {
- CodegenMode::Aot => aot::run_aot(tcx, metadata, need_metadata_module),
++ backend_config: crate::BackendConfig,
+) -> Box<dyn Any> {
+ tcx.sess.abort_if_errors();
+
- let is_executable = tcx
- .sess
- .crate_types()
- .contains(&rustc_session::config::CrateType::Executable);
++ match backend_config.codegen_mode {
++ CodegenMode::Aot => aot::run_aot(tcx, backend_config, metadata, need_metadata_module),
+ CodegenMode::Jit | CodegenMode::JitLazy => {
- let _: ! = jit::run_jit(tcx, config.codegen_mode);
++ let is_executable =
++ tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable);
+ if !is_executable {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ #[cfg(feature = "jit")]
- tcx.sess
- .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
++ let _: ! = jit::run_jit(tcx, backend_config);
+
+ #[cfg(not(feature = "jit"))]
- cx: &mut crate::CodegenCx<'tcx, impl Module>,
++ tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ }
+ }
+}
+
+fn predefine_mono_items<'tcx>(
- if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
- .as_ref()
- .map(|val| &**val)
- == Ok("1")
- {
++ cx: &mut crate::CodegenCx<'_, 'tcx>,
+ mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
+ cx.tcx.sess.time("predefine functions", || {
+ for &(mono_item, (linkage, visibility)) in mono_items {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let name = cx.tcx.symbol_name(instance).name.to_string();
+ let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
+ let sig = get_function_sig(cx.tcx, cx.module.isa().triple(), instance);
+ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+ cx.module.declare_function(&name, linkage, &sig).unwrap();
+ }
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+ }
+ }
+ });
+}
+
+fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
- println!(
- "[{:<30}: {}] end time: {:?}",
- tcx.crate_name(LOCAL_CRATE),
- name,
- after - before
- );
++ if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_ref().map(|val| &**val) == Ok("1") {
+ println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+ let before = std::time::Instant::now();
+ let res = tcx.sess.time(name, f);
+ let after = std::time::Instant::now();
++ println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
+ res
+ } else {
+ tcx.sess.time(name, f)
+ }
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Codegen of [`asm!`] invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
- InlineAsmOperand::Out {
- reg,
- late: _,
- place,
- } => {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ _span: Span,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+) {
+ // FIXME add .eh_frame unwind info directives
+
+ if template.is_empty() {
+        // Black box: an empty template has nothing to codegen.
+ return;
+ }
+
+ let mut slot_size = Size::from_bytes(0);
+ let mut clobbered_regs = Vec::new();
+ let mut inputs = Vec::new();
+ let mut outputs = Vec::new();
+
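+    // Reserve a suitably sized and aligned slot in the shared scratch area for
+    // every register that has to be passed into or read back out of the asm.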
+ let mut new_slot = |reg_class: InlineAsmRegClass| {
+ let reg_size = reg_class
+ .supported_types(InlineAsmArch::X86_64)
+ .iter()
+ .map(|(ty, _)| ty.size())
+ .max()
+ .unwrap();
+ let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+ slot_size = slot_size.align_to(align);
+ let offset = slot_size;
+ slot_size += reg_size;
+ offset
+ };
+
+ // FIXME overlap input and output slots to save stack space
+ for operand in operands {
+ match *operand {
+ InlineAsmOperand::In { reg, ref value } => {
+ let reg = expect_reg(reg);
+ clobbered_regs.push((reg, new_slot(reg.reg_class())));
+ inputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::codegen_operand(fx, value).load_scalar(fx),
+ ));
+ }
- InlineAsmOperand::InOut {
- reg,
- late: _,
- ref in_value,
- out_place,
- } => {
++ InlineAsmOperand::Out { reg, late: _, place } => {
+ let reg = expect_reg(reg);
+ clobbered_regs.push((reg, new_slot(reg.reg_class())));
+ if let Some(place) = place {
+ outputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::codegen_place(fx, place),
+ ));
+ }
+ }
- let asm_name = format!(
- "{}__inline_asm_{}",
- fx.tcx.symbol_name(fx.instance).name,
- inline_asm_index
- );
++ InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
+ let reg = expect_reg(reg);
+ clobbered_regs.push((reg, new_slot(reg.reg_class())));
+ inputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ ));
+ if let Some(out_place) = out_place {
+ outputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::codegen_place(fx, out_place),
+ ));
+ }
+ }
+ InlineAsmOperand::Const { value: _ } => todo!(),
+ InlineAsmOperand::SymFn { value: _ } => todo!(),
+ InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+ }
+ }
+
+ let inline_asm_index = fx.inline_asm_index;
+ fx.inline_asm_index += 1;
- writeln!(
- generated_asm,
- ".section .text.{},\"ax\",@progbits",
- asm_name
- )
- .unwrap();
++ let asm_name =
++ format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, inline_asm_index);
+
+ let generated_asm = generate_asm_wrapper(
+ &asm_name,
+ InlineAsmArch::X86_64,
+ options,
+ template,
+ clobbered_regs,
+ &inputs,
+ &outputs,
+ );
+ fx.cx.global_asm.push_str(&generated_asm);
+
+ call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
+}
+
+fn generate_asm_wrapper(
+ asm_name: &str,
+ arch: InlineAsmArch,
+ options: InlineAsmOptions,
+ template: &[InlineAsmTemplatePiece],
+ clobbered_regs: Vec<(InlineAsmReg, Size)>,
+ inputs: &[(InlineAsmReg, Size, Value)],
+ outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
+) -> String {
+ let mut generated_asm = String::new();
+ writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+ writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
- InlineAsmTemplatePiece::Placeholder {
- operand_idx: _,
- modifier: _,
- span: _,
- } => todo!(),
++ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ generated_asm.push_str(" push rbp\n");
+ generated_asm.push_str(" mov rbp,rdi\n");
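+    // The wrapper receives the address of the scratch slot as its only argument
+    // (rdi under the SystemV call conv used by `call_inline_asm`) and keeps it
+    // in rbp so the save/restore code below can address it as [rbp+offset].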
+
+ // Save clobbered registers
+ if !options.contains(InlineAsmOptions::NORETURN) {
+ // FIXME skip registers saved by the calling convention
+ for &(reg, offset) in &clobbered_regs {
+ save_register(&mut generated_asm, arch, reg, offset);
+ }
+ }
+
+ // Write input registers
+ for &(reg, offset, _value) in inputs {
+ restore_register(&mut generated_asm, arch, reg, offset);
+ }
+
+ if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".att_syntax\n");
+ }
+
+ // The actual inline asm
+ for piece in template {
+ match piece {
+ InlineAsmTemplatePiece::String(s) => {
+ generated_asm.push_str(s);
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
+ }
+ }
+ generated_asm.push('\n');
+
+ if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+
+ if !options.contains(InlineAsmOptions::NORETURN) {
+ // Read output registers
+ for &(reg, offset, _place) in outputs {
+ save_register(&mut generated_asm, arch, reg, offset);
+ }
+
+ // Restore clobbered registers
+ for &(reg, offset) in clobbered_regs.iter().rev() {
+ restore_register(&mut generated_asm, arch, reg, offset);
+ }
+
+ generated_asm.push_str(" pop rbp\n");
+ generated_asm.push_str(" ret\n");
+ } else {
+ generated_asm.push_str(" ud2\n");
+ }
+
+ generated_asm.push_str(".att_syntax\n");
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+ generated_asm.push_str(".text\n");
+ generated_asm.push_str("\n\n");
+
+ generated_asm
+}
+
+fn call_inline_asm<'tcx>(
- let inline_asm_func = fx
- .cx
- .module
- .declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ asm_name: &str,
+ slot_size: Size,
+ inputs: Vec<(InlineAsmReg, Size, Value)>,
+ outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
+) {
+ let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ offset: None,
+ size: u32::try_from(slot_size.bytes()).unwrap(),
+ });
+ #[cfg(debug_assertions)]
+ fx.add_comment(stack_slot, "inline asm scratch slot");
+
+ let inline_asm_func = fx
+ .cx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
- fx.bcx
- .ins()
- .stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
++ let inline_asm_func = fx.cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(inline_asm_func, asm_name);
+
+ for (_reg, offset, value) in inputs {
- let value = fx
- .bcx
- .ins()
- .stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
++ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ }
+
+ let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+ for (_reg, offset, place) in outputs {
+ let ty = fx.clif_type(place.layout().ty).unwrap();
- reg.emit(generated_asm, InlineAsmArch::X86_64, None)
- .unwrap();
++ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+ }
+}
+
+fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
+ match reg_or_class {
+ InlineAsmRegOrRegClass::Reg(reg) => reg,
+ InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
+ }
+}
+
+fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
+ match arch {
+ InlineAsmArch::X86_64 => {
+ write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
- reg.emit(generated_asm, InlineAsmArch::X86_64, None)
- .unwrap();
++ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ generated_asm.push('\n');
+ }
+ _ => unimplemented!("save_register for {:?}", arch),
+ }
+}
+
+fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+) {
+ match arch {
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" mov ");
++ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("restore_register for {:?}", arch),
+ }
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Emulation of a subset of the cpuid x86 instruction.
+
+use crate::prelude::*;
+
+/// Emulates a subset of the cpuid x86 instruction.
+///
+/// This emulates an Intel CPU with SSE and SSE2 support, but which doesn't support anything else.
+pub(crate) fn codegen_cpuid_call<'tcx>(
- let vend0 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
- let vend2 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
- let vend1 = fx
- .bcx
- .ins()
- .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
- fx.bcx
- .ins()
- .jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ leaf: Value,
+ _subleaf: Value,
+) -> (Value, Value, Value, Value) {
+ let leaf_0 = fx.bcx.create_block();
+ let leaf_1 = fx.bcx.create_block();
+ let leaf_8000_0000 = fx.bcx.create_block();
+ let leaf_8000_0001 = fx.bcx.create_block();
+ let unsupported_leaf = fx.bcx.create_block();
+
+ let dest = fx.bcx.create_block();
+ let eax = fx.bcx.append_block_param(dest, types::I32);
+ let ebx = fx.bcx.append_block_param(dest, types::I32);
+ let ecx = fx.bcx.append_block_param(dest, types::I32);
+ let edx = fx.bcx.append_block_param(dest, types::I32);
+
+ let mut switch = cranelift_frontend::Switch::new();
+ switch.set_entry(0, leaf_0);
+ switch.set_entry(1, leaf_1);
+ switch.set_entry(0x8000_0000, leaf_8000_0000);
+ switch.set_entry(0x8000_0001, leaf_8000_0001);
+ switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
+
+ fx.bcx.switch_to_block(leaf_0);
+ let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
- let edx_features = fx
- .bcx
- .ins()
- .iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
- fx.bcx.ins().jump(
- dest,
- &[
- cpu_signature,
- additional_information,
- ecx_features,
- edx_features,
- ],
- );
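++    // cpuid reports the vendor string in EBX, EDX, ECX order: "Genu" (ebx),
++    // "ineI" (edx) and "ntel" (ecx) together spell "GenuineIntel".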
++ let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
++ let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
++ let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
++ fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+
+ fx.bcx.switch_to_block(leaf_1);
+ let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
+ let additional_information = fx.bcx.ins().iconst(types::I32, 0);
+ let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx
- .ins()
- .jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
++ let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
++ fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
+
+ fx.bcx.switch_to_block(leaf_8000_0000);
+ let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx
- .ins()
- .jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
++ fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+
+ fx.bcx.switch_to_block(leaf_8000_0001);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
++ fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+
+ fx.bcx.switch_to_block(unsupported_leaf);
+ crate::trap::trap_unreachable(
+ fx,
+ "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+ );
+
+ fx.bcx.switch_to_block(dest);
+ fx.bcx.ins().nop();
+
+ (eax, ebx, ecx, edx)
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: &str,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
+ let ret = destination.unwrap().0;
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ };
+
+ // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+ llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+ for lane in (0..lane_count).rev() {
+ let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ };
+ llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
+ let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
+ let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+ }
+ 3 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+ }
+ 4 => FloatCC::NotEqual,
+ 5 => {
+ unimplemented!("not less than");
+ }
+ 6 => {
+ unimplemented!("not less than or equal");
+ }
+ kind => unreachable!("kind {:?}", kind),
+ };
+
+ simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ });
+ };
+ llvm.x86.sse2.psrli.d, (c a, o imm8) {
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+ let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ };
+ CValue::by_val(res_lane, res_lane_layout)
+ });
+ };
+ llvm.x86.sse2.pslli.d, (c a, o imm8) {
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.pslli.d imm8 not const");
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+ let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ };
+ CValue::by_val(res_lane, res_lane_layout)
+ });
+ };
+ llvm.x86.sse2.storeu.dq, (v mem_addr, c a) {
+ // FIXME correctly handle the unalignment
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
--- /dev/null
- macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
- let new = $fx.bcx.ins().$op(old, $src);
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
- $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
-
- crate::atomic_shim::unlock_global_lock($fx);
- }
-
- macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
- crate::atomic_shim::lock_global_lock($fx);
-
- // Read old
- let clif_ty = $fx.clif_type($T).unwrap();
- let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
-
- // Compare
- let is_eq = $fx.bcx.ins().icmp(IntCC::SignedGreaterThan, old, $src);
- let new = $fx.bcx.ins().select(is_eq, old, $src);
-
- // Write new
- $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
-
- let ret_val = CValue::by_val(old, $ret.layout());
- $ret.write_cvalue($fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock($fx);
- }
-
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use crate::prelude::*;
++use cranelift_codegen::ir::AtomicRmwOp;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+macro intrinsic_pat {
+ (_) => {
+ _
+ },
+ ($name:ident) => {
+ stringify!($name)
+ },
+ ($name:literal) => {
+ stringify!($name)
+ },
+ ($x:ident . $($xs:tt).*) => {
+ concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
+ }
+}
+
+macro intrinsic_arg {
+ (o $fx:expr, $arg:ident) => {
+ $arg
+ },
+ (c $fx:expr, $arg:ident) => {
+ codegen_operand($fx, $arg)
+ },
+ (v $fx:expr, $arg:ident) => {
+ codegen_operand($fx, $arg).load_scalar($fx)
+ }
+}
+
+macro intrinsic_substs {
+ ($substs:expr, $index:expr,) => {},
+ ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
+ let $first = $substs.type_at($index);
+ intrinsic_substs!($substs, $index+1, $($rest),*);
+ }
+}
+
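+// Dispatches on the intrinsic name and binds its arguments; a typical arm looks
+// like `rotate_left, <T>(v x, v y) { ... };` where the `v`/`c`/`o` prefixes
+// select how each argument is passed (loaded scalar `Value`, `CValue`, or raw
+// `mir::Operand`, see `intrinsic_arg!` above).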
+macro intrinsic_match {
+ ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
+ _ => $unknown:block;
+ $(
+ $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
+ )*) => {
+ let _ = $substs; // Silence warning when substs is unused.
+ match $intrinsic {
+ $(
+ $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
+ #[allow(unused_parens, non_snake_case)]
+ {
+ $(
+ intrinsic_substs!($substs, 0, $($subst),*);
+ )?
+ if let [$($arg),*] = $args {
+ let ($($arg,)*) = (
+ $(intrinsic_arg!($a $fx, $arg),)*
+ );
+ #[warn(unused_parens, non_snake_case)]
+ {
+ $content
+ }
+ } else {
+ bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+ }
+ }
+ }
+ )*
+ _ => $unknown,
+ }
+ }
+}
+
+macro call_intrinsic_match {
+ ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
+ $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
+ )*) => {
+ match $intrinsic {
+ $(
+ stringify!($name) => {
+ assert!($substs.is_noop());
+ if let [$(ref $arg),*] = *$args {
+ let ($($arg,)*) = (
+ $(codegen_operand($fx, $arg),)*
+ );
+ let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
+ $ret.write_cvalue($fx, res);
+
+ if let Some((_, dest)) = $destination {
+ let ret_block = $fx.get_block(dest);
+ $fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ unreachable!();
+ }
+ } else {
+ bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+ }
+ }
+ )*
+ _ => {}
+ }
+ }
+}
+
- fn simd_for_each_lane<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ match $ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ $fx.tcx.sess.span_err(
+ $span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ $intrinsic, $ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+}
+
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ if !$ty.is_simd() {
+ $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match &layout.abi {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
- &mut FunctionCx<'_, 'tcx, M>,
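++/// Calls `f` once per lane of `val`, writing each produced lane into `ret`.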
++fn simd_for_each_lane<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(
- fn simd_pair_for_each_lane<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
++ &mut FunctionCx<'_, '_, 'tcx>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
+ Value,
+ ) -> CValue<'tcx>,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
+ let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+
+ ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
- &mut FunctionCx<'_, 'tcx, M>,
++fn simd_pair_for_each_lane<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(
- fn simd_reduce<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
++ &mut FunctionCx<'_, '_, 'tcx>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
+ Value,
+ Value,
+ ) -> CValue<'tcx>,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane in 0..lane_count {
+ let lane = mir::Field::new(lane.try_into().unwrap());
+ let x_lane = x.value_field(fx, lane).load_scalar(fx);
+ let y_lane = y.value_field(fx, lane).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+
+ ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ }
+}
+
- f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, TyAndLayout<'tcx>, Value, Value) -> Value,
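++/// Horizontally folds all lanes of `val` into a single scalar using `f`.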
++fn simd_reduce<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
- let lane = val
- .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
- .load_scalar(fx);
++ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ for lane_idx in 1..lane_count {
- fn simd_reduce_bool<'tcx, M: Module>(
- fx: &mut FunctionCx<'_, 'tcx, M>,
++ let lane =
++ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ res_val = f(fx, lane_layout, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
- f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, Value, Value) -> Value,
++fn simd_reduce_bool<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
- let lane = val
- .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
- .load_scalar(fx);
++ f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let lane =
++ val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
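+/// Expands a boolean to `0` or `!0` (all bits set) in the given layout's type,
+/// going through the same-width integer type for float lanes.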
+fn bool_to_zero_or_max_uint<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ val: Value,
+) -> CValue<'tcx> {
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ CValue::by_val(res, layout)
+}
+
+macro simd_cmp {
+ ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ let vector_ty = clif_vector_type($fx.tcx, $x.layout());
+
+ if let Some(vector_ty) = vector_ty {
+ let x = $x.load_scalar($fx);
+ let y = $y.load_scalar($fx);
+ let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
+
+ // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
+ let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
+
+ $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
+ } else {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ },
+ );
+ }
+ },
+ ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ // FIXME use vector icmp when possible
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ },
+ );
+ },
+}
+
+macro simd_int_binop {
+ ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
+ },
+ ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+ },
+}
+
+macro simd_int_flt_binop {
+ ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
+ },
+ ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+ },
+}
+
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+ span: Span,
+) {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+
+ let intrinsic = fx.tcx.item_name(def_id).as_str();
+ let intrinsic = &intrinsic[..];
+
+ let ret = match destination {
+ Some((place, _)) => place,
+ None => {
+            // Insert non-returning intrinsics here
+ match intrinsic {
+ "abort" => {
+ trap_abort(fx, "Called intrinsic::abort.");
+ }
+ "transmute" => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
+ }
+                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
+ }
+ return;
+ }
+ };
+
+ if intrinsic.starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+ let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ call_intrinsic_match! {
+ fx, intrinsic, substs, ret, destination, args,
+ expf32(flt) -> f32 => expf,
+ expf64(flt) -> f64 => exp,
+ exp2f32(flt) -> f32 => exp2f,
+ exp2f64(flt) -> f64 => exp2,
+ sqrtf32(flt) -> f32 => sqrtf,
+ sqrtf64(flt) -> f64 => sqrt,
+ powif32(a, x) -> f32 => __powisf2, // compiler-builtins
+ powif64(a, x) -> f64 => __powidf2, // compiler-builtins
+ powf32(a, x) -> f32 => powf,
+ powf64(a, x) -> f64 => pow,
+ logf32(flt) -> f32 => logf,
+ logf64(flt) -> f64 => log,
+ log2f32(flt) -> f32 => log2f,
+ log2f64(flt) -> f64 => log2,
+ log10f32(flt) -> f32 => log10f,
+ log10f64(flt) -> f64 => log10,
+ fabsf32(flt) -> f32 => fabsf,
+ fabsf64(flt) -> f64 => fabs,
+ fmaf32(x, y, z) -> f32 => fmaf,
+ fmaf64(x, y, z) -> f64 => fma,
+ copysignf32(x, y) -> f32 => copysignf,
+ copysignf64(x, y) -> f64 => copysign,
+
+ // rounding variants
+ // FIXME use clif insts
+ floorf32(flt) -> f32 => floorf,
+ floorf64(flt) -> f64 => floor,
+ ceilf32(flt) -> f32 => ceilf,
+ ceilf64(flt) -> f64 => ceil,
+ truncf32(flt) -> f32 => truncf,
+ truncf64(flt) -> f64 => trunc,
+ roundf32(flt) -> f32 => roundf,
+ roundf64(flt) -> f64 => round,
+
+ // trigonometry
+ sinf32(flt) -> f32 => sinf,
+ sinf64(flt) -> f64 => sin,
+ cosf32(flt) -> f32 => cosf,
+ cosf64(flt) -> f64 => cos,
+ tanf32(flt) -> f32 => tanf,
+ tanf64(flt) -> f64 => tan,
+ }
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+ };
+
+ assume, (c _a) {};
+ likely | unlikely, (c a) {
+ ret.write_cvalue(fx, a);
+ };
+ breakpoint, () {
+ fx.bcx.ins().debugtrap();
+ };
+ copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ if intrinsic.contains("nonoverlapping") {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+ }
+ };
+ // NOTE: the volatile variants have src and dst swapped
+ volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic.contains("nonoverlapping") {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+ }
+ };
+ size_of_val, <T> (c ptr) {
+ let layout = fx.layout_of(T);
+ let size = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ };
+ min_align_of_val, <T> (c ptr) {
+ let layout = fx.layout_of(T);
+ let align = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ };
+
+ _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ "unchecked_add" => BinOp::Add,
+ "unchecked_sub" => BinOp::Sub,
+ "unchecked_div" | "exact_div" => BinOp::Div,
+ "unchecked_rem" => BinOp::Rem,
+ "unchecked_shl" => BinOp::Shl,
+ "unchecked_shr" => BinOp::Shr,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ "add_with_overflow" => BinOp::Add,
+ "sub_with_overflow" => BinOp::Sub,
+ "mul_with_overflow" => BinOp::Mul,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ };
+ _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ "saturating_add" => BinOp::Add,
+ "saturating_sub" => BinOp::Sub,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+
+ let signed = type_sign(T);
+
+ let checked_res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ lhs,
+ rhs,
+ );
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+ let clif_ty = fx.clif_type(T).unwrap();
+
+ // `select.i8` is not implemented by Cranelift.
+ let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
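+ // On overflow, unsigned add clamps to MAX and unsigned sub to MIN; for the signed
+ // variants the saturation bound depends on the sign of `rhs` (see below).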
+ let val = match (intrinsic, signed) {
+ ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
+ ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
+ ("saturating_add", true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ ("saturating_sub", true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ let res = CValue::by_val(val, fx.layout_of(T));
+
+ ret.write_cvalue(fx, res);
+ };
+ rotate_left, <T>(v x, v y) {
+ let layout = fx.layout_of(T);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
+ rotate_right, <T>(v x, v y) {
+ let layout = fx.layout_of(T);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
+
+ // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+ // doesn't have UB, both are codegen'ed the same way.
+ offset | arith_offset, (c base, v offset) {
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ };
+
+ transmute, (c from) {
+ ret.write_cvalue_transmute(fx, from);
+ };
+ write_bytes | volatile_set_memory, (c dst, v val, v count) {
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.cx.module.target_config(), dst_ptr, val, count);
+ };
+ ctlz | ctlz_nonzero, <T> (v arg) {
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+ // FIXME verify this algorithm is correct
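+ // Leading zeros of a 128-bit value: if the high half is zero, the count is 64 + clz(low half); otherwise it is clz(high half).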
+ let (lsb, msb) = fx.bcx.ins().isplit(arg);
+ let lsb_lz = fx.bcx.ins().clz(lsb);
+ let msb_lz = fx.bcx.ins().clz(msb);
+ let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
+ let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
+ let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
+ fx.bcx.ins().uextend(types::I128, res)
+ } else {
+ fx.bcx.ins().clz(arg)
+ };
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ cttz | cttz_nonzero, <T> (v arg) {
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+ // FIXME verify this algorithm is correct
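+ // Trailing zeros of a 128-bit value: if the low half is zero, the count is 64 + ctz(high half); otherwise it is ctz(low half).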
+ let (lsb, msb) = fx.bcx.ins().isplit(arg);
+ let lsb_tz = fx.bcx.ins().ctz(lsb);
+ let msb_tz = fx.bcx.ins().ctz(msb);
+ let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
+ let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
+ let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
+ fx.bcx.ins().uextend(types::I128, res)
+ } else {
+ fx.bcx.ins().ctz(arg)
+ };
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ ctpop, <T> (v arg) {
+ let res = fx.bcx.ins().popcnt(arg);
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ bitreverse, <T> (v arg) {
+ let res = fx.bcx.ins().bitrev(arg);
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ bswap, <T> (v arg) {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
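+ // Shift each byte into its mirrored position, mask it, and OR the pieces back together; i128 swaps each 64-bit half and concatenates them in reverse order.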
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
+ let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+ let layout = fx.layout_of(T);
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+ span,
+ ));
+ return;
+ }
+ };
+
+ volatile_load | unaligned_volatile_load, (c ptr) {
+ // Cranelift treats loads as volatile by default
+ // FIXME ignore during stack2reg optimization
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout =
+ fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ };
+ volatile_store | unaligned_volatile_store, (v ptr, c val) {
+ // Cranelift treats stores as volatile by default
+ // FIXME ignore during stack2reg optimization
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ };
+
+ pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(
+ fx,
+ const_val,
+ ret.layout().ty,
+ );
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_offset_from, <T> (v ptr, v base) {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
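+ // The result is the pointer distance in elements: (ptr - base) / size_of::<T>().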
+ let pointee_size: u64 = fx.layout_of(T).size.bytes();
+ let diff = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_eq, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_ne, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ caller_location, () {
+ let caller_location = fx.get_caller_location(span);
+ ret.write_cvalue(fx, caller_location);
+ };
+
+ _ if intrinsic.starts_with("atomic_fence"), () {
- crate::atomic_shim::lock_global_lock(fx);
- crate::atomic_shim::unlock_global_lock(fx);
++ fx.bcx.ins().fence();
+ };
+ _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
- _ if intrinsic.starts_with("atomic_load"), (c ptr) {
- crate::atomic_shim::lock_global_lock(fx);
++ // FIXME use a compiler fence once Cranelift supports it
++ fx.bcx.ins().fence();
+ };
- let inner_layout =
- fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
- let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
- ret.write_cvalue(fx, val);
++ _ if intrinsic.starts_with("atomic_load"), <T> (v ptr) {
++ validate_atomic_type!(fx, intrinsic, span, T);
++ let ty = fx.clif_type(T).unwrap();
+
- crate::atomic_shim::unlock_global_lock(fx);
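++ // A single native `atomic_load` replaces the old global-lock shim.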
++ let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+
- crate::atomic_shim::lock_global_lock(fx);
-
- let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
- dest.write_cvalue(fx, val);
++ let val = CValue::by_val(val, fx.layout_of(T));
++ ret.write_cvalue(fx, val);
+ };
+ _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+ validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
- crate::atomic_shim::unlock_global_lock(fx);
++ let val = val.load_scalar(fx);
+
- _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, T);
++ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ };
- crate::atomic_shim::lock_global_lock(fx);
-
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
++ _ if intrinsic.starts_with("atomic_xchg"), (v ptr, c new) {
++ let layout = new.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
+
- // Write new
- let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
- dest.write_cvalue(fx, src);
++ let new = new.load_scalar(fx);
+
- crate::atomic_shim::unlock_global_lock(fx);
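++ // `atomic_rmw` with `Xchg` atomically stores `new` and returns the previous value.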
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
- _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
- validate_atomic_type!(fx, intrinsic, span, T);
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- crate::atomic_shim::lock_global_lock(fx);
-
- // Read old
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
-
- // Compare
++ _ if intrinsic.starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
++ let layout = new.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
- let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
-
- // Write new
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
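++ // `atomic_cas` returns the value found in memory; success is detected by comparing it with `test_old`.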
++ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
- ret.write_cvalue(fx, ret_val);
-
- crate::atomic_shim::unlock_global_lock(fx);
+
+ let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
- _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++ ret.write_cvalue(fx, ret_val);
+ };
+
- atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
++ _ if intrinsic.starts_with("atomic_xadd"), (v ptr, c amount) {
++ let layout = amount.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let amount = amount.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
++ _ if intrinsic.starts_with("atomic_xsub"), (v ptr, c amount) {
++ let layout = amount.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let amount = amount.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
++ _ if intrinsic.starts_with("atomic_and"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, T);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- crate::atomic_shim::lock_global_lock(fx);
-
- let clif_ty = fx.clif_type(T).unwrap();
- let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
- let and = fx.bcx.ins().band(old, src);
- let new = fx.bcx.ins().bnot(and);
- fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
- ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
++ _ if intrinsic.starts_with("atomic_or"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
- crate::atomic_shim::unlock_global_lock(fx);
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
- _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
++ _ if intrinsic.starts_with("atomic_xor"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
++
++ // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
++ _ if intrinsic.starts_with("atomic_nand"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
++ _ if intrinsic.starts_with("atomic_max"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
+
- atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
++ _ if intrinsic.starts_with("atomic_umax"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
++ _ if intrinsic.starts_with("atomic_min"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
- _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
- validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
- atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
++ _ if intrinsic.starts_with("atomic_umin"), (v ptr, c src) {
++ let layout = src.layout();
++ validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = fx.clif_type(layout.ty).unwrap();
++
+ let src = src.load_scalar(fx);
++
++ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
++
++ let old = CValue::by_val(old, layout);
++ ret.write_cvalue(fx, old);
+ };
+
+ minnumf32, (v a, v b) {
+ let val = fx.bcx.ins().fmin(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ minnumf64, (v a, v b) {
+ let val = fx.bcx.ins().fmin(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf32, (v a, v b) {
+ let val = fx.bcx.ins().fmax(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf64, (v a, v b) {
+ let val = fx.bcx.ins().fmax(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+
+ try, (v f, v data, v _catch_fn) {
+ // FIXME once unwinding is supported, change this to actually catch panics
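+ // Until then, call `f(data)` directly and return 0 to signal that no panic occurred.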
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: CallConv::triple_default(fx.triple()),
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ };
+
+ fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+ let res = crate::num::codegen_float_binop(fx, match intrinsic {
+ "fadd_fast" => BinOp::Add,
+ "fsub_fast" => BinOp::Sub,
+ "fmul_fast" => BinOp::Mul,
+ "fdiv_fast" => BinOp::Div,
+ "frem_fast" => BinOp::Rem,
+ _ => unreachable!(),
+ }, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ float_to_int_unchecked, (v f) {
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use super::*;
+use crate::prelude::*;
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+
+ let intrinsic = fx.tcx.item_name(def_id).as_str();
+ let intrinsic = &intrinsic[..];
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ };
+
+ simd_cast, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+ let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
+
+ let from_signed = type_sign(lane_layout.ty);
+ let to_signed = type_sign(ret_lane_layout.ty);
+
+ let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
+ simd_eq, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, Equal|Equal(x, y) -> ret);
+ };
+ simd_ne, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
+ };
+ simd_lt, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
+ };
+ simd_le, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
+ };
+ simd_gt, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
+ };
+ simd_ge, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(
+ fx,
+ UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
+ (x, y) -> ret
+ );
+ };
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+
+ let n: u16 = intrinsic["simd_shuffle".len()..].parse().unwrap();
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
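+ // `idx` is a constant `[u32; N]`; read its bytes out of the const allocation and decode one u32 per output lane.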
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const.val {
+ ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
+ let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
+ let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
+ alloc.get_bytes(fx, ptr, size).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count).map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4*i.. 4*i + 4],
+ ).expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ }).collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
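+ // Lane indexes below `lane_count` select from `x`; the remainder select from `y`.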
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_field(fx, mir::Field::new(in_idx.into()))
+ } else {
+ y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
+ };
+ let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ };
+
+ simd_insert, (c base, o idx, c val) {
+ // FIXME validate
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(
+ span,
+ "Index argument for `simd_insert` is not a constant",
+ );
+ };
+
+ let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ };
+
+ simd_extract, (c v, o idx) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(
+ span,
+ "Index argument for `simd_extract` is not a constant",
+ );
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
+ let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32 */)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret.write_cvalue(fx, ret_lane);
+ };
+
+ simd_add, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
+ };
+ simd_sub, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
+ };
+ simd_mul, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
+ };
+ simd_div, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
+ };
+ simd_shl, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, ishl(x, y) -> ret);
+ };
+ simd_shr, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
+ };
+ simd_and, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, band(x, y) -> ret);
+ };
+ simd_or, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, bor(x, y) -> ret);
+ };
+ simd_xor, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, bxor(x, y) -> ret);
+ };
+
+ simd_fma, (c a, c b, c c) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ let layout = a.layout();
+
+ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+
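+ // Lowered per lane as an unfused multiply followed by an add instead of a single fused mul-add.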
+ for lane in 0..lane_count {
+ let lane = mir::Field::new(lane.try_into().unwrap());
+ let a_lane = a.value_field(fx, lane).load_scalar(fx);
+ let b_lane = b.value_field(fx, lane).load_scalar(fx);
+ let c_lane = c.value_field(fx, lane).load_scalar(fx);
+
+ let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
+ let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
+
+ ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ }
+ };
+
+ simd_fmin, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_flt_binop!(fx, fmin(x, y) -> ret);
+ };
+ simd_fmax, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_flt_binop!(fx, fmax(x, y) -> ret);
+ };
+
+ simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ if lane_layout.ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ };
+
+ simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ if lane_layout.ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ };
+
+ simd_reduce_all, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
+ };
+
+ simd_reduce_any, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
+ };
+
+ // simd_fabs
+ // simd_saturating_add
+ // simd_bitmask
+ // simd_select
+ // simd_rem
+ }
+}
--- /dev/null
- #[cfg(feature = "jit")]
- extern crate libc;
+#![feature(
+ rustc_private,
+ decl_macro,
+ type_alias_impl_trait,
+ associated_type_bounds,
+ never_type,
+ try_blocks,
+ hash_drain_filter
+)]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
- mod atomic_shim;
+extern crate snap;
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+use std::str::FromStr;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorReported;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+
+use cranelift_codegen::settings::{self, Configurable};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
- struct CodegenCx<'tcx, M: Module> {
+mod backend;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod metadata;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+ pub(crate) use std::convert::{TryFrom, TryInto};
+
+ pub(crate) use rustc_span::Span;
+
+ pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
+ pub(crate) use rustc_middle::mir::{self, *};
+ pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
+ pub(crate) use rustc_middle::ty::{
+ self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
+ TypeFoldable, UintTy,
+ };
+ pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
+
+ pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+ pub(crate) use rustc_index::vec::Idx;
+
+ pub(crate) use cranelift_codegen::entity::EntitySet;
+ pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+ pub(crate) use cranelift_codegen::ir::function::Function;
+ pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
+ AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+ StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
+ pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
+ pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+ pub(crate) use cranelift_module::{self, DataContext, DataId, FuncId, Linkage, Module};
+
+ pub(crate) use crate::abi::*;
+ pub(crate) use crate::base::{codegen_operand, codegen_place};
+ pub(crate) use crate::cast::*;
+ pub(crate) use crate::common::*;
+ pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+ pub(crate) use crate::pointer::Pointer;
+ pub(crate) use crate::trap::*;
+ pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
- module: M,
++struct CodegenCx<'m, 'tcx: 'm> {
+ tcx: TyCtxt<'tcx>,
- impl<'tcx, M: Module> CodegenCx<'tcx, M> {
- fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool, pic_eh_frame: bool) -> Self {
- let unwind_context = UnwindContext::new(tcx, module.isa(), pic_eh_frame);
- let debug_context = if debug_info {
- Some(DebugContext::new(tcx, module.isa()))
- } else {
- None
- };
++ module: &'m mut dyn Module,
+ global_asm: String,
+ constants_cx: ConstantCx,
+ cached_context: Context,
+ vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
+ debug_context: Option<DebugContext<'tcx>>,
+ unwind_context: UnwindContext<'tcx>,
+}
+
- fn finalize(mut self) -> (M, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
- self.constants_cx.finalize(self.tcx, &mut self.module);
- (
- self.module,
- self.global_asm,
- self.debug_context,
- self.unwind_context,
- )
++impl<'m, 'tcx> CodegenCx<'m, 'tcx> {
++ fn new(
++ tcx: TyCtxt<'tcx>,
++ backend_config: BackendConfig,
++ module: &'m mut dyn Module,
++ debug_info: bool,
++ ) -> Self {
++ let unwind_context = UnwindContext::new(
++ tcx,
++ module.isa(),
++ matches!(backend_config.codegen_mode, CodegenMode::Aot),
++ );
++ let debug_context =
++ if debug_info { Some(DebugContext::new(tcx, module.isa())) } else { None };
+ CodegenCx {
+ tcx,
+ module,
+ global_asm: String::new(),
+ constants_cx: ConstantCx::default(),
+ cached_context: Context::new(),
+ vtables: FxHashMap::default(),
+ debug_context,
+ unwind_context,
+ }
+ }
+
- .set(
- "enable_verifier",
- if cfg!(debug_assertions) {
- "true"
- } else {
- "false"
- },
- )
++ fn finalize(self) -> (String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
++ self.constants_cx.finalize(self.tcx, self.module);
++ (self.global_asm, self.debug_context, self.unwind_context)
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum CodegenMode {
+ Aot,
+ Jit,
+ JitLazy,
+}
+
+impl Default for CodegenMode {
+ fn default() -> Self {
+ CodegenMode::Aot
+ }
+}
+
+impl FromStr for CodegenMode {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "aot" => Ok(CodegenMode::Aot),
+ "jit" => Ok(CodegenMode::Jit),
+ "jit-lazy" => Ok(CodegenMode::JitLazy),
+ _ => Err(format!("Unknown codegen mode `{}`", s)),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, Default)]
+pub struct BackendConfig {
+ pub codegen_mode: CodegenMode,
+}
+
+impl BackendConfig {
+ fn from_opts(opts: &[String]) -> Result<Self, String> {
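+ // Each option must be a `name=value` pair; in practice these arrive via `-Cllvm-args` (see `codegen_crate`).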
+ let mut config = BackendConfig::default();
+ for opt in opts {
+ if let Some((name, value)) = opt.split_once('=') {
+ match name {
+ "mode" => config.codegen_mode = value.parse()?,
+ _ => return Err(format!("Unknown option `{}`", name)),
+ }
+ } else {
+ return Err(format!("Invalid option `{}`", opt));
+ }
+ }
+ Ok(config)
+ }
+}
+
+pub struct CraneliftCodegenBackend {
+ pub config: Option<BackendConfig>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+ fn init(&self, sess: &Session) {
+ if sess.lto() != rustc_session::config::Lto::No && sess.opts.cg.embed_bitcode {
+ sess.warn("LTO is not supported. You may get a linker error.");
+ }
+ }
+
+ fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
+ Box::new(crate::metadata::CraneliftMetadataLoader)
+ }
+
+ fn provide(&self, _providers: &mut Providers) {}
+ fn provide_extern(&self, _providers: &mut Providers) {}
+
+ fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
+ vec![]
+ }
+
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ let config = if let Some(config) = self.config {
+ config
+ } else {
+ BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
+ .unwrap_or_else(|err| tcx.sess.fatal(&err))
+ };
+ let res = driver::codegen_crate(tcx, metadata, need_metadata_module, config);
+
+ res
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ _sess: &Session,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+ Ok(*ongoing_codegen
+ .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+ .unwrap())
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorReported> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ let target_cpu = crate::target_triple(sess).to_string();
+ link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+ sess,
+ &codegen_results,
+ outputs,
+ &codegen_results.crate_name.as_str(),
+ &target_cpu,
+ );
+
+ Ok(())
+ }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+ sess.target.llvm_target.parse().unwrap()
+}
+
+fn build_isa(sess: &Session) -> Box<dyn isa::TargetIsa + 'static> {
+ use target_lexicon::BinaryFormat;
+
+ let target_triple = crate::target_triple(sess);
+
+ let mut flags_builder = settings::builder();
+ flags_builder.enable("is_pic").unwrap();
+ flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+ flags_builder
- let variant = if cfg!(feature = "oldbe") {
- cranelift_codegen::isa::BackendVariant::Legacy
- } else {
- cranelift_codegen::isa::BackendVariant::MachInst
- };
++ .set("enable_verifier", if cfg!(debug_assertions) { "true" } else { "false" })
+ .unwrap();
+
+ let tls_model = match target_triple.binary_format {
+ BinaryFormat::Elf => "elf_gd",
+ BinaryFormat::Macho => "macho",
+ BinaryFormat::Coff => "coff",
+ _ => "none",
+ };
+ flags_builder.set("tls_model", tls_model).unwrap();
+
+ flags_builder.set("enable_simd", "true").unwrap();
+
+ use rustc_session::config::OptLevel;
+ match sess.opts.optimize {
+ OptLevel::No => {
+ flags_builder.set("opt_level", "none").unwrap();
+ }
+ OptLevel::Less | OptLevel::Default => {}
+ OptLevel::Aggressive => {
+ flags_builder.set("opt_level", "speed_and_size").unwrap();
+ }
+ OptLevel::Size | OptLevel::SizeMin => {
+ sess.warn("Optimizing for size is not supported. Just ignoring the request");
+ }
+ }
+
+ let flags = settings::Flags::new(flags_builder);
+
++ let variant = cranelift_codegen::isa::BackendVariant::MachInst;
+ let mut isa_builder = cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
+ // Don't use "haswell", as it implies `has_lzcnt`.macOS CI is still at Ivy Bridge EP, so `lzcnt`
+ // is interpreted as `bsr`.
+ isa_builder.enable("nehalem").unwrap();
+ isa_builder.finish(flags)
+}
+
+/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(CraneliftCodegenBackend { config: None })
+}
--- /dev/null
- use_jit: bool,
+use rustc_hir::LangItem;
+use rustc_session::config::EntryFnType;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the Rust runtime and call
+/// the user's main function.
+pub(crate) fn maybe_create_entry_wrapper(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext<'_>,
- create_entry_fn(
- tcx,
- module,
- unwind_context,
- main_def_id,
- use_start_lang_item,
- use_jit,
- );
+) {
+ let (main_def_id, use_start_lang_item) = match tcx.entry_fn(LOCAL_CRATE) {
+ Some((def_id, entry_ty)) => (
+ def_id.to_def_id(),
+ match entry_ty {
+ EntryFnType::Main => true,
+ EntryFnType::Start => false,
+ },
+ ),
+ None => return,
+ };
+
+ let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
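+ // Only generate the wrapper if the crate's main symbol is declared in this module.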
+ if module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ return;
+ }
+
- use_jit: bool,
++ create_entry_fn(tcx, module, unwind_context, main_def_id, use_start_lang_item);
+
+ fn create_entry_fn(
+ tcx: TyCtxt<'_>,
+ m: &mut impl Module,
+ unwind_context: &mut UnwindContext<'_>,
+ rust_main_def_id: DefId,
+ use_start_lang_item: bool,
- returns: vec![AbiParam::new(
- m.target_config().pointer_type(), /*isize*/
- )],
+ ) {
+ let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+ // Given that `main()` has no arguments, its return type cannot have late-bound
+ // regions, since late-bound regions must appear in the argument listing.
+ let main_ret_ty = tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap());
+
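+ // The C-level `main` takes `(argc, argv)` and returns a status code; all of them are pointer-sized here.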
+ let cmain_sig = Signature {
+ params: vec![
+ AbiParam::new(m.target_config().pointer_type()),
+ AbiParam::new(m.target_config().pointer_type()),
+ ],
- let cmain_func_id = m
- .declare_function("main", Linkage::Export, &cmain_sig)
- .unwrap();
++ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(m.isa().triple()),
+ };
+
- let main_func_id = m
- .declare_function(&main_name, Linkage::Import, &main_sig)
- .unwrap();
++ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+ let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+ let main_name = tcx.symbol_name(instance).name.to_string();
+ let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
- crate::atomic_shim::init_global_lock(m, &mut bcx, use_jit);
-
++ let main_func_id = m.declare_function(&main_name, Linkage::Import, &main_sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
- let main_val = bcx
- .ins()
- .func_addr(m.target_config().pointer_type(), main_func_ref);
+ let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+ let call_inst = if use_start_lang_item {
+ let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+ let start_instance = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ start_def_id,
+ tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+ let start_func_id = import_function(tcx, m, start_instance);
+
++ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+ let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+ bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv])
+ } else {
+ // using user-defined start fn
+ bcx.ins().call(main_func_ref, &[arg_argc, arg_argv])
+ };
+
+ let result = bcx.inst_results(call_inst)[0];
+ bcx.ins().return_(&[result]);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ m.define_function(
+ cmain_func_id,
+ &mut ctx,
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap();
+ unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+ }
+}
--- /dev/null
- FrameEncoder::new(&mut compressed)
- .write_all(&metadata.raw_data)
- .unwrap();
+//! Reading and writing of the rustc metadata for rlibs and dylibs
+
+use std::convert::TryFrom;
+use std::fs::File;
+use std::path::Path;
+
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::rustc_erase_owner;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config;
+use rustc_target::spec::Target;
+
+use crate::backend::WriteMetadata;
+
+pub(crate) struct CraneliftMetadataLoader;
+
+impl MetadataLoader for CraneliftMetadataLoader {
+ fn get_rlib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ let mut archive = ar::Archive::new(File::open(path).map_err(|e| format!("{:?}", e))?);
+ // Iterate over all entries in the archive:
+ while let Some(entry_result) = archive.next_entry() {
+ let mut entry = entry_result.map_err(|e| format!("{:?}", e))?;
+ if entry.header().identifier() == METADATA_FILENAME.as_bytes() {
+ let mut buf = Vec::with_capacity(
+ usize::try_from(entry.header().size())
+ .expect("Rlib metadata file too big to load into memory."),
+ );
+ ::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?;
+ let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
+ return Ok(rustc_erase_owner!(buf.map_owner_box()));
+ }
+ }
+
+ Err("couldn't find metadata entry".to_string())
+ }
+
+ fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ use object::{Object, ObjectSection};
+ let file = std::fs::read(path).map_err(|e| format!("read:{:?}", e))?;
+ let file = object::File::parse(&file).map_err(|e| format!("parse: {:?}", e))?;
+ let buf = file
+ .section_by_name(".rustc")
+ .ok_or("no .rustc section")?
+ .data()
+ .map_err(|e| format!("failed to read .rustc section: {:?}", e))?
+ .to_owned();
+ let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf);
+ Ok(rustc_erase_owner!(buf.map_owner_box()))
+ }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
+pub(crate) fn write_metadata<P: WriteMetadata>(
+ tcx: TyCtxt<'_>,
+ product: &mut P,
+) -> EncodedMetadata {
+ use snap::write::FrameEncoder;
+ use std::io::Write;
+
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ enum MetadataKind {
+ None,
+ Uncompressed,
+ Compressed,
+ }
+
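+ // Variant order gives `None < Uncompressed < Compressed`, so `.max()` below picks the strongest metadata requirement among the requested crate types.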
+ let kind = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|ty| match *ty {
+ config::CrateType::Executable
+ | config::CrateType::Staticlib
+ | config::CrateType::Cdylib => MetadataKind::None,
+
+ config::CrateType::Rlib => MetadataKind::Uncompressed,
+
+ config::CrateType::Dylib | config::CrateType::ProcMacro => MetadataKind::Compressed,
+ })
+ .max()
+ .unwrap_or(MetadataKind::None);
+
+ if kind == MetadataKind::None {
+ return EncodedMetadata::new();
+ }
+
+ let metadata = tcx.encode_metadata();
+ if kind == MetadataKind::Uncompressed {
+ return metadata;
+ }
+
+ assert!(kind == MetadataKind::Compressed);
+ let mut compressed = tcx.metadata_encoding_version();
++ FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
+
+ product.add_rustc_section(
+ rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx),
+ compressed,
+ tcx.sess.target.is_like_osx,
+ );
+
+ metadata
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+ use BinOp::*;
+ use IntCC::*;
+ Some(match bin_op {
+ Eq => Equal,
+ Lt => {
+ if signed {
+ SignedLessThan
+ } else {
+ UnsignedLessThan
+ }
+ }
+ Le => {
+ if signed {
+ SignedLessThanOrEqual
+ } else {
+ UnsignedLessThanOrEqual
+ }
+ }
+ Ne => NotEqual,
+ Ge => {
+ if signed {
+ SignedGreaterThanOrEqual
+ } else {
+ UnsignedGreaterThanOrEqual
+ }
+ }
+ Gt => {
+ if signed {
+ SignedGreaterThan
+ } else {
+ UnsignedGreaterThan
+ }
+ }
+ _ => return None,
+ })
+}
+
+fn codegen_compare_bin_op<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ signed: bool,
+ lhs: Value,
+ rhs: Value,
+) -> CValue<'tcx> {
+ let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+ let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
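+ // `bint` materializes the comparison flag as an i8 holding 0 or 1.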
+ let val = fx.bcx.ins().bint(types::I8, val);
+ CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
- _ => unreachable!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ match in_lhs.layout().ty.kind() {
+ ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+ let signed = type_sign(in_lhs.layout().ty);
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
+ && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
+ || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
+ {
+ // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
+ (
+ fx.bcx.ins().sextend(types::I32, lhs),
+ fx.bcx.ins().sextend(types::I32, rhs),
+ )
+ } else {
+ (lhs, rhs)
+ };
+
+ return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ match in_lhs.layout().ty.kind() {
+ ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+ // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
- _ => unreachable!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "int binop requires lhs and rhs of same type"
+ );
+ }
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let val = match bin_op {
+ BinOp::Add => b.iadd(lhs, rhs),
+ BinOp::Sub => b.isub(lhs, rhs),
+ BinOp::Mul => b.imul(lhs, rhs),
+ BinOp::Div => {
+ if signed {
+ b.sdiv(lhs, rhs)
+ } else {
+ b.udiv(lhs, rhs)
+ }
+ }
+ BinOp::Rem => {
+ if signed {
+ b.srem(lhs, rhs)
+ } else {
+ b.urem(lhs, rhs)
+ }
+ }
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+ BinOp::Shl => {
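+ // Mask the shift amount to `bits - 1` so the shift count is always in range for the type.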
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+ fx.bcx.ins().ishl(lhs, actual_shift)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+ if signed {
+ fx.bcx.ins().sshr(lhs, actual_shift)
+ } else {
+ fx.bcx.ins().ushr(lhs, actual_shift)
+ }
+ }
+ // Compare binops are handled by `codegen_binop`.
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ };
+
+ CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
- fx.bcx
- .ins()
- .icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "checked int binop requires lhs and rhs of same type"
+ );
+ }
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let (res, has_overflow) = match bin_op {
+ BinOp::Add => {
+ /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+ (val, c_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
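+ // Unsigned overflow iff the wrapped sum is less than `lhs`; signed overflow iff `val < lhs` disagrees with `rhs < 0`.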
+ let val = fx.bcx.ins().iadd(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+ } else {
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, slt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Sub => {
+ /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+ (val, b_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+ let val = fx.bcx.ins().isub(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+ } else {
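+ // Signed overflow occurred iff (rhs < 0) differs from (val > lhs).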
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, sgt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Mul => {
+ let ty = fx.bcx.func.dfg.value_type(lhs);
+ match ty {
+ types::I8 | types::I16 | types::I32 if !signed => {
+ let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, has_overflow)
+ }
+ types::I8 | types::I16 | types::I32 if signed => {
+ let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_underflow =
- let has_overflow = fx
- .bcx
- .ins()
- .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
++ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+ }
+ types::I64 => {
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = if !signed {
+ let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+ } else {
+ let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+ let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);
+ let not_all_ones = fx.bcx.ins().icmp_imm(
+ IntCC::NotEqual,
+ val_hi,
+ u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64,
+ );
+ fx.bcx.ins().band(not_all_zero, not_all_ones)
+ };
+ (val, has_overflow)
+ }
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
+ _ => unreachable!("invalid non-integer type {}", ty),
+ }
+ }
+ BinOp::Shl => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+ let val = fx.bcx.ins().ishl(lhs, actual_shift);
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow = fx
- .bcx
- .ins()
- .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
++ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+ let val = if !signed {
+ fx.bcx.ins().ushr(lhs, actual_shift)
+ } else {
+ fx.bcx.ins().sshr(lhs, actual_shift)
+ };
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
- _ => bug!(
- "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
- bin_op,
- in_lhs,
- in_rhs
- ),
++ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
- // FIXME directly write to result place instead
- let out_place = CPlace::new_stack_slot(
- fx,
- fx.layout_of(
- fx.tcx
- .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
- ),
- );
- let out_layout = out_place.layout();
- out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
-
- out_place.to_cvalue(fx)
++ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
+ };
+
+ let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
++ CValue::by_val_pair(res, has_overflow, out_layout)
+}
+
+pub(crate) fn codegen_float_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::Add => b.fadd(lhs, rhs),
+ BinOp::Sub => b.fsub(lhs, rhs),
+ BinOp::Mul => b.fmul(lhs, rhs),
+ BinOp::Div => b.fdiv(lhs, rhs),
+ BinOp::Rem => {
+ let name = match in_lhs.layout().ty.kind() {
+ ty::Float(FloatTy::F32) => "fmodf",
+ ty::Float(FloatTy::F64) => "fmod",
+ _ => bug!(),
+ };
+ return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+ }
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let fltcc = match bin_op {
+ BinOp::Eq => FloatCC::Equal,
+ BinOp::Lt => FloatCC::LessThan,
+ BinOp::Le => FloatCC::LessThanOrEqual,
+ BinOp::Ne => FloatCC::NotEqual,
+ BinOp::Ge => FloatCC::GreaterThanOrEqual,
+ BinOp::Gt => FloatCC::GreaterThan,
+ _ => unreachable!(),
+ };
+ let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
- fx.bcx
- .ins()
- .icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
+
+ if is_thin_ptr {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ return codegen_compare_bin_op(fx, bin_op, false, lhs, rhs);
+ }
+ BinOp::Offset => {
+ let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+ let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ return CValue::by_val(res, base.layout());
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+ } else {
+ let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+ let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+ let res = match bin_op {
+ BinOp::Eq => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+ let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+ fx.bcx.ins().band(ptr_eq, extra_eq)
+ }
+ BinOp::Ne => {
+ let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+ let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+ fx.bcx.ins().bor(ptr_ne, extra_ne)
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
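+ // Lexicographic order: the metadata halves decide only when the pointer halves are equal.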
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
+ let ptr_cmp =
- CValue::by_val(
- fx.bcx.ins().bint(types::I8, res),
- fx.layout_of(fx.tcx.types.bool),
- )
++ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ let extra_cmp = fx.bcx.ins().icmp(
+ bin_op_to_intcc(bin_op, false).unwrap(),
+ lhs_extra,
+ rhs_extra,
+ );
+
+ fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+ }
+ _ => panic!("bin_op {:?} on ptr", bin_op),
+ };
+
++ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
+ }
+}
--- /dev/null
- for block in cold_blocks
- .keys()
- .filter(|&block| cold_blocks.contains(block))
- {
+//! This optimization moves cold code to the end of the function.
+//!
+//! Some code is executed much less often than other code, for example panicking or the landing
+//! pads for unwinding. By moving this cold code to the end of the function, the average number
+//! of jumps is reduced and code locality is improved.
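+//!
+//! As a rough sketch (block names are illustrative), a layout like
+//!
+//! ```text
+//! block0: ; hot entry
+//! block1: ; cold panic path
+//! block2: ; hot continuation
+//! ```
+//!
+//! becomes
+//!
+//! ```text
+//! block0: ; hot entry
+//! block2: ; hot continuation
+//! block1: ; cold panic path, now at the end
+//! ```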
+//!
+//! # Undefined behaviour
+//!
+//! This optimization doesn't assume anything that isn't already assumed by Cranelift itself.
+
+use crate::prelude::*;
+
+pub(super) fn optimize_function(ctx: &mut Context, cold_blocks: &EntitySet<Block>) {
+ // FIXME Move the blocks in place instead of removing and re-appending them once
+ // bytecodealliance/cranelift#1339 is implemented.
+
+ let mut block_insts = FxHashMap::default();
- for block in cold_blocks
- .keys()
- .filter(|&block| cold_blocks.contains(block))
- {
++ for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
+ let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>();
+ for &inst in &insts {
+ ctx.func.layout.remove_inst(inst);
+ }
+ block_insts.insert(block, insts);
+ ctx.func.layout.remove_block(block);
+ }
+
+ // And then append them at the back again.
++ for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
+ ctx.func.layout.append_block(block);
+ for inst in block_insts.remove(&block).unwrap() {
+ ctx.func.layout.append_inst(inst, block);
+ }
+ }
+}
--- /dev/null
- self::stack2reg::optimize_function(ctx, clif_comments);
+//! Various optimizations specific to cg_clif
+
+use crate::prelude::*;
+
+mod code_layout;
+pub(crate) mod peephole;
+mod stack2reg;
+
+pub(crate) fn optimize_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ ctx: &mut Context,
+ cold_blocks: &EntitySet<Block>,
+ clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+ // The code_layout optimization is very cheap.
+ self::code_layout::optimize_function(ctx, cold_blocks);
+
+ if tcx.sess.opts.optimize == rustc_session::config::OptLevel::No {
+ return; // FIXME classify optimizations over opt levels
+ }
++
++ // FIXME(#1142) stack2reg miscompiles lewton
++ if false {
++ self::stack2reg::optimize_function(ctx, clif_comments);
++ }
++
+ crate::pretty_clif::write_clif_file(tcx, "stack2reg", None, instance, &ctx, &*clif_comments);
+ crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
--- /dev/null
- InstructionData::Unary {
- opcode: Opcode::Bint,
- arg,
- } => arg,
+//! Peephole optimizations that can be performed while creating clif ir.
+
+use cranelift_codegen::ir::{
+ condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
+};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return its input; otherwise return the
+/// given value.
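+///
+/// E.g. for `v2 = icmp eq v0, v1; v3 = bint.i8 v2` (an illustrative snippet), passing `v3`
+/// returns `v2`.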
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
- InstructionData::Load {
- opcode: Opcode::Load,
- arg: ptr,
- flags,
- offset,
- } => {
++ InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
+ _ => arg,
+ }
+ } else {
+ arg
+ }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not`, return the input and `true`;
+/// otherwise return the given value and `false`.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ // This is the lowering of `Rvalue::Not`
+ InstructionData::IntCompareImm {
+ opcode: Opcode::IcmpImm,
+ cond: IntCC::Equal,
+ arg,
+ imm,
+ } if imm.bits() == 0 => (arg, true),
+ _ => (arg, false),
+ }
+ } else {
+ (arg, false)
+ }
+}
+
+pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if bcx.func.dfg.value_type(arg).is_bool() {
+ return arg;
+ }
+
+ (|| {
+ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ arg_inst
+ } else {
+ return None;
+ };
+
+ match bcx.func.dfg[arg_inst] {
+ // A narrow load used as a branch condition; replace it with a widening load.
- InstructionData::UnaryBool {
- opcode: Opcode::Bconst,
- imm,
- } => {
++ InstructionData::Load { opcode: Opcode::Load, arg: ptr, flags, offset } => {
+ // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
+ // uextend.i32`. Just `uload8` is much faster.
+ match bcx.func.dfg.ctrl_typevar(arg_inst) {
+ types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
+ types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ })()
+ .unwrap_or_else(|| {
+ match bcx.func.dfg.value_type(arg) {
+ types::I8 | types::I16 => {
+ // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
+ bcx.ins().uextend(types::I32, arg)
+ }
+ _ => arg,
+ }
+ })
+}
+
+/// Returns whether the branch is statically known to be taken, or `None` if that isn't statically known.
+pub(crate) fn maybe_known_branch_taken(
+ bcx: &FunctionBuilder<'_>,
+ arg: Value,
+ test_zero: bool,
+) -> Option<bool> {
+ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ arg_inst
+ } else {
+ return None;
+ };
+
+ match bcx.func.dfg[arg_inst] {
- InstructionData::UnaryImm {
- opcode: Opcode::Iconst,
- imm,
- } => {
++ InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
+ if test_zero {
+ Some(!imm)
+ } else {
+ Some(imm)
+ }
+ }
++ InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
+ if test_zero {
+ Some(imm.bits() == 0)
+ } else {
+ Some(imm.bits() != 0)
+ }
+ }
+ _ => None,
+ }
+}
--- /dev/null
- OptimizeContext {
- ctx,
- stack_slot_usage_map,
- }
+//! This optimization replaces stack accesses with SSA variables and removes dead stores when possible.
+//!
+//! # Undefined behaviour
+//!
+//! This optimization is based on the assumption that stack slots which don't have their address
+//! leaked through `stack_addr` are only accessed using `stack_load` and `stack_store` in the
+//! function which has the stack slots. This optimization also assumes that stack slot accesses
+//! are never out of bounds. If these assumptions do not hold, this optimization may incorrectly
+//! remove a `stack_store` instruction, or incorrectly use a previously stored value as the value
+//! being loaded by a `stack_load`.
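+//!
+//! As an illustrative sketch, given
+//!
+//! ```text
+//! stack_store v0, ss0
+//! v1 = stack_load.i64 ss0
+//! ```
+//!
+//! this pass makes `v1` an alias of `v0`, and afterwards removes the `stack_store` as dead if the
+//! slot is never read again.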
+
+use std::collections::BTreeMap;
+use std::fmt;
+use std::ops::Not;
+
+use rustc_data_structures::fx::FxHashSet;
+
+use cranelift_codegen::cursor::{Cursor, FuncCursor};
+use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
+
+use crate::prelude::*;
+
+/// Workaround for `StackSlot` not implementing `Ord`.
+#[derive(Copy, Clone, PartialEq, Eq)]
+struct OrdStackSlot(StackSlot);
+
+impl fmt::Debug for OrdStackSlot {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.0)
+ }
+}
+
+impl PartialOrd for OrdStackSlot {
+ fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
+ self.0.as_u32().partial_cmp(&rhs.0.as_u32())
+ }
+}
+
+impl Ord for OrdStackSlot {
+ fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
+ self.0.as_u32().cmp(&rhs.0.as_u32())
+ }
+}
+
+#[derive(Debug, Default)]
+struct StackSlotUsage {
+ stack_addr: FxHashSet<Inst>,
+ stack_load: FxHashSet<Inst>,
+ stack_store: FxHashSet<Inst>,
+}
+
+impl StackSlotUsage {
+ fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> {
+ self.stack_store
+ .iter()
+ .cloned()
+ .filter(|&store| {
+ match spatial_overlap(&ctx.func, store, load) {
+ SpatialOverlap::No => false, // Can never be the source of the loaded value.
+ SpatialOverlap::Partial | SpatialOverlap::Full => true,
+ }
+ })
+ .filter(|&store| {
+ match temporal_order(ctx, store, load) {
+ TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
+ TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+ }
+ })
+ .collect::<Vec<Inst>>()
+ }
+
+ fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> {
+ self.stack_load
+ .iter()
+ .cloned()
+ .filter(|&load| {
+ match spatial_overlap(&ctx.func, store, load) {
+ SpatialOverlap::No => false, // Can never be the source of the loaded value.
+ SpatialOverlap::Partial | SpatialOverlap::Full => true,
+ }
+ })
+ .filter(|&load| {
+ match temporal_order(ctx, store, load) {
+ TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
+ TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+ }
+ })
+ .collect::<Vec<Inst>>()
+ }
+
+ fn remove_unused_stack_addr(func: &mut Function, inst: Inst) {
+ func.dfg.detach_results(inst);
+ func.dfg.replace(inst).nop();
+ }
+
+ fn remove_unused_load(func: &mut Function, load: Inst) {
+ func.dfg.detach_results(load);
+ func.dfg.replace(load).nop();
+ }
+
+ fn remove_dead_store(&mut self, func: &mut Function, store: Inst) {
+ func.dfg.replace(store).nop();
+ self.stack_store.remove(&store);
+ }
+
+ fn change_load_to_alias(&mut self, func: &mut Function, load: Inst, value: Value) {
+ let loaded_value = func.dfg.inst_results(load)[0];
+ let loaded_type = func.dfg.value_type(loaded_value);
+
+ if func.dfg.value_type(value) == loaded_type {
+ func.dfg.detach_results(load);
+ func.dfg.replace(load).nop();
+ func.dfg.change_to_alias(loaded_value, value);
+ } else {
+ func.dfg.replace(load).bitcast(loaded_type, value);
+ }
+
+ self.stack_load.remove(&load);
+ }
+}
+
+struct OptimizeContext<'a> {
+ ctx: &'a mut Context,
+ stack_slot_usage_map: BTreeMap<OrdStackSlot, StackSlotUsage>,
+}
+
+impl<'a> OptimizeContext<'a> {
+ fn for_context(ctx: &'a mut Context) -> Self {
+ ctx.flowgraph(); // Compute cfg and domtree.
+
+ // Record all stack_addr, stack_load and stack_store instructions.
+ let mut stack_slot_usage_map = BTreeMap::<OrdStackSlot, StackSlotUsage>::new();
+
+ let mut cursor = FuncCursor::new(&mut ctx.func);
+ while let Some(_block) = cursor.next_block() {
+ while let Some(inst) = cursor.next_inst() {
+ match cursor.func.dfg[inst] {
+ InstructionData::StackLoad {
+ opcode: Opcode::StackAddr,
+ stack_slot,
+ offset: _,
+ } => {
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_addr
+ .insert(inst);
+ }
+ InstructionData::StackLoad {
+ opcode: Opcode::StackLoad,
+ stack_slot,
+ offset: _,
+ } => {
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_load
+ .insert(inst);
+ }
+ InstructionData::StackStore {
+ opcode: Opcode::StackStore,
+ arg: _,
+ stack_slot,
+ offset: _,
+ } => {
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_store
+ .insert(inst);
+ }
+ _ => {}
+ }
+ }
+ }
+
- #[cfg_attr(not(debug_assertions), allow(unused_variables))] clif_comments: &mut crate::pretty_clif::CommentWriter,
++ OptimizeContext { ctx, stack_slot_usage_map }
+ }
+}
+
+pub(super) fn optimize_function(
+ ctx: &mut Context,
- InstructionData::Load {
- opcode: Opcode::Load,
- arg: addr,
- flags: _,
- offset,
- } => {
++ #[cfg_attr(not(debug_assertions), allow(unused_variables))]
++ clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+ combine_stack_addr_with_load_store(&mut ctx.func);
+
+ let mut opt_ctx = OptimizeContext::for_context(ctx);
+
+ // FIXME Repeat following instructions until fixpoint.
+
+ remove_unused_stack_addr_and_stack_load(&mut opt_ctx);
+
+ #[cfg(debug_assertions)]
+ {
+ for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map {
+ clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage));
+ }
+ }
+
+ for (stack_slot, users) in opt_ctx.stack_slot_usage_map.iter_mut() {
+ if users.stack_addr.is_empty().not() {
+ // Stack addr leaked; there may be unknown loads and stores.
+ // FIXME use stacked borrows to optimize
+ continue;
+ }
+
+ for load in users.stack_load.clone().into_iter() {
+ let potential_stores = users.potential_stores_for_load(&opt_ctx.ctx, load);
+
+ #[cfg(debug_assertions)]
+ for &store in &potential_stores {
+ clif_comments.add_comment(
+ load,
+ format!(
+ "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ opt_ctx.ctx.func.dfg.display_inst(load, None),
+ spatial_overlap(&opt_ctx.ctx.func, store, load),
+ temporal_order(&opt_ctx.ctx, store, load),
+ ),
+ );
+ }
+
+ match *potential_stores {
+ [] => {
+ #[cfg(debug_assertions)]
+ clif_comments
+ .add_comment(load, "[BUG?] Reading uninitialized memory".to_string());
+ }
+ [store]
+ if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full
+ && temporal_order(&opt_ctx.ctx, store, load)
+ == TemporalOrder::DefinitivelyBefore =>
+ {
+ // Only one store could have been the origin of the value.
+ let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0];
+
+ #[cfg(debug_assertions)]
+ clif_comments
+ .add_comment(load, format!("Store to load forward {} -> {}", store, load));
+
+ users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value);
+ }
+ _ => {} // FIXME implement this
+ }
+ }
+
+ for store in users.stack_store.clone().into_iter() {
+ let potential_loads = users.potential_loads_of_store(&opt_ctx.ctx, store);
+
+ #[cfg(debug_assertions)]
+ for &load in &potential_loads {
+ clif_comments.add_comment(
+ store,
+ format!(
+ "Potential load from store {} <- {} ({:?}, {:?})",
+ opt_ctx.ctx.func.dfg.display_inst(load, None),
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ spatial_overlap(&opt_ctx.ctx.func, store, load),
+ temporal_order(&opt_ctx.ctx, store, load),
+ ),
+ );
+ }
+
+ if potential_loads.is_empty() {
+ // Never loaded; can safely remove all stores and the stack slot.
+ // FIXME also remove stores when there is always a next store before a load.
+
+ #[cfg(debug_assertions)]
+ clif_comments.add_comment(
+ store,
+ format!(
+ "Remove dead stack store {} of {}",
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ stack_slot.0
+ ),
+ );
+
+ users.remove_dead_store(&mut opt_ctx.ctx.func, store);
+ }
+ }
+
+ if users.stack_store.is_empty() && users.stack_load.is_empty() {
+ opt_ctx.ctx.func.stack_slots[stack_slot.0].size = 0;
+ }
+ }
+}
+
+fn combine_stack_addr_with_load_store(func: &mut Function) {
+ // Turn load and store into stack_load and stack_store when possible.
+ let mut cursor = FuncCursor::new(func);
+ while let Some(_block) = cursor.next_block() {
+ while let Some(inst) = cursor.next_inst() {
+ match cursor.func.dfg[inst] {
- stack_addr_load_insts_users
- .get(inst)
- .map(|users| users.is_empty())
- .unwrap_or(true)
++ InstructionData::Load { opcode: Opcode::Load, arg: addr, flags: _, offset } => {
+ if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+ || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+ {
+ continue; // WORKAROUND: stack_load.i128 not yet implemented
+ }
+ if let Some((stack_slot, stack_addr_offset)) =
+ try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+ {
+ if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+ {
+ let ty = cursor.func.dfg.ctrl_typevar(inst);
+ cursor.func.dfg.replace(inst).stack_load(
+ ty,
+ stack_slot,
+ combined_offset,
+ );
+ }
+ }
+ }
+ InstructionData::Store {
+ opcode: Opcode::Store,
+ args: [value, addr],
+ flags: _,
+ offset,
+ } => {
+ if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+ || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+ {
+ continue; // WORKAROUND: stack_store.i128 not yet implemented
+ }
+ if let Some((stack_slot, stack_addr_offset)) =
+ try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+ {
+ if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+ {
+ cursor.func.dfg.replace(inst).stack_store(
+ value,
+ stack_slot,
+ combined_offset,
+ );
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+}
+
+fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
+ // FIXME incrementally rebuild on each call?
+ let mut stack_addr_load_insts_users = FxHashMap::<Inst, FxHashSet<Inst>>::default();
+
+ let mut cursor = FuncCursor::new(&mut opt_ctx.ctx.func);
+ while let Some(_block) = cursor.next_block() {
+ while let Some(inst) = cursor.next_inst() {
+ for &arg in cursor.func.dfg.inst_args(inst) {
+ if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) {
+ match cursor.func.dfg[arg_origin].opcode() {
+ Opcode::StackAddr | Opcode::StackLoad => {
+ stack_addr_load_insts_users
+ .entry(arg_origin)
+ .or_insert_with(FxHashSet::default)
+ .insert(inst);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+
+ #[cfg(debug_assertions)]
+ for inst in stack_addr_load_insts_users.keys() {
+ let mut is_recorded_stack_addr_or_stack_load = false;
+ for stack_slot_users in opt_ctx.stack_slot_usage_map.values() {
+ is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst)
+ || stack_slot_users.stack_load.contains(inst);
+ }
+ assert!(is_recorded_stack_addr_or_stack_load);
+ }
+
+ // Replace all unused stack_addr and stack_load instructions with nop.
+ let mut func = &mut opt_ctx.ctx.func;
+
+ for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() {
+ stack_slot_users
+ .stack_addr
+ .drain_filter(|inst| {
- stack_addr_load_insts_users
- .get(inst)
- .map(|users| users.is_empty())
- .unwrap_or(true)
++ stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)
+ })
+ .for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
+
+ stack_slot_users
+ .stack_load
+ .drain_filter(|inst| {
- if let InstructionData::StackLoad {
- opcode: Opcode::StackAddr,
- stack_slot,
- offset,
- } = func.dfg[addr_inst]
++ stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)
+ })
+ .for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
+ }
+}
+
+fn try_get_stack_slot_and_offset_for_addr(
+ func: &Function,
+ addr: Value,
+) -> Option<(StackSlot, Offset32)> {
+ if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
- InstructionData::StackLoad {
- opcode: Opcode::StackAddr,
- stack_slot,
- offset,
- }
- | InstructionData::StackLoad {
- opcode: Opcode::StackLoad,
- stack_slot,
- offset,
- }
++ if let InstructionData::StackLoad { opcode: Opcode::StackAddr, stack_slot, offset } =
++ func.dfg[addr_inst]
+ {
+ return Some((stack_slot, offset));
+ }
+ }
+ None
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum SpatialOverlap {
+ No,
+ Partial,
+ Full,
+}
+
+fn spatial_overlap(func: &Function, src: Inst, dest: Inst) -> SpatialOverlap {
+ fn inst_info(func: &Function, inst: Inst) -> (StackSlot, Offset32, u32) {
+ match func.dfg[inst] {
- let dest_end: i64 = dest_offset
- .try_add_i64(i64::from(dest_size))
- .unwrap()
- .into();
++ InstructionData::StackLoad { opcode: Opcode::StackAddr, stack_slot, offset }
++ | InstructionData::StackLoad { opcode: Opcode::StackLoad, stack_slot, offset }
+ | InstructionData::StackStore {
+ opcode: Opcode::StackStore,
+ stack_slot,
+ offset,
+ arg: _,
+ } => (stack_slot, offset, func.dfg.ctrl_typevar(inst).bytes()),
+ _ => unreachable!("{:?}", func.dfg[inst]),
+ }
+ }
+
+ debug_assert_ne!(src, dest);
+
+ let (src_ss, src_offset, src_size) = inst_info(func, src);
+ let (dest_ss, dest_offset, dest_size) = inst_info(func, dest);
+
+ if src_ss != dest_ss {
+ return SpatialOverlap::No;
+ }
+
+ if src_offset == dest_offset && src_size == dest_size {
+ return SpatialOverlap::Full;
+ }
+
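+ // Two ranges [offset, offset + size) overlap unless one ends at or before the start of the other.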
+ let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
++ let dest_end: i64 = dest_offset.try_add_i64(i64::from(dest_size)).unwrap().into();
+ if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
+ return SpatialOverlap::No;
+ }
+
+ SpatialOverlap::Partial
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum TemporalOrder {
+ /// `src` will never be executed before `dest`.
+ NeverBefore,
+
+ /// `src` may be executed before `dest`.
+ MaybeBefore,
+
+ /// `src` will always be executed before `dest`.
+ /// There may still be other instructions in between.
+ DefinitivelyBefore,
+}
+
+fn temporal_order(ctx: &Context, src: Inst, dest: Inst) -> TemporalOrder {
+ debug_assert_ne!(src, dest);
+
+ if ctx.domtree.dominates(src, dest, &ctx.func.layout) {
+ TemporalOrder::DefinitivelyBefore
+ } else if ctx.domtree.dominates(dest, src, &ctx.func.layout) {
+ TemporalOrder::NeverBefore
+ } else {
+ TemporalOrder::MaybeBefore
+ }
+}
--- /dev/null
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
+//! Defines [`Pointer`] which is used to improve the quality of the generated clif ir for pointer
+//! operations.
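+//!
+//! For example (an illustrative sketch), `ptr.offset_i64(fx, 8)` only records the constant offset,
+//! so a later `load` folds it into the load's immediate offset instead of emitting a separate
+//! `iadd_imm` instruction.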
+
+use crate::prelude::*;
+
+use rustc_target::abi::Align;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+/// A pointer pointing either to a certain address, a certain stack slot or nothing.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Pointer {
+ base: PointerBase,
+ offset: Offset32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PointerBase {
+ Addr(Value),
+ Stack(StackSlot),
+ Dangling(Align),
+}
+
+impl Pointer {
+ pub(crate) fn new(addr: Value) -> Self {
- Pointer {
- base: PointerBase::Stack(stack_slot),
- offset: Offset32::new(0),
- }
++ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
- pub(crate) fn const_addr<'a, 'tcx>(
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- addr: i64,
- ) -> Self {
++ Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
+ }
+
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
++ pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
+ let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
- Pointer {
- base: PointerBase::Dangling(align),
- offset: Offset32::new(0),
- }
++ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn dangling(align: Align) -> Self {
- pub(crate) fn get_addr<'a, 'tcx>(self, fx: &mut FunctionCx<'a, 'tcx, impl Module>) -> Value {
++ Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
+ }
+
+ #[cfg(debug_assertions)]
+ pub(crate) fn base_and_offset(self) -> (PointerBase, Offset32) {
+ (self.base, self.offset)
+ }
+
- if offset == 0 {
- base_addr
- } else {
- fx.bcx.ins().iadd_imm(base_addr, offset)
- }
++ pub(crate) fn get_addr(self, fx: &mut FunctionCx<'_, '_, '_>) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ let offset: i64 = self.offset.into();
- fx.bcx
- .ins()
- .stack_addr(fx.pointer_type, stack_slot, self.offset)
++ if offset == 0 { base_addr } else { fx.bcx.ins().iadd_imm(base_addr, offset) }
+ }
+ PointerBase::Stack(stack_slot) => {
- PointerBase::Dangling(align) => fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
++ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset)
++ }
++ PointerBase::Dangling(align) => {
++ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
- pub(crate) fn offset<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: Offset32,
- ) -> Self {
+ }
+ }
+
- pub(crate) fn offset_i64<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: i64,
- ) -> Self {
++ pub(crate) fn offset(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Offset32) -> Self {
+ self.offset_i64(fx, extra_offset.into())
+ }
+
- Pointer {
- base: self.base,
- offset: new_offset,
- }
++ pub(crate) fn offset_i64(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: i64) -> Self {
+ if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
- PointerBase::Dangling(align) => fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
++ Pointer { base: self.base, offset: new_offset }
+ } else {
+ let base_offset: i64 = self.offset.into();
+ if let Some(new_offset) = base_offset.checked_add(extra_offset) {
+ let base_addr = match self.base {
+ PointerBase::Addr(addr) => addr,
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+ }
- Pointer {
- base: PointerBase::Addr(addr),
- offset: Offset32::new(0),
- }
++ PointerBase::Dangling(align) => {
++ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
++ }
+ };
+ let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
- pub(crate) fn offset_value<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- extra_offset: Value,
- ) -> Self {
++ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ } else {
+ panic!(
+ "self.offset ({}) + extra_offset ({}) not representable in i64",
+ base_offset, extra_offset
+ );
+ }
+ }
+ }
+
- let base_addr = fx
- .bcx
- .ins()
- .stack_addr(fx.pointer_type, stack_slot, self.offset);
++ pub(crate) fn offset_value(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Value) -> Self {
+ match self.base {
+ PointerBase::Addr(addr) => Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ },
+ PointerBase::Stack(stack_slot) => {
- let addr = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
++ let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
+ offset: Offset32::new(0),
+ }
+ }
+ PointerBase::Dangling(align) => {
- pub(crate) fn load<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- ty: Type,
- flags: MemFlags,
- ) -> Value {
++ let addr =
++ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ }
+ }
+ }
+ }
+
- PointerBase::Stack(stack_slot) => {
- if ty == types::I128 || ty.is_vector() {
- // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
- fx.bcx.ins().load(ty, flags, base_addr, self.offset)
- } else {
- fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
- }
- }
++ pub(crate) fn load(self, fx: &mut FunctionCx<'_, '_, '_>, ty: Type, flags: MemFlags) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
- pub(crate) fn store<'a, 'tcx>(
- self,
- fx: &mut FunctionCx<'a, 'tcx, impl Module>,
- value: Value,
- flags: MemFlags,
- ) {
++ PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_load(ty, stack_slot, self.offset),
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+
- let val_ty = fx.bcx.func.dfg.value_type(value);
- if val_ty == types::I128 || val_ty.is_vector() {
- // WORKAROUND for stack_store.i128 and stack_store.iXxY not being implemented
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
- fx.bcx.ins().store(flags, value, base_addr, self.offset);
- } else {
- fx.bcx.ins().stack_store(value, stack_slot, self.offset);
- }
++ pub(crate) fn store(self, fx: &mut FunctionCx<'_, '_, '_>, value: Value, flags: MemFlags) {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ fx.bcx.ins().store(flags, value, base_addr, self.offset);
+ }
+ PointerBase::Stack(stack_slot) => {
++ fx.bcx.ins().stack_store(value, stack_slot, self.offset);
+ }
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+}
--- /dev/null
- format!(
- "abi {:?}",
- FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])
- ),
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg loc.idx param pass mode ssa flags ty
+//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
+//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
+//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//!
+//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//! sig0 = (i64, i64, i64) system_v
+//! sig1 = (i64, i64, i64) system_v
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//! v3 = stack_addr.i64 ss0
+//! v4 = stack_addr.i64 ss1
+//! store v2, v4
+//! v5 = stack_addr.i64 ss2
+//! jump block1
+//!
+//! block1:
+//! nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//! v6 = load.i64 v4
+//! store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//! v7 = load.i64 v5
+//! call fn0(v0, v3, v7)
+//! jump block2
+//!
+//! block2:
+//! nop
+//! ;
+//! ; return
+//! return
+//! }
+//! ```
+
+use std::fmt;
+use std::io::Write;
+
+use cranelift_codegen::{
+ entity::SecondaryMap,
+ ir::{entities::AnyEntity, function::DisplayFunctionAnnotations},
+ write::{FuncWriter, PlainWriter},
+};
+
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_session::config::OutputType;
+use rustc_target::abi::call::FnAbi;
+
+use crate::prelude::*;
+
+#[derive(Debug)]
+pub(crate) struct CommentWriter {
+ global_comments: Vec<String>,
+ entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+ pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ let global_comments = if cfg!(debug_assertions) {
+ vec![
+ format!("symbol {}", tcx.symbol_name(instance).name),
+ format!("instance {:?}", instance),
- CommentWriter {
- global_comments,
- entity_comments: FxHashMap::default(),
- }
++ format!("abi {:?}", FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
+ String::new(),
+ ]
+ } else {
+ vec![]
+ };
+
- impl<M: Module> FunctionCx<'_, '_, M> {
++ CommentWriter { global_comments, entity_comments: FxHashMap::default() }
+ }
+}
+
+#[cfg(debug_assertions)]
+impl CommentWriter {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.global_comments.push(comment.into());
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ use std::collections::hash_map::Entry;
+ match self.entity_comments.entry(entity.into()) {
+ Entry::Occupied(mut occ) => {
+ occ.get_mut().push('\n');
+ occ.get_mut().push_str(comment.as_ref());
+ }
+ Entry::Vacant(vac) => {
+ vac.insert(comment.into());
+ }
+ }
+ }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+ fn write_preamble(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ reg_info: Option<&isa::RegInfo>,
+ ) -> Result<bool, fmt::Error> {
+ for comment in &self.global_comments {
+ if !comment.is_empty() {
+ writeln!(w, "; {}", comment)?;
+ } else {
+ writeln!(w)?;
+ }
+ }
+ if !self.global_comments.is_empty() {
+ writeln!(w)?;
+ }
+
+ self.super_preamble(w, func, reg_info)
+ }
+
+ fn write_entity_definition(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ _func: &Function,
+ entity: AnyEntity,
+ value: &dyn fmt::Display,
+ ) -> fmt::Result {
+ write!(w, " {} = {}", entity, value)?;
+
+ if let Some(comment) = self.entity_comments.get(&entity) {
+ writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+ } else {
+ writeln!(w)
+ }
+ }
+
+ fn write_block_header(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ isa: Option<&dyn isa::TargetIsa>,
+ block: Block,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_block_header(w, func, isa, block, indent)
+ }
+
+ fn write_instruction(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ aliases: &SecondaryMap<Value, Vec<Value>>,
+ isa: Option<&dyn isa::TargetIsa>,
+ inst: Inst,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_instruction(w, func, aliases, isa, inst, indent)?;
+ if let Some(comment) = self.entity_comments.get(&inst.into()) {
+ writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+ }
+ Ok(())
+ }
+}
+
+#[cfg(debug_assertions)]
- cfg!(debug_assertions)
- || tcx
- .sess
- .opts
- .output_types
- .contains_key(&OutputType::LlvmAssembly)
++impl FunctionCx<'_, '_, '_> {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.clif_comments.add_global_comment(comment);
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ self.clif_comments.add_comment(entity, comment);
+ }
+}
+
+pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
- write_ir_file(
- tcx,
- &format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
- |file| {
- let value_ranges = isa.map(|isa| {
- context
- .build_value_labels_ranges(isa)
- .expect("value location ranges")
- });
++ tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+}
+
+pub(crate) fn write_ir_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ name: &str,
+ write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
+) {
+ if !should_write_ir(tcx) {
+ return;
+ }
+
+ let clif_output_dir = tcx.output_filenames(LOCAL_CRATE).with_extension("clif");
+
+ match std::fs::create_dir(&clif_output_dir) {
+ Ok(()) => {}
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+ res @ Err(_) => res.unwrap(),
+ }
+
+ let clif_file_name = clif_output_dir.join(name);
+
+ let res: std::io::Result<()> = try {
+ let mut file = std::fs::File::create(clif_file_name)?;
+ write(&mut file)?;
+ };
+ if let Err(err) = res {
+ tcx.sess.warn(&format!("error writing ir file: {}", err));
+ }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ postfix: &str,
+ isa: Option<&dyn cranelift_codegen::isa::TargetIsa>,
+ instance: Instance<'tcx>,
+ context: &cranelift_codegen::Context,
+ mut clif_comments: &CommentWriter,
+) {
- let mut clif = String::new();
- cranelift_codegen::write::decorate_function(
- &mut clif_comments,
- &mut clif,
- &context.func,
- &DisplayFunctionAnnotations {
- isa: Some(&*crate::build_isa(tcx.sess)),
- value_ranges: value_ranges.as_ref(),
- },
- )
- .unwrap();
++ write_ir_file(tcx, &format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix), |file| {
++ let value_ranges =
++ isa.map(|isa| context.build_value_labels_ranges(isa).expect("value location ranges"));
+
- writeln!(file, "test compile")?;
- writeln!(file, "set is_pic")?;
- writeln!(file, "set enable_simd")?;
- writeln!(file, "target {} haswell", crate::target_triple(tcx.sess))?;
- writeln!(file)?;
- file.write_all(clif.as_bytes())?;
- Ok(())
- },
- );
++ let mut clif = String::new();
++ cranelift_codegen::write::decorate_function(
++ &mut clif_comments,
++ &mut clif,
++ &context.func,
++ &DisplayFunctionAnnotations {
++ isa: Some(&*crate::build_isa(tcx.sess)),
++ value_ranges: value_ranges.as_ref(),
++ },
++ )
++ .unwrap();
+
- impl<M: Module> fmt::Debug for FunctionCx<'_, '_, M> {
++ writeln!(file, "test compile")?;
++ writeln!(file, "set is_pic")?;
++ writeln!(file, "set enable_simd")?;
++ writeln!(file, "target {} haswell", crate::target_triple(tcx.sess))?;
++ writeln!(file)?;
++ file.write_all(clif.as_bytes())?;
++ Ok(())
++ });
+}
+
++impl fmt::Debug for FunctionCx<'_, '_, '_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.local_map)?;
+
+ let mut clif = String::new();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &self.clif_comments,
+ &mut clif,
+ &self.bcx.func,
+ &DisplayFunctionAnnotations::default(),
+ )
+ .unwrap();
+ writeln!(f, "\n{}", clif)
+ }
+}
--- /dev/null
- let stem = linker
- .file_stem()
- .and_then(|stem| stem.to_str())
- .unwrap_or_else(|| {
- sess.fatal("couldn't extract file stem from specified linker")
- });
+//! Locating the various executables that are part of a C toolchain.
+
+use std::path::PathBuf;
+
+use rustc_middle::bug;
+use rustc_session::Session;
+use rustc_target::spec::LinkerFlavor;
+
+/// Tries to infer the path of a binary for the target toolchain from the linker name.
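+/// For example (hypothetical values), with the linker `aarch64-linux-gnu-gcc` and `tool = "ar"`
+/// this returns a path ending in `aarch64-linux-gnu-ar`.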
+pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
+ let (mut linker, _linker_flavor) = linker_and_flavor(sess);
+ let linker_file_name = linker
+ .file_name()
+ .and_then(|name| name.to_str())
+ .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+
+ if linker_file_name == "ld.lld" {
+ if tool != "ld" {
+ linker.set_file_name(tool)
+ }
+ } else {
+ let tool_file_name = linker_file_name
+ .replace("ld", tool)
+ .replace("gcc", tool)
+ .replace("clang", tool)
+ .replace("cc", tool);
+
+ linker.set_file_name(tool_file_name)
+ }
+
+ linker
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/5db778affee7c6600c8e7a177c48282dab3f6292/src/librustc_codegen_ssa/back/link.rs#L848-L931
+fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
+ fn infer_from(
+ sess: &Session,
+ linker: Option<PathBuf>,
+ flavor: Option<LinkerFlavor>,
+ ) -> Option<(PathBuf, LinkerFlavor)> {
+ match (linker, flavor) {
+ (Some(linker), Some(flavor)) => Some((linker, flavor)),
+ // only the linker flavor is known; use the default linker for the selected flavor
+ (None, Some(flavor)) => Some((
+ PathBuf::from(match flavor {
+ LinkerFlavor::Em => {
+ if cfg!(windows) {
+ "emcc.bat"
+ } else {
+ "emcc"
+ }
+ }
+ LinkerFlavor::Gcc => {
+ if cfg!(any(target_os = "solaris", target_os = "illumos")) {
+ // On historical Solaris systems, "cc" may have
+ // been Sun Studio, which is not flag-compatible
+ // with "gcc". This history casts a long shadow,
+ // and many modern illumos distributions today
+ // ship GCC as "gcc" without also making it
+ // available as "cc".
+ "gcc"
+ } else {
+ "cc"
+ }
+ }
+ LinkerFlavor::Ld => "ld",
+ LinkerFlavor::Msvc => "link.exe",
+ LinkerFlavor::Lld(_) => "lld",
+ LinkerFlavor::PtxLinker => "rust-ptx-linker",
+ }),
+ flavor,
+ )),
+ (Some(linker), None) => {
- if let Some(ret) = infer_from(
- sess,
- sess.opts.cg.linker.clone(),
- sess.opts.cg.linker_flavor,
- ) {
++ let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
++ sess.fatal("couldn't extract file stem from specified linker")
++ });
+
+ let flavor = if stem == "emcc" {
+ LinkerFlavor::Em
+ } else if stem == "gcc"
+ || stem.ends_with("-gcc")
+ || stem == "clang"
+ || stem.ends_with("-clang")
+ {
+ LinkerFlavor::Gcc
+ } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
+ LinkerFlavor::Ld
+ } else if stem == "link" || stem == "lld-link" {
+ LinkerFlavor::Msvc
+ } else if stem == "lld" || stem == "rust-lld" {
+ LinkerFlavor::Lld(sess.target.lld_flavor)
+ } else {
+ // fall back to the value in the target spec
+ sess.target.linker_flavor
+ };
+
+ Some((linker, flavor))
+ }
+ (None, None) => None,
+ }
+ }
+
+ // linker and linker flavor specified via command line have precedence over what the target
+ // specification specifies
++ if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+ return ret;
+ }
+
+ if let Some(ret) = infer_from(
+ sess,
+ sess.target.linker.clone().map(PathBuf::from),
+ Some(sess.target.linker_flavor),
+ ) {
+ return ret;
+ }
+
+ bug!("Not enough information provided to determine how to invoke the linker");
+}
--- /dev/null
- fn codegen_print(fx: &mut FunctionCx<'_, '_, impl Module>, msg: &str) {
+//! Helpers used to print a message and abort in case of certain panics and some detected UB.
+
+use crate::prelude::*;
+
- pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
++fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
+ let puts = fx
+ .cx
+ .module
+ .declare_function(
+ "puts",
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::triple_default(fx.triple()),
+ params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+ returns: vec![AbiParam::new(types::I32)],
+ },
+ )
+ .unwrap();
+ let puts = fx.cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ {
+ fx.add_comment(puts, "puts");
+ }
+
+ let symbol_name = fx.tcx.symbol_name(fx.instance);
+ let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, symbol_name, msg);
+ let msg_ptr = fx.anonymous_str("trap", &real_msg);
+ fx.bcx.ins().call(puts, &[msg_ptr]);
+}
+
+/// Trap code: user1
- pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
++pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ fx.bcx.ins().trap(TrapCode::User(1));
+}
+
+/// Use this, for example, when a function call should never return. This will fill the current block,
+/// so you can **not** add instructions to it afterwards.
+///
+/// Trap code: user65535
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+/// Like `trap_unreachable` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unreachable_ret_value<'tcx>(
- pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, impl Module>, msg: impl AsRef<str>) {
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+ msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+ codegen_print(fx, msg.as_ref());
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+ CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
+
+/// Use this when something is unimplemented, but `libcore` or `libstd` requires it in order to codegen.
+/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
+/// to it afterwards.
+///
+/// Trap code: user65535
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
+}
+
+/// Like `trap_unimplemented` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented_ret_value<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+ msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+ trap_unimplemented(fx, msg);
+ CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
--- /dev/null
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
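+///
+/// For example, unsizing `&[u8; 4]` to `&[u8]` produces the constant length `4`, while unsizing
+/// `&T` to `&dyn Trait` produces the corresponding vtable pointer.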
+pub(crate) fn unsized_info<'tcx>(
- fx.tcx
- .struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Value>,
+) -> Value {
+ let (source, target) =
- (&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
- fx.pointer_type,
- len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64,
- ),
++ fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+ match (&source.kind(), &target.kind()) {
- _ => bug!(
- "unsized_info: invalid unsizing {:?} -> {:?}",
- source,
- target
- ),
++ (&ty::Array(_, len), &ty::Slice(_)) => fx
++ .bcx
++ .ins()
++ .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
+ (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+ // For now, upcasts are limited to changes in marker
+ // traits, and hence never actually require an actual
+ // change to the vtable.
+ old_info.expect("unsized_info: missing old info for trait upcast")
+ }
+ (_, &ty::Dynamic(ref data, ..)) => {
+ crate::vtable::get_vtable(fx, fx.layout_of(source), data.principal())
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
+fn unsize_thin_ptr<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: Value,
+ src_layout: TyAndLayout<'tcx>,
+ dst_layout: TyAndLayout<'tcx>,
+) -> (Value, Value) {
+ match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+ | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ assert!(!fx.layout_of(a).is_unsized());
+ (src, unsized_info(fx, a, b, None))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+ assert!(!fx.layout_of(a).is_unsized());
+ (src, unsized_info(fx, a, b, None))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(fx, i);
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ if src_f.is_zst() {
+ continue;
+ }
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(fx, i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_thin_ptr(fx, src, src_f, dst_f));
+ }
+ result.unwrap()
+ }
+ _ => bug!("unsize_thin_ptr: called on bad types"),
+ }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty` and store the result in `dst`
+pub(crate) fn coerce_unsized_into<'tcx>(
- let (base, info) = if fx
- .layout_of(src.layout().ty.builtin_deref(true).unwrap().ty)
- .is_unsized()
- {
- // fat-ptr to fat-ptr unsize preserves the vtable
- // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
- src.load_scalar_pair(fx)
- } else {
- let base = src.load_scalar(fx);
- unsize_thin_ptr(fx, base, src.layout(), dst.layout())
- };
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: CValue<'tcx>,
+ dst: CPlace<'tcx>,
+) {
+ let src_ty = src.layout().ty;
+ let dst_ty = dst.layout().ty;
+ let mut coerce_ptr = || {
- _ => bug!(
- "coerce_unsized_into: invalid coercion {:?} -> {:?}",
- src_ty,
- dst_ty
- ),
++ let (base, info) =
++ if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
++ // fat-ptr to fat-ptr unsize preserves the vtable
++ // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
++ src.load_scalar_pair(fx)
++ } else {
++ let base = src.load_scalar(fx);
++ unsize_thin_ptr(fx, base, src.layout(), dst.layout())
++ };
+ dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+ };
+ match (&src_ty.kind(), &dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..))
+ | (&ty::Ref(..), &ty::RawPtr(..))
+ | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+ let src_f = src.value_field(fx, mir::Field::new(i));
+ let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+ if dst_f.layout().is_zst() {
+ continue;
+ }
+
+ if src_f.layout().ty == dst_f.layout().ty {
+ dst_f.write_cvalue(fx, src_f);
+ } else {
+ coerce_unsized_into(fx, src_f, dst_f);
+ }
+ }
+ }
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
++ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
+ }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
- let size = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.size.bytes() as i64);
- let align = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ info: Value,
+) -> (Value, Value) {
+ if !layout.is_unsized() {
- (
- crate::vtable::size_of_obj(fx, info),
- crate::vtable::min_align_of_obj(fx, info),
- )
++ let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
++ let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
+ return (size, align);
+ }
+ match layout.ty.kind() {
+ ty::Dynamic(..) => {
+ // load size/align from vtable
++ (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(fx, 0);
+ // The info in this case is the length of the slice/str, so the size is that
+ // times the unit size.
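+ // e.g. for a `[u16]` of length `info` this is `info * 2` with align 2.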
+ (
+ fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
++ fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!layout.ty.is_simd());
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_layout = layout.field(fx, i);
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `unsized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = layout.ty.kind() {
+ if def.repr.packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
++ let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+ let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align : 0)`
+ //
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
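+ //
+ // e.g. size == 13, align == 8: (13 + 7) & -8 == 16.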
+ let addend = fx.bcx.ins().iadd_imm(align, -1);
+ let add = fx.bcx.ins().iadd(size, addend);
+ let neg = fx.bcx.ins().ineg(align);
+ let size = fx.bcx.ins().band(add, neg);
+
+ (size, align)
+ }
+ }
+}
--- /dev/null
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ base: Pointer,
+ extra: Option<Value>,
+ layout: TyAndLayout<'tcx>,
+ field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_layout = layout.field(&*fx, field.index());
+
++ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
++ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+ };
+
+ if let Some(extra) = extra {
+ if !field_layout.is_unsized() {
+ return simple(fx);
+ }
+ match field_layout.ty.kind() {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+ ty::Adt(def, _) if def.repr.packed() => {
+ assert_eq!(layout.align.abi.bytes(), 1);
+ simple(fx)
+ }
+ _ => {
+ // We have to align the offset for DSTs.
+ let unaligned_offset = field_offset.bytes();
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
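+ // Round `unaligned_offset` up to `unsized_align` with the usual
+ // `(offset + align - 1) & -align` trick, computed on SSA values.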
+ let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+ let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+ let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+ let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+ let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+ let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+ (base.offset_value(fx, offset), field_layout)
+ }
+ }
+ } else {
+ simple(fx)
+ }
+}
+
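+/// Offset of the second scalar of a `ScalarPair`: the size of the first
+/// scalar rounded up to the alignment of the second. For a `(u8, u32)` pair,
+/// for example, `b` lives at offset 4, not 1.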
+fn scalar_pair_calculate_b_offset(
+ tcx: TyCtxt<'_>,
+ a_scalar: &Scalar,
+ b_scalar: &Scalar,
+) -> Offset32 {
++ let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
+ Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+ /// Stored in memory behind a pointer, with optional metadata (length or vtable) for unsized values.
+ ByRef(Pointer, Option<Value>),
+ /// Held in a single Cranelift SSA value.
+ ByVal(Value),
+ /// Held in two SSA values, used for `ScalarPair` layouts such as fat pointers.
+ ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+ pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, None), layout)
+ }
+
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+ }
+
+ pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByVal(value), layout)
+ }
+
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByValPair(value, extra), layout)
+ }
+
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.1
+ }
+
+ // FIXME remove
++ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => (ptr, meta),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+ let cplace = CPlace::new_stack_slot(fx, layout);
+ cplace.write_cvalue(fx, self);
+ (cplace.to_ptr(), None)
+ }
+ }
+ }
+
+ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+ }
+ }
+
+ /// Load a value with layout.abi of scalar
++ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let clif_ty = match layout.abi {
+ Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
+ Abi::Vector { ref element, count } => {
+ scalar_to_clif_type(fx.tcx, element.clone())
+ .by(u16::try_from(count).unwrap())
+ .unwrap()
+ }
+ _ => unreachable!("{:?}", layout.ty),
+ };
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, clif_ty, flags)
+ }
+ CValueInner::ByVal(value) => value,
+ CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+ CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+ }
+ }
+
+ /// Load a value pair with layout.abi of scalar pair
++ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match &layout.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("load_scalar_pair({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let val1 = ptr.load(fx, clif_ty1, flags);
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (val1, val2)
+ }
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
+ CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+ CValueInner::ByValPair(val1, val2) => (val1, val2),
+ }
+ }
+
+ pub(crate) fn value_field(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
+ }
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
+ }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
+ CValueInner::ByRef(ptr, None) => {
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+ CValue::by_ref(field_ptr, field_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
++ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_unsized_into(fx, self, dest);
+ }
+
+ /// If `ty` is signed, `const_val` must already be sign extended.
+ pub(crate) fn const_val(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ const_val: ty::ScalarInt,
+ ) -> CValue<'tcx> {
++ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+ use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+ let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+ if let ty::Bool = layout.ty.kind() {
+ assert!(
+ const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
+ }
+
+ let val = match layout.ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
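+ // `iconst` can only materialize up to 64 bits; build the 128-bit
+ // value from two halves with `iconcat`.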
+ let const_val = const_val.to_bits(layout.size).unwrap();
+ let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
++ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
++ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
++ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ }
+ ty::Float(FloatTy::F32) => {
+ fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+ }
+ _ => panic!(
+ "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+ layout.ty
+ ),
+ };
+
+ CValue::by_val(val, layout)
+ }
+
+ pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
++ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
++ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert_eq!(self.layout().abi, layout.abi);
+ CValue(self.0, layout)
+ }
+}
+
+/// A place where a value can be written to or read from.
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+ inner: CPlaceInner,
+ layout: TyAndLayout<'tcx>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+ /// A single SSA variable.
+ Var(Local, Variable),
+ /// A pair of SSA variables, used for `ScalarPair` layouts.
+ VarPair(Local, Variable, Variable),
+ /// A single lane of a vector stored in an SSA variable.
+ VarLane(Local, Variable, u8),
+ /// A memory location, with optional metadata for unsized places.
+ Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ pub(crate) fn inner(&self) -> &CPlaceInner {
+ &self.inner
+ }
+
+ pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
++ CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
+ }
+
+ pub(crate) fn new_stack_slot(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ assert!(!layout.is_unsized());
+ if layout.size.bytes() == 0 {
+ return CPlace::no_place(layout);
+ }
+
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
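+ // e.g. a 20 byte slot gets rounded up to 32 bytes.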
+ size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
+ offset: None,
+ });
++ CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
+ }
+
+ pub(crate) fn new_var(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
++ CPlace { inner: CPlaceInner::Var(local, var), layout }
+ }
+
+ pub(crate) fn new_var_pair(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var1 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ let var2 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+
+ let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+ fx.bcx.declare_var(var1, ty1);
+ fx.bcx.declare_var(var2, ty2);
++ CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
+ }
+
+ pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
++ CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
+ }
+
+ pub(crate) fn for_ptr_with_extra(
+ ptr: Pointer,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
++ CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
+ }
+
++ pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
+ let layout = self.layout();
+ match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ let val = fx.bcx.use_var(var);
++ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let val1 = fx.bcx.use_var(var1);
++ //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ let val2 = fx.bcx.use_var(var2);
++ //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ CValue::by_val_pair(val1, val2, layout)
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let val = fx.bcx.use_var(var);
++ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ let val = fx.bcx.ins().extractlane(val, lane);
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::Addr(ptr, extra) => {
+ if let Some(extra) = extra {
+ CValue::by_ref_unsized(ptr, extra, layout)
+ } else {
+ CValue::by_ref(ptr, layout)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn to_ptr(self) -> Pointer {
+ match self.to_ptr_maybe_unsized() {
+ (ptr, None) => ptr,
+ (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+ match self.inner {
+ CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+ CPlaceInner::Var(_, _)
+ | CPlaceInner::VarPair(_, _, _)
+ | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+ }
+ }
+
++ pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
+ assert_assignable(fx, from.layout().ty, self.layout().ty);
+
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+ }
+
+ pub(crate) fn write_cvalue_transmute(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ ) {
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+ }
+
+ fn write_cvalue_maybe_transmute(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
+ ) {
+ fn transmute_value<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ var: Variable,
+ data: Value,
+ dst_ty: Type,
+ ) {
+ let src_ty = fx.bcx.func.dfg.value_type(data);
+ assert_eq!(
+ src_ty.bytes(),
+ dst_ty.bytes(),
+ "write_cvalue_transmute: {:?} -> {:?}",
+ src_ty,
+ dst_ty,
+ );
+ let data = match (src_ty, dst_ty) {
+ (_, _) if src_ty == dst_ty => data,
+
+ // This is a `write_cvalue_transmute`.
+ (types::I32, types::F32)
+ | (types::F32, types::I32)
+ | (types::I64, types::F64)
+ | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
+ _ if src_ty.is_vector() && dst_ty.is_vector() => {
+ fx.bcx.ins().raw_bitcast(dst_ty, data)
+ }
+ _ if src_ty.is_vector() || dst_ty.is_vector() => {
+ // FIXME do something more efficient for transmutes between vectors and integers.
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (src_ty.bytes() + 15) / 16 * 16,
+ offset: None,
+ });
+ let ptr = Pointer::stack_slot(stack_slot);
+ ptr.store(fx, data, MemFlags::trusted());
+ ptr.load(fx, dst_ty, MemFlags::trusted())
+ }
+ _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+ };
++ //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, data);
+ }
+
+ assert_eq!(self.layout().size, from.layout().size);
+
+ #[cfg(debug_assertions)]
+ {
+ use cranelift_codegen::cursor::{Cursor, CursorPosition};
+ let cur_block = match fx.bcx.cursor().position() {
+ CursorPosition::After(block) => block,
+ _ => unreachable!(),
+ };
+ fx.add_comment(
+ fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+ format!(
+ "{}: {:?}: {:?} <- {:?}: {:?}",
+ method,
+ self.inner(),
+ self.layout().ty,
+ from.0,
+ from.layout().ty
+ ),
+ );
+ }
+
+ let dst_layout = self.layout();
+ let to_ptr = match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+ transmute_value(fx, var, data, dst_ty);
+ return;
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
+ let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+ transmute_value(fx, var1, data1, dst_ty1);
+ transmute_value(fx, var2, data2, dst_ty2);
+ return;
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let data = from.load_scalar(fx);
+
+ // First get the old vector
+ let vector = fx.bcx.use_var(var);
++ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+ // Next insert the written lane into the vector
+ let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+ // Finally write the new vector
++ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, vector);
+
+ return;
+ }
+ CPlaceInner::Addr(ptr, None) => {
+ if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+ return;
+ }
+ ptr
+ }
+ CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+ };
+
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ match from.layout().abi {
+ // FIXME make Abi::Vector work too
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+ let (value, extra) = from.load_scalar_pair(fx);
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, value, flags);
+ to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+ return;
+ }
+ _ => {}
+ }
+
+ match from.0 {
+ CValueInner::ByVal(val) => {
+ to_ptr.store(fx, val, flags);
+ }
+ CValueInner::ByValPair(_, _) => {
++ bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+ }
+ CValueInner::ByRef(from_ptr, None) => {
+ let from_addr = from_ptr.get_addr(fx);
+ let to_addr = to_ptr.get_addr(fx);
+ let src_layout = from.1;
+ let size = dst_layout.size.bytes();
+ let src_align = src_layout.align.abi.bytes() as u8;
+ let dst_align = dst_layout.align.abi.bytes() as u8;
+ fx.bcx.emit_small_memory_copy(
+ fx.cx.module.target_config(),
+ to_addr,
+ from_addr,
+ size,
+ dst_align,
+ src_align,
+ true,
+ );
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ pub(crate) fn place_field(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ if let Abi::Vector { .. } = layout.abi {
+ return CPlace {
+ inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ }
+ CPlaceInner::VarPair(local, var1, var2) => {
+ let layout = layout.field(&*fx, field.index());
+
+ match field.as_u32() {
++ 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
++ 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
+ _ => unreachable!("field should be 0 or 1"),
+ }
+ }
+ _ => {}
+ }
+
+ let (base, extra) = self.to_ptr_maybe_unsized();
+
+ let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+ if field_layout.is_unsized() {
+ CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+ } else {
+ CPlace::for_ptr(field_ptr, field_layout)
+ }
+ }
+
+ pub(crate) fn place_index(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ index: Value,
+ ) -> CPlace<'tcx> {
+ let (elem_layout, ptr) = match self.layout().ty.kind() {
+ ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
+ ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
+ _ => bug!("place_index({:?})", self.layout().ty),
+ };
+
++ let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
+
+ CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+ }
+
++ pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
+ let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+ if has_ptr_meta(fx.tcx, inner_layout.ty) {
+ let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+ CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+ } else {
++ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+ }
+ }
+
+ pub(crate) fn place_ref(
+ self,
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ if has_ptr_meta(fx.tcx, self.layout().ty) {
+ let (ptr, extra) = self.to_ptr_maybe_unsized();
+ CValue::by_val_pair(
+ ptr.get_addr(fx),
+ extra.expect("unsized type without metadata"),
+ layout,
+ )
+ } else {
+ CValue::by_val(self.to_ptr().get_addr(fx), layout)
+ }
+ }
+
+ pub(crate) fn downcast_variant(
+ self,
++ fx: &FunctionCx<'_, '_, 'tcx>,
+ variant: VariantIdx,
+ ) -> Self {
+ assert!(!self.layout().is_unsized());
+ let layout = self.layout().for_variant(fx, variant);
++ CPlace { inner: self.inner, layout }
+ }
+}
+
+#[track_caller]
+pub(crate) fn assert_assignable<'tcx>(
++ fx: &FunctionCx<'_, '_, 'tcx>,
+ from_ty: Ty<'tcx>,
+ to_ty: Ty<'tcx>,
+) {
+ match (from_ty.kind(), to_ty.kind()) {
+ (ty::Ref(_, a, _), ty::Ref(_, b, _))
+ | (
+ ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+ ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+ ) => {
+ assert_assignable(fx, a, b);
+ }
+ (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
+ | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
+ assert_assignable(fx, a, b);
+ }
+ (ty::FnPtr(_), ty::FnPtr(_)) => {
+ let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ from_ty.fn_sig(fx.tcx),
+ );
+ let to_sig = fx
+ .tcx
+ .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ assert_eq!(
+ from_sig, to_sig,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ // fn(&T) -> for<'l> fn(&'l T) is allowed
+ }
+ (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
+ for (from, to) in from_traits.iter().zip(to_traits) {
++ let from =
++ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
++ let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+ assert_eq!(
+ from, to,
+ "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+ from_traits, to_traits, fx,
+ );
+ }
+ // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+ }
+ _ => {
+ assert_eq!(
+ from_ty, to_ty,
+ "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+ from_ty, to_ty, fx,
+ );
+ }
+ }
+}
--- /dev/null
+//! Codegen vtables and vtable accesses.
+//!
+//! See librustc_codegen_llvm/meth.rs for reference
+// FIXME dedup this logic between miri, cg_llvm and cg_clif
+
+use crate::prelude::*;
+
+const DROP_FN_INDEX: usize = 0;
+const SIZE_INDEX: usize = 1;
+const ALIGN_INDEX: usize = 2;
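+
+// These indices assume a vtable layout of one pointer-sized slot each:
+//
+// [ drop_in_place | size | align | method 0 | method 1 | ... ]
+//
+// `get_ptr_and_method_ref` below relies on this when it loads method `idx`
+// from offset `(idx + 3) * usize_size`.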
+
+fn vtable_memflags() -> MemFlags {
+ let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+ flags.set_readonly(); // A vtable is always read-only.
+ flags
+}
+
++pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (DROP_FN_INDEX * usize_size) as i32,
+ )
+}
+
++pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (SIZE_INDEX * usize_size) as i32,
+ )
+}
+
++pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (ALIGN_INDEX * usize_size) as i32,
+ )
+}
+
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ idx: usize,
+) -> (Value, Value) {
+ let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ arg.load_scalar_pair(fx)
+ } else {
+ let (ptr, vtable) = arg.try_to_ptr().unwrap();
+ (ptr.get_addr(fx), vtable.unwrap())
+ };
+
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
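+ // Method pointers start after the drop_in_place, size and align slots, hence `idx + 3`.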
+ let func_ref = fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ ((idx + 3) * usize_size as usize) as i32,
+ );
+ (ptr, func_ref)
+}
+
+pub(crate) fn get_vtable<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+ let data_id = if let Some(data_id) = fx.cx.vtables.get(&(layout.ty, trait_ref)) {
+ *data_id
+ } else {
+ let data_id = build_vtable(fx, layout, trait_ref);
+ fx.cx.vtables.insert((layout.ty, trait_ref), data_id);
+ data_id
+ };
+
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}
+
+fn build_vtable<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> DataId {
+ let tcx = fx.tcx;
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+
+ let drop_in_place_fn = import_function(
+ tcx,
++ fx.cx.module,
+ Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
+ );
+
+ let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
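+ // The two `None`s reserve the size and align slots; those are filled in with
+ // plain integers by `write_usize` below instead of with relocations.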
+
+ let methods_root;
+ let methods = if let Some(trait_ref) = trait_ref {
+ methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, layout.ty));
+ methods_root.iter()
+ } else {
+ (&[]).iter()
+ };
+ let methods = methods.cloned().map(|opt_mth| {
+ opt_mth.map(|(def_id, substs)| {
+ import_function(
+ tcx,
++ fx.cx.module,
+ Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .polymorphize(fx.tcx),
+ )
+ })
+ });
+ components.extend(methods);
+
+ let mut data_ctx = DataContext::new();
+ let mut data = ::std::iter::repeat(0u8)
+ .take(components.len() * usize_size)
+ .collect::<Vec<u8>>()
+ .into_boxed_slice();
+
+ write_usize(fx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
+ write_usize(fx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
+ data_ctx.define(data);
+
+ for (i, component) in components.into_iter().enumerate() {
+ if let Some(func_id) = component {
+ let func_ref = fx.cx.module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
+ }
+ }
+
+ data_ctx.set_align(fx.tcx.data_layout.pointer_align.pref.bytes());
+
+ let data_id = fx
+ .cx
+ .module
+ .declare_data(
+ &format!(
+ "__vtable.{}.for.{:?}.{}",
+ trait_ref
+ .as_ref()
+ .map(|trait_ref| format!("{:?}", trait_ref.skip_binder()).into())
+ .unwrap_or(std::borrow::Cow::Borrowed("???")),
+ layout.ty,
+ fx.cx.vtables.len(),
+ ),
+ Linkage::Local,
+ false,
+ false,
+ )
+ .unwrap();
+
+ // FIXME don't duplicate definitions in lazy jit mode
+ let _ = fx.cx.module.define_data(data_id, &data_ctx);
+
+ data_id
+}
+
+fn write_usize(tcx: TyCtxt<'_>, buf: &mut [u8], idx: usize, num: u64) {
++ let pointer_size =
++ tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.usize)).unwrap().size.bytes() as usize;
+ let target = &mut buf[idx * pointer_size..(idx + 1) * pointer_size];
+
+ match tcx.data_layout.endian {
+ rustc_target::abi::Endian::Little => match pointer_size {
+ 4 => target.copy_from_slice(&(num as u32).to_le_bytes()),
+ 8 => target.copy_from_slice(&(num as u64).to_le_bytes()),
+ _ => todo!("pointer size {} is not yet supported", pointer_size),
+ },
+ rustc_target::abi::Endian::Big => match pointer_size {
+ 4 => target.copy_from_slice(&(num as u32).to_be_bytes()),
+ 8 => target.copy_from_slice(&(num as u64).to_be_bytes()),
+ _ => todo!("pointer size {} is not yet supported", pointer_size),
+ },
+ }
+}
--- /dev/null
++#!/usr/bin/env bash
+set -e
+
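+# First build cg_clif without a sysroot and run the tests that don't need one.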
+./build.sh --sysroot none "$@"
+
+rm -r target/out || true
+
+scripts/tests.sh no_sysroot
+
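+# Then rebuild with the full sysroot and run the remaining test suites.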
+./build.sh "$@"
+
+scripts/tests.sh base_sysroot
+scripts/tests.sh extended_sysroot