--- /dev/null
+{
+ // source for rustc_* is not included in the rust-src component; disable the errors about this
+ "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate"],
++ "rust-analyzer.assist.importMergeBehaviour": "last",
+ "rust-analyzer.cargo.loadOutDirsFromCheck": true,
+ "rust-analyzer.linkedProjects": [
+ "./Cargo.toml",
+ //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+ {
+ "roots": [
+ "./example/mini_core.rs",
+ "./example/mini_core_hello_world.rs",
+ "./example/mod_bench.rs"
+ ],
+ "crates": [
+ {
+ "root_module": "./example/mini_core.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mini_core_hello_world.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 0, "name": "mini_core" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mod_bench.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
+ "roots": ["./scripts/filter_profile.rs"],
+ "crates": [
+ {
+ "root_module": "./scripts/filter_profile.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ }
+ ]
+}
--- /dev/null
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "anyhow"
+version = "1.0.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "byteorder"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
+[[package]]
+name = "cc"
+version = "1.0.62"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1770ced377336a88a67c473594ccc14eca6f4559217c34f64aac8f83d641b40"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "byteorder",
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "gimli",
+ "log",
+ "regalloc",
+ "smallvec",
+ "target-lexicon",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+
+[[package]]
+name = "cranelift-entity"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+
+[[package]]
+name = "cranelift-frontend"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
++[[package]]
++name = "cranelift-jit"
++version = "0.68.0"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
++dependencies = [
++ "anyhow",
++ "cranelift-codegen",
++ "cranelift-entity",
++ "cranelift-module",
++ "cranelift-native",
++ "errno",
++ "libc",
++ "log",
++ "region",
++ "target-lexicon",
++ "winapi",
++]
++
+[[package]]
+name = "cranelift-module"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "log",
+ "thiserror",
+]
+
+[[package]]
+name = "cranelift-native"
+version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "cranelift-codegen",
+ "raw-cpuid",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
+version = "0.68.0"
- [[package]]
- name = "cranelift-simplejit"
- version = "0.68.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#19640367dbf0da7093e61add3306c8d092644fb3"
- dependencies = [
- "cranelift-codegen",
- "cranelift-entity",
- "cranelift-module",
- "cranelift-native",
- "errno",
- "libc",
- "log",
- "region",
- "target-lexicon",
- "winapi",
- ]
-
++source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#8f7f8ee0b4c5007ace6de29b45505c360450b1bb"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
- "cranelift-simplejit",
+[[package]]
+name = "crc32fast"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+dependencies = [
+ "cfg-if 1.0.0",
+]
+
+[[package]]
+name = "errno"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
+dependencies = [
+ "gcc",
+ "libc",
+]
+
+[[package]]
+name = "gcc"
+version = "0.3.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+
+[[package]]
+name = "gimli"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+
+[[package]]
+name = "indexmap"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.80"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
+
+[[package]]
+name = "libloading"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1090080fe06ec2648d0da3881d9453d97e71a45f00eb179af7fdd7e3f686fdb0"
+dependencies = [
+ "cfg-if 1.0.0",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
+dependencies = [
+ "cfg-if 0.1.10",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "object"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "raw-cpuid"
+version = "8.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73"
+dependencies = [
+ "bitflags",
+ "cc",
+ "rustc_version",
+]
+
+[[package]]
+name = "regalloc"
+version = "0.0.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
+dependencies = [
+ "log",
+ "rustc-hash",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
++ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+dependencies = [
+ "semver-parser",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+
+[[package]]
+name = "smallvec"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
+
+[[package]]
+name = "syn"
+version = "1.0.48"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9"
+
+[[package]]
+name = "thiserror"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- /dev/null
- cranelift-simplejit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+edition = "2018"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
+cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind"] }
+cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
- #cranelift-simplejit = { path = "../wasmtime/cranelift/simplejit" }
++cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
+cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
+target-lexicon = "0.11.0"
+gimli = { version = "0.23.0", default-features = false, features = ["write"]}
+object = { version = "0.22.0", default-features = false, features = ["std", "read_core", "write", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.0.2"
+libloading = { version = "0.6.0", optional = true }
+
+# Uncomment to use local checkout of cranelift
+#[patch."https://github.com/bytecodealliance/wasmtime/"]
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
- jit = ["cranelift-simplejit", "libloading"]
++#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#[patch.crates-io]
+#gimli = { path = "../" }
+
+[features]
+default = ["jit", "inline_asm"]
++jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+[profile.dev]
+# By compiling dependencies with optimizations, performing tests gets much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_cranelift]
+# Disabling optimizations for cg_clif itself makes compilation after a change faster.
+opt-level = 0
+
+[profile.release.package.rustc_codegen_cranelift]
+incremental = true
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
+
+[profile.dev.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.release.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.dev.package.syn]
+opt-level = 0
+debug = false
+
+[profile.release.package.syn]
+opt-level = 0
+debug = false
--- /dev/null
- The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/master/cranelift).
+# WIP Cranelift codegen backend for rust
+
+> ⚠⚠⚠ Certain kinds of FFI don't work yet. ⚠⚠⚠
+
- $ $cg_clif_dir/build/bin/cg_clif --jit my_crate.rs
++The goal of this project is to create an alternative codegen backend for the rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/main/cranelift).
+This has the potential to improve compilation times in debug mode.
+If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
+If not please open an issue.
+
+## Building and testing
+
+```bash
+$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
+$ cd rustc_codegen_cranelift
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./build.sh
+```
+
+To run the test suite replace the last command with:
+
+```bash
+$ ./test.sh
+```
+
+This will implicitly build cg_clif too. Both `build.sh` and `test.sh` accept a `--debug` argument to
+build in debug mode.
+
+Alternatively you can download a pre built version from [GHA]. It is listed in the artifacts section
+of workflow runs. Unfortunately due to GHA restrictions you need to be logged in to access it.
+
+[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
+
+## Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`prepare.sh` and `build.sh` or `test.sh`).
+
+### Cargo
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo.sh run
+```
+
+This should build and run your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ $cg_clif_dir/build/bin/cg_clif my_crate.rs
+```
+
+### Jit mode
+
+In jit mode cg_clif will immediately execute your code without creating an executable file.
+
+> This requires all dependencies to be available as a dynamic library.
+> The jit mode will probably need cargo integration to make this possible.
+
+```bash
+$ $cg_clif_dir/build/cargo.sh jit
+```
+
+or
+
+```bash
- echo "$@" | $cg_clif_dir/build/bin/cg_clif - --jit
++$ $cg_clif_dir/build/bin/cg_clif -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
++```
++
++There is also an experimental lazy jit mode. In this mode functions are only compiled once they are
++first called. It currently does not work with multi-threaded programs. When a not yet compiled
++function is called from another thread than the main thread, you will get an ICE.
++
++```bash
++$ $cg_clif_dir/build/cargo.sh lazy-jit
+```
+
+### Shell
+
+These are a few functions that allow you to easily run rust code from the shell using cg_clif as jit.
+
+```bash
+function jit_naked() {
++ echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Cllvm-args=mode=jit -Cprefer-dynamic
+}
+
+function jit() {
+ jit_naked "fn main() { $@ }"
+}
+
+function jit_calc() {
+ jit 'println!("0x{:x}", ' $@ ');';
+}
+```
+
+## Env vars
+
+[see env_vars.md](docs/env_vars.md)
+
+## Not yet supported
+
+* Good non-rust abi support ([several problems](https://github.com/bjorn3/rustc_codegen_cranelift/issues/10))
+* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
+ * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
+ `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
+ have to specify specific registers instead.
+* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
--- /dev/null
- version = "1.0.65"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "addr2line"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "alloc_system"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "cc"
- checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15"
++version = "1.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.80"
++checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cd0782e0a7da7598164153173e5a5d4d9b1da094473c98dce0ff91406112369"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
- checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
++version = "0.2.81"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "alloc_system",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "term"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+ "term",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
--- /dev/null
- compiler_builtins = "0.1"
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+alloc_system = { path = "./alloc_system" }
+
++compiler_builtins = { version = "=0.1.36", default-features = false }
++
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.dev]
+lto = "off"
+
+[profile.release]
+debug = true
+incremental = true
+lto = "off"
--- /dev/null
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+#[cfg(target_arch = "x86_64")]
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+fn main() {
+ println!("{:?}", std::env::args().collect::<Vec<_>>());
+
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
++ // FIXME support lazy jit when multi threading
++ #[cfg(not(lazy_jit))]
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+ assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+ let houndred_u128 = 100u128;
+ let houndred_i128 = 100i128;
+ let houndred_f32 = 100.0f32;
+ let houndred_f64 = 100.0f64;
+ assert_eq!(houndred_u128 as f32, 100.0);
+ assert_eq!(houndred_u128 as f64, 100.0);
+ assert_eq!(houndred_f32 as u128, 100);
+ assert_eq!(houndred_f64 as u128, 100);
+ assert_eq!(houndred_i128 as f32, 100.0);
+ assert_eq!(houndred_i128 as f64, 100.0);
+ assert_eq!(houndred_f32 as i128, 100);
+ assert_eq!(houndred_f64 as i128, 100);
+
+ // Test signed 128bit comparing
+ let max = usize::MAX as i128;
+ if 100i128 < 0i128 || 100i128 > max {
+ panic!();
+ }
+
+ test_checked_mul();
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ #[cfg(target_arch = "x86_64")]
+ unsafe {
+ test_simd();
+ }
+
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ #[derive(Copy, Clone)]
+ enum Nums {
+ NegOne = -1,
+ }
+
+ let kind = Nums::NegOne;
+ assert_eq!(-1i128, kind as i128);
+
+ let options = [1u128];
+ match options[0] {
+ 1 => (),
+ 0 => loop {},
+ v => panic(v),
+ };
+}
+
+fn panic(_: u128) {
+ panic!();
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ assert!(is_x86_feature_detected!("sse2"));
+
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, -1);
+ assert_eq_m128i(_mm_set1_epi8(0), r);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, -0x80000000);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 19);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+ let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ let r = _mm_insert_epi16(a, 9, 0);
+ let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+ assert_eq_m128i(r, e);
+}
+
+fn test_checked_mul() {
+ let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+ assert_eq!(u, None);
+
+ assert_eq!(1u8.checked_mul(255u8), Some(255u8));
+ assert_eq!(255u8.checked_mul(255u8), None);
+ assert_eq!(1i8.checked_mul(127i8), Some(127i8));
+ assert_eq!(127i8.checked_mul(127i8), None);
+ assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
+ assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
+ assert_eq!((-128i8).checked_mul(-128i8), None);
+
+ assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+ assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+ assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+ assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+ assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+ assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+ assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
--- /dev/null
- nightly-2020-11-27
++nightly-2020-12-23
--- /dev/null
- cargo "+${TOOLCHAIN}" rustc "$@" -- --jit
+#!/bin/bash
+
+dir=$(dirname "$0")
+source "$dir/config.sh"
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat "$dir/rust-toolchain")
+
+cmd=$1
+shift || true
+
+if [[ "$cmd" = "jit" ]]; then
++cargo "+${TOOLCHAIN}" rustc "$@" -- -Cllvm-args=mode=jit -Cprefer-dynamic
++elif [[ "$cmd" = "lazy-jit" ]]; then
++cargo "+${TOOLCHAIN}" rustc "$@" -- -Cllvm-args=mode=jit-lazy -Cprefer-dynamic
+else
+cargo "+${TOOLCHAIN}" "$cmd" "$@"
+fi
--- /dev/null
- PROFILE=$1 OUTPUT=$2 exec $RUSTC $RUSTFLAGS --jit $0
+#!/bin/bash
+#![forbid(unsafe_code)]/* This line is ignored by bash
+# This block is ignored by rustc
+pushd $(dirname "$0")/../
+source build/config.sh
+popd
++PROFILE=$1 OUTPUT=$2 exec $RUSTC $RUSTFLAGS -Cllvm-args=mode=jit -Cprefer-dynamic $0
+#*/
+
+//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
+//! profiles.
+//!
+//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
+//!
+//! This file is specially crafted to be both a valid bash script and valid rust source file. If
+//! executed as bash script this will run the rust source using cg_clif in JIT mode.
+
+use std::io::Write;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ let profile_name = std::env::var("PROFILE").unwrap();
+ let output_name = std::env::var("OUTPUT").unwrap();
+ if profile_name.is_empty() || output_name.is_empty() {
+ println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
+ std::process::exit(1);
+ }
+ let profile = std::fs::read_to_string(profile_name)
+ .map_err(|err| format!("Failed to read profile {}", err))?;
+ let mut output = std::fs::OpenOptions::new()
+ .create(true)
+ .write(true)
+ .truncate(true)
+ .open(output_name)?;
+
+ for line in profile.lines() {
+ let mut stack = &line[..line.rfind(" ").unwrap()];
+ let count = &line[line.rfind(" ").unwrap() + 1..];
+
+ // Filter away uninteresting samples
+ if !stack.contains("rustc_codegen_cranelift") {
+ continue;
+ }
+
+ if stack.contains("rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items")
+ || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
+ || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
+ {
+ continue;
+ }
+
+ // Trim start
+ if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
+ stack = &stack[index..];
+ }
+
+ if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
+ stack = &stack[index..];
+ }
+
+ // Trim end
+ const MALLOC: &str = "malloc";
+ if let Some(index) = stack.find(MALLOC) {
+ stack = &stack[..index + MALLOC.len()];
+ }
+
+ const FREE: &str = "free";
+ if let Some(index) = stack.find(FREE) {
+ stack = &stack[..index + FREE.len()];
+ }
+
+ const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
+ if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
+ stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
+ }
+
+ const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
+ "rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items";
+ if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
+ stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
+ }
+
+ const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
+ if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
+ stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
+ }
+
+ const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
+ if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
+ stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
+ }
+
+ const ENCODE_METADATA: &str = "rustc_middle::ty::context::TyCtxt::encode_metadata";
+ if let Some(index) = stack.find(ENCODE_METADATA) {
+ stack = &stack[..index + ENCODE_METADATA.len()];
+ }
+
+ const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
+ if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
+ stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+ }
+
+ const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
+ if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
+ stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
+ }
+
+ const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
+ if let Some(index) = stack.find(INST_BUILD) {
+ stack = &stack[..index + INST_BUILD.len()];
+ }
+
+ output.write_all(stack.as_bytes())?;
+ output.write_all(&*b" ")?;
+ output.write_all(count.as_bytes())?;
+ output.write_all(&*b"\n")?;
+ }
+
+ Ok(())
+}
--- /dev/null
- CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC --jit example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+#!/bin/bash
+
+set -e
+
+source build/config.sh
+export CG_CLIF_INCR_CACHE_DISABLED=1
+MY_RUSTC="$RUSTC $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
+
+function no_sysroot_tests() {
+ echo "[BUILD] mini_core"
+ $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
+
+ echo "[BUILD] example"
+ $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] mini_core_hello_world"
- $MY_RUSTC --jit example/std_example.rs --target "$HOST_TRIPLE"
++ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
++
++ echo "[JIT-lazy] mini_core_hello_world"
++ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] mini_core_hello_world (skipped)"
+ fi
+
+ echo "[AOT] mini_core_hello_world"
+ $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+ # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
+
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+}
+
+function base_sysroot_tests() {
+ echo "[AOT] alloc_example"
+ $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/alloc_example
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] std_example"
++ $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
++
++ echo "[JIT-lazy] std_example"
++ $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --cfg lazy_jit --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] std_example (skipped)"
+ fi
+
+ echo "[AOT] dst_field_align"
+ # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+ $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+ echo "[AOT] std_example"
+ $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/std_example arg
+
+ echo "[AOT] subslice-patterns-const-eval"
+ $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+ echo "[AOT] track-caller-attribute"
+ $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/track-caller-attribute
+
+ echo "[AOT] mod_bench"
+ $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mod_bench
+
+ pushd rand
+ rm -r ./target || true
+ ../build/cargo.sh test --workspace
+ popd
+}
+
+function extended_sysroot_tests() {
+ pushd simple-raytracer
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
+ "RUSTC=rustc RUSTFLAGS='' cargo build" \
+ "../build/cargo.sh build"
+
+ echo "[BENCH RUN] ebobby/simple-raytracer"
+ cp ./target/debug/main ./raytracer_cg_clif
+ hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
+ else
+ echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+ echo "[COMPILE] ebobby/simple-raytracer"
+ ../cargo.sh build
+ echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+ fi
+ popd
+
+ pushd build_sysroot/sysroot_src/library/core/tests
+ echo "[TEST] libcore"
+ rm -r ./target || true
+ ../../../../../build/cargo.sh test
+ popd
+
+ pushd regex
+ echo "[TEST] rust-lang/regex example shootout-regex-dna"
+ ../build/cargo.sh clean
+ # Make sure `[codegen mono items] start` doesn't poison the diff
+ ../build/cargo.sh build --example shootout-regex-dna
+ cat examples/regexdna-input.txt | ../build/cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
+ diff -u res.txt examples/regexdna-output.txt
+
+ echo "[TEST] rust-lang/regex tests"
+ ../build/cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+ popd
+}
+
+case "$1" in
+ "no_sysroot")
+ no_sysroot_tests
+ ;;
+ "base_sysroot")
+ base_sysroot_tests
+ ;;
+ "extended_sysroot")
+ extended_sysroot_tests
+ ;;
+ *)
+ echo "unknown test suite"
+ ;;
+esac
--- /dev/null
- let triple = crate::build_isa(sess, true).triple().clone();
+//! Abstraction around the object writing crate
+
+use std::convert::{TryFrom, TryInto};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_session::Session;
+
+use cranelift_module::FuncId;
+
+use object::write::*;
+use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
+
+use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
+pub(crate) trait WriteMetadata {
+ fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, is_like_osx: bool);
+}
+
+impl WriteMetadata for object::write::Object {
+ fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
+ let segment = self
+ .segment_name(object::write::StandardSegment::Data)
+ .to_vec();
+ let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
+ let offset = self.append_section_data(section_id, &data, 1);
+ // For Mach-O, and probably PE as well, this is necessary to prevent the linker from
+ // discarding the .rustc section. For ELF this isn't necessary, but it does no harm either.
+ self.add_symbol(object::write::Symbol {
+ name: symbol_name.into_bytes(),
+ value: offset,
+ size: data.len() as u64,
+ kind: object::SymbolKind::Data,
+ scope: object::SymbolScope::Dynamic,
+ weak: false,
+ section: SymbolSection::Section(section_id),
+ flags: SymbolFlags::None,
+ });
+ }
+}
+
+pub(crate) trait WriteDebugInfo {
+ type SectionId: Copy;
+
+ fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ );
+}
+
+impl WriteDebugInfo for ObjectProduct {
+ type SectionId = (object::write::SectionId, object::write::SymbolId);
+
+ fn add_debug_section(
+ &mut self,
+ id: SectionId,
+ data: Vec<u8>,
+ ) -> (object::write::SectionId, object::write::SymbolId) {
+ let name = if self.object.format() == object::BinaryFormat::MachO {
+ id.name().replace('.', "__") // Mach-O expects __debug_info instead of .debug_info
+ } else {
+ id.name().to_string()
+ }
+ .into_bytes();
+
+ let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
+ // FIXME use SHT_X86_64_UNWIND for .eh_frame
+ let section_id = self.object.add_section(
+ segment,
+ name,
+ if id == SectionId::EhFrame {
+ SectionKind::ReadOnlyData
+ } else {
+ SectionKind::Debug
+ },
+ );
+ self.object
+ .section_mut(section_id)
+ .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
+ let symbol_id = self.object.section_symbol(section_id);
+ (section_id, symbol_id)
+ }
+
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ ) {
+ let (symbol, symbol_offset) = match reloc.name {
+ DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
+ DebugRelocName::Symbol(id) => {
+ let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
+ self.object
+ .symbol_section_and_offset(symbol_id)
+ .expect("Debug reloc for undef sym???")
+ }
+ };
+ self.object
+ .add_relocation(
+ from.0,
+ Relocation {
+ offset: u64::from(reloc.offset),
+ symbol,
+ kind: reloc.kind,
+ encoding: RelocationEncoding::Generic,
+ size: reloc.size * 8,
+ addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+ },
+ )
+ .unwrap();
+ }
+}
+
+// FIXME remove once atomic instructions are implemented in Cranelift.
+pub(crate) trait AddConstructor {
+ fn add_constructor(&mut self, func_id: FuncId);
+}
+
+impl AddConstructor for ObjectProduct {
+ fn add_constructor(&mut self, func_id: FuncId) {
+ let symbol = self.function_symbol(func_id);
+ let segment = self
+ .object
+ .segment_name(object::write::StandardSegment::Data);
+ let init_array_section =
+ self.object
+ .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
+ let address_size = self
+ .object
+ .architecture()
+ .address_size()
+ .expect("address_size must be known")
+ .bytes();
+ self.object.append_section_data(
+ init_array_section,
+ &std::iter::repeat(0)
+ .take(address_size.into())
+ .collect::<Vec<u8>>(),
+ 8,
+ );
+ self.object
+ .add_relocation(
+ init_array_section,
+ object::write::Relocation {
+ offset: 0,
+ size: address_size * 8,
+ kind: RelocationKind::Absolute,
+ encoding: RelocationEncoding::Generic,
+ symbol,
+ addend: 0,
+ },
+ )
+ .unwrap();
+ }
+}
+
+pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
- crate::build_isa(sess, true),
++ let triple = crate::build_isa(sess).triple().clone();
+
+ let binary_format = match triple.binary_format {
+ target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
+ target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
+ target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
+ binary_format => sess.fatal(&format!("binary format {} is unsupported", binary_format)),
+ };
+ let architecture = match triple.architecture {
+ target_lexicon::Architecture::X86_32(_) => object::Architecture::I386,
+ target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
+ target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
+ target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
+ architecture => sess.fatal(&format!(
+ "target architecture {:?} is unsupported",
+ architecture,
+ )),
+ };
+ let endian = match triple.endianness().unwrap() {
+ target_lexicon::Endianness::Little => object::Endianness::Little,
+ target_lexicon::Endianness::Big => object::Endianness::Big,
+ };
+
+ let mut metadata_object = object::write::Object::new(binary_format, architecture, endian);
+ metadata_object.add_file_symbol(name.as_bytes().to_vec());
+ f(&mut metadata_object);
+ metadata_object.write().unwrap()
+}
+
+pub(crate) fn make_module(sess: &Session, name: String) -> ObjectModule {
+ let mut builder = ObjectBuilder::new(
++ crate::build_isa(sess),
+ name + ".o",
+ cranelift_module::default_libcall_names(),
+ )
+ .unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
--- /dev/null
- if switch_ty.kind() == fx.tcx.types.bool.kind() {
+//! Codegen of a single function
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx, impl Module>,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+) {
+ let tcx = cx.tcx;
+
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+
+ // Declare function
+ let (name, sig) = get_function_name_and_sig(tcx, cx.module.isa().triple(), instance, false);
+ let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap();
+
+ cx.cached_context.clear();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+ func.name = ExternalName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
+ .map(|_| bcx.create_block())
+ .collect();
+
+ // Make FunctionCx
+ let pointer_type = cx.module.target_config().pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let mut fx = FunctionCx {
+ cx,
+ tcx,
+ pointer_type,
+
+ instance,
+ mir,
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+ cold_blocks: EntitySet::new(),
+
+ clif_comments,
+ source_info_set: indexmap::IndexSet::new(),
+ next_ssa_var: 0,
+
+ inline_asm_index: 0,
+ };
+
+ let arg_uninhabited = fx.mir.args_iter().any(|arg| {
+ fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
+ .abi
+ .is_uninhabited()
+ });
+
+ if arg_uninhabited {
+ fx.bcx
+ .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+ } else {
+ tcx.sess.time("codegen clif ir", || {
+ tcx.sess.time("codegen prelude", || {
+ crate::abi::codegen_fn_prelude(&mut fx, start_block)
+ });
+ codegen_fn_content(&mut fx);
+ });
+ }
+
+ // Extract all data needed from fx before `func` is moved back into the context below,
+ // after which fx can no longer be accessed.
+ let instance = fx.instance;
+ let mut clif_comments = fx.clif_comments;
+ let source_info_set = fx.source_info_set;
+ let local_map = fx.local_map;
+ let cold_blocks = fx.cold_blocks;
+
+ // Store function in context
+ let context = &mut cx.cached_context;
+ context.func = func;
+
+ crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &context.func);
+
+ // Perform rust specific optimizations
+ tcx.sess.time("optimize clif ir", || {
+ crate::optimize::optimize_function(
+ tcx,
+ instance,
+ context,
+ &cold_blocks,
+ &mut clif_comments,
+ );
+ });
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(cx.module.isa()).unwrap();
+ context.dce(cx.module.isa()).unwrap();
+
++ context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
++
+ // Define function
+ let module = &mut cx.module;
+ tcx.sess.time("define function", || {
+ module
+ .define_function(
+ func_id,
+ context,
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap()
+ });
+
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "opt",
+ Some(cx.module.isa()),
+ instance,
+ &context,
+ &clif_comments,
+ );
+
++ if let Some(mach_compile_result) = &context.mach_compile_result {
++ if let Some(disasm) = &mach_compile_result.disasm {
++ crate::pretty_clif::write_ir_file(
++ tcx,
++ &format!("{}.vcode", tcx.symbol_name(instance).name),
++ |file| file.write_all(disasm.as_bytes()),
++ )
++ }
++ }
++
+ // Define debuginfo for function
+ let isa = cx.module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ tcx.sess.time("generate debug info", || {
+ if let Some(debug_context) = debug_context {
+ debug_context.define_function(
+ instance,
+ func_id,
+ &name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
+ }
+ unwind_context.add_function(func_id, &context, isa);
+ });
+
+ // Clear context to make it usable for the next function
+ context.clear();
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ None,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess
+ .fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Module>) {
+ crate::constant::check_constants(fx);
+
+ for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME once unwinding is supported uncomment next lines
+ // // Unwinding is unlikely to happen, so mark cleanup block's as cold.
+ // fx.cold_blocks.insert(block);
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ #[cfg(debug_assertions)]
+ {
+ let mut terminator_head = "\n".to_string();
+ bb_data
+ .terminator()
+ .kind
+ .fmt_head(&mut terminator_head)
+ .unwrap();
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ fx.set_debug_loc(bb_data.terminator().source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert {
+ cond,
+ expected,
+ msg,
+ target,
+ cleanup: _,
+ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.cold_blocks.insert(failure);
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx
+ .get_caller_location(bb_data.terminator().source_info.span)
+ .load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ bb_data.terminator().source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt {
+ discr,
+ switch_ty,
+ targets,
+ } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
- if test_zero {
- fx.bcx.ins().brz(discr, then_block, &[]);
- fx.bcx.ins().jump(else_block, &[]);
++ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
++ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
++ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let discr =
+ crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
- fx.bcx.ins().brnz(discr, then_block, &[]);
- fx.bcx.ins().jump(else_block, &[]);
++ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
++ &fx.bcx, discr, test_zero,
++ ) {
++ if taken {
++ fx.bcx.ins().jump(then_block, &[]);
++ } else {
++ fx.bcx.ins().jump(else_block, &[]);
++ }
+ } else {
++ if test_zero {
++ fx.bcx.ins().brz(discr, then_block, &[]);
++ fx.bcx.ins().jump(else_block, &[]);
++ } else {
++ fx.bcx.ins().brnz(discr, then_block, &[]);
++ fx.bcx.ins().jump(else_block, &[]);
++ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ *fn_span,
+ block,
+ func,
+ args,
+ *destination,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ } => {
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ bb_data.terminator().source_info.span,
+ template,
+ operands,
+ *options,
+ );
+
+ match *destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ crate::trap::trap_unreachable(
+ fx,
+ "[corruption] Returned from noreturn inline asm",
+ );
+ }
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+ }
+ TerminatorKind::Unreachable => {
+ trap_unreachable(fx, "[corruption] Hit unreachable code.");
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop {
+ place,
+ target,
+ unwind: _,
+ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
+
+// Lower a single MIR statement into Cranelift IR at the current insertion point.
+// `cur_block` is only used by the (disabled) comment-emitting debug code below.
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ // NOTE(review): `false_debug_assertions` is not a real cfg, so this whole
+ // comment-emitting block is intentionally compiled out.
+ #[cfg(false_debug_assertions)]
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant {
+ place,
+ variant_index,
+ } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs, ref rhs) => {
+ let lhs = codegen_operand(fx, lhs);
+ let rhs = codegen_operand(fx, rhs);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs, ref rhs) => {
+ let lhs = codegen_operand(fx, lhs);
+ let rhs = codegen_operand(fx, rhs);
+
+ // With overflow checks disabled the "did overflow" half of the
+ // pair is a statically-false constant.
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ // These pointer casts are representation-preserving.
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ // A pointer is "fat" when its pointee needs metadata (slice len / vtable).
+ fn is_fat_ptr<'tcx>(
+ fx: &FunctionCx<'_, 'tcx, impl Module>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ ty.builtin_deref(true)
+ .map(
+ |ty::TypeAndMut {
+ ty: pointee_ty,
+ mutbl: _,
+ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ },
+ )
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
+ // enum -> discriminant value
+ assert!(adt_def.is_enum());
+ match to_ty.kind() {
+ ty::Uint(_) | ty::Int(_) => {}
+ _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
+ }
+
+ use rustc_target::abi::{Int, TagEncoding, Variants};
+
+ match operand.layout().variants {
+ Variants::Single { index } => {
+ // Only one variant: the discriminant is a compile-time constant.
+ let discr = operand
+ .layout()
+ .ty
+ .discriminant_for_variant(fx.tcx, index)
+ .unwrap();
+ let discr = if discr.ty.is_signed() {
+ fx.layout_of(discr.ty).size.sign_extend(discr.val)
+ } else {
+ discr.val
+ };
+ let discr = discr.into();
+
+ let discr = CValue::const_val(fx, fx.layout_of(to_ty), discr);
+ lval.write_cvalue(fx, discr);
+ }
+ Variants::Multiple {
+ ref tag,
+ tag_field,
+ tag_encoding: TagEncoding::Direct,
+ variants: _,
+ } => {
+ let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let encoded_discr =
+ operand.value_field(fx, mir::Field::new(tag_field));
+ let encoded_discr = encoded_discr.load_scalar(fx);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ let signed = match tag.value {
+ Int(_, signed) => signed,
+ _ => false,
+ };
+ let val = clif_intcast(fx, encoded_discr, cast_to, signed);
+ let val = CValue::by_val(val, dest_layout);
+ lval.write_cvalue(fx, val);
+ }
+ Variants::Multiple { .. } => unreachable!(),
+ }
+ } else {
+ // Plain scalar numeric cast.
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ let discr =
+ crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+ lval.write_cvalue(fx, discr);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .val
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ // Byte-sized element: lower to a libcall memset.
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx
+ .call_memset(fx.cx.module.target_config(), addr, val, times);
+ } else {
+ // General case: emit an explicit store loop over the element count.
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::NullaryOp(NullOp::Box, content_ty) => {
+ let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
+ let content_ty = fx.monomorphize(content_ty);
+ let layout = fx.layout_of(content_ty);
+ let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
+ let llalign = fx
+ .bcx
+ .ins()
+ .iconst(usize_type, layout.align.abi.bytes() as i64);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+
+ // Allocate space:
+ let def_id = match fx
+ .tcx
+ .lang_items()
+ .require(rustc_hir::LangItem::ExchangeMalloc)
+ {
+ Ok(id) => id,
+ Err(s) => {
+ fx.tcx
+ .sess
+ .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+ }
+ };
+ let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
+ let ptr = fx.bcx.inst_results(call)[0];
+ lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
+ }
+ Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
+ assert!(lval
+ .layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
+ let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
+ let val =
+ CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ // Only a small set of known LLVM-style inline asm templates (as emitted by
+ // the standard library) is recognized; everything else is a fatal error.
+ StatementKind::LlvmInlineAsm(asm) => {
+ use rustc_span::symbol::Symbol;
+ let LlvmInlineAsm {
+ asm,
+ outputs,
+ inputs,
+ } = &**asm;
+ let rustc_hir::LlvmInlineAsmInner {
+ asm: asm_code, // Name
+ outputs: output_names, // Vec<LlvmInlineAsmOutput>
+ inputs: input_names, // Vec<Name>
+ clobbers, // Vec<Name>
+ volatile, // bool
+ alignstack, // bool
+ dialect: _,
+ asm_str_style: _,
+ } = asm;
+ match asm_code.as_str().trim() {
+ "" => {
+ // Black box
+ }
+ "mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => {
+ assert_eq!(
+ input_names,
+ &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
+ );
+ assert_eq!(output_names.len(), 4);
+ for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
+ .iter()
+ .enumerate()
+ {
+ assert_eq!(&output_names[i].constraint.as_str(), c);
+ assert!(!output_names[i].is_rw);
+ assert!(!output_names[i].is_indirect);
+ }
+
+ assert_eq!(clobbers, &[]);
+
+ assert!(!volatile);
+ assert!(!alignstack);
+
+ assert_eq!(inputs.len(), 2);
+ let leaf = codegen_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
+ let subleaf = codegen_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
+
+ let (eax, ebx, ecx, edx) =
+ crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
+
+ assert_eq!(outputs.len(), 4);
+ codegen_place(fx, outputs[0])
+ .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[1])
+ .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[2])
+ .write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ codegen_place(fx, outputs[3])
+ .write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ }
+ "xgetbv" => {
+ assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
+
+ assert_eq!(output_names.len(), 2);
+ for (i, c) in (&["={eax}", "={edx}"]).iter().enumerate() {
+ assert_eq!(&output_names[i].constraint.as_str(), c);
+ assert!(!output_names[i].is_rw);
+ assert!(!output_names[i].is_indirect);
+ }
+
+ assert_eq!(clobbers, &[]);
+
+ assert!(!volatile);
+ assert!(!alignstack);
+
+ crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
+ }
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+ _ if fx
+ .tcx
+ .symbol_name(fx.instance)
+ .name
+ .starts_with("___chkstk") =>
+ {
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ }
+ _ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ }
+ // Used in sys::windows::abort_internal
+ "int $$0x29" => {
+ crate::trap::trap_unimplemented(fx, "Windows abort");
+ }
+ _ => fx
+ .tcx
+ .sess
+ .span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
+ }
+ }
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ }
+}
+
+// Compute the length of an array or slice place as a Cranelift `Value`.
+fn codegen_array_len<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ place: CPlace<'tcx>,
+) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ // Array lengths are compile-time constants; materialize as an iconst.
+ let len = fx
+ .monomorphize(len)
+ .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ // Slice lengths live in the fat-pointer metadata carried by the place.
+ ty::Slice(_elem_ty) => place
+ .to_ptr_maybe_unsized()
+ .1
+ .expect("Length metadata for slice place"),
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
+
+// Resolve a MIR `Place` to a `CPlace` by starting at the local and applying
+// each projection element (deref, field, index, subslice, downcast) in order.
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex {
+ offset,
+ min_length: _,
+ from_end,
+ } => {
+ let offset: u64 = offset;
+ // `from_end` indexes from the back: effective index is `len - offset`.
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ // New length = old length - elements dropped at both ends.
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
+
+// Lower a MIR operand to a `CValue`: places are read, constants are materialized.
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ // Move and Copy are treated identically here; ownership is irrelevant at this level.
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ }
+}
+
+// Emit a call to the `panic` lang item with a static message and caller location.
+pub(crate) fn codegen_panic<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ msg_str: &str,
+ span: Span,
+) {
+ let location = fx.get_caller_location(span).load_scalar(fx);
+
+ // The message is passed as a (ptr, len) pair, i.e. an unpacked &str.
+ let msg_ptr = fx.anonymous_str("assert", msg_str);
+ let msg_len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
+}
+
+// Call the given panic-family lang item with the prepared argument values,
+// then emit an unreachable trap since panic functions never return.
+pub(crate) fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id = fx
+ .tcx
+ .lang_items()
+ .require(lang_item)
+ .unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ // All supported panic entry points take three pointer-sized arguments.
+ fx.lib_call(
+ &*symbol_name,
+ vec![fx.pointer_type, fx.pointer_type, fx.pointer_type],
+ vec![],
+ args,
+ );
+
+ crate::trap::trap_unreachable(fx, "panic lang item returned");
+}
--- /dev/null
- let mut use_jit = false;
-
- let mut args = std::env::args_os()
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use rustc_data_structures::profiling::print_time_passes_entry;
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+#[derive(Default)]
+pub struct CraneliftPassesCallbacks {
+ // Whether to print the "total" time-passes entry when the compiler exits.
+ time_passes: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+ // Adjust compiler configuration before the session is created: force
+ // panic=abort (unwinding is unsupported by cg_clif) and default the
+ // sysroot to the directory two levels above the current executable.
+ fn config(&mut self, config: &mut interface::Config) {
+ // If a --prints=... option has been given, we don't print the "total"
+ // time because it will mess up the --prints output. See #64339.
+ self.time_passes = config.opts.prints.is_empty()
+ && (config.opts.debugging_opts.time_passes || config.opts.debugging_opts.time);
+
+ config.opts.cg.panic = Some(PanicStrategy::Abort);
+ config.opts.debugging_opts.panic_abort_tests = true;
+ // Only a default: an explicit --sysroot on the command line wins.
+ config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
+ std::env::current_exe()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .to_owned()
+ }));
+ }
+}
+
+// Driver entry point: run rustc with the Cranelift backend installed and
+// report the total wall-clock time if -Ztime-passes was requested.
+fn main() {
+ let start = std::time::Instant::now();
+ rustc_driver::init_rustc_env_logger();
+ let mut callbacks = CraneliftPassesCallbacks::default();
+ rustc_driver::install_ice_hook();
+ let exit_code = rustc_driver::catch_with_exit_code(|| {
- .filter(|arg| {
- if arg == "--jit" {
- use_jit = true;
- false
- } else {
- true
- }
- })
++ let args = std::env::args_os()
+ .enumerate()
+ .map(|(i, arg)| {
+ // rustc_driver wants String args; reject non-Unicode ones up front.
+ arg.into_string().unwrap_or_else(|arg| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+ )
+ })
+ })
- if use_jit {
- args.push("-Cprefer-dynamic".to_string());
- }
+ .collect::<Vec<_>>();
- Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
- config: rustc_codegen_cranelift::BackendConfig { use_jit },
- })
+ let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+ run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
++ Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+ })));
+ run_compiler.run()
+ });
+ // The extra `\t` is necessary to align this label with the others.
+ print_time_passes_entry(callbacks.time_passes, "\ttotal", start.elapsed());
+ std::process::exit(exit_code)
+}
--- /dev/null
- Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend {
- config: rustc_codegen_cranelift::BackendConfig { use_jit: false },
- })
+//! The only differences between this and cg_clif.rs are that this binary defaults to using
+//! cg_llvm instead of cg_clif (passing `--clif` selects cg_clif) and that this binary doesn't
+//! have JIT support.
+//! This is necessary as with Cargo `RUSTC` applies to both target crates and host crates. The host
+//! crates must be built with cg_llvm as we are currently building a sysroot for cg_clif.
+//! `RUSTFLAGS` however is only applied to target crates, so `--clif` would only be passed to the
+//! target crates.
+
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use std::path::PathBuf;
+
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+// Locate the rustc sysroot for the cg_llvm (host) path, using rustup/multirust
+// environment variables captured at compile time, with RUST_SYSROOT as fallback.
+fn find_sysroot() -> String {
+ // Taken from https://github.com/Manishearth/rust-clippy/pull/911.
+ let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
+ let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
+ match (home, toolchain) {
+ (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
+ _ => option_env!("RUST_SYSROOT")
+ .expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
+ .to_owned(),
+ }
+}
+
+pub struct CraneliftPassesCallbacks {
+ // True when `--clif` was passed: use cg_clif instead of the default cg_llvm.
+ use_clif: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+ // For cg_llvm runs, only point at the host sysroot. For cg_clif runs,
+ // force panic=abort and use the sysroot built under build_sysroot/.
+ fn config(&mut self, config: &mut interface::Config) {
+ if !self.use_clif {
+ config.opts.maybe_sysroot = Some(PathBuf::from(find_sysroot()));
+ return;
+ }
+
+ config.opts.cg.panic = Some(PanicStrategy::Abort);
+ config.opts.debugging_opts.panic_abort_tests = true;
+ // Walk up three levels from the executable to the project root, then
+ // into the freshly built cg_clif sysroot.
+ config.opts.maybe_sysroot = Some(
+ std::env::current_exe()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .join("build_sysroot")
+ .join("sysroot"),
+ );
+ }
+}
+
+// Driver entry point: strip the `--clif` flag from the argument list and only
+// install the Cranelift backend when it was present; otherwise run plain rustc.
+fn main() {
+ rustc_driver::init_rustc_env_logger();
+ rustc_driver::install_ice_hook();
+ let exit_code = rustc_driver::catch_with_exit_code(|| {
+ let mut use_clif = false;
+
+ let args = std::env::args_os()
+ .enumerate()
+ .map(|(i, arg)| {
+ // rustc_driver wants String args; reject non-Unicode ones up front.
+ arg.into_string().unwrap_or_else(|arg| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+ )
+ })
+ })
+ .filter(|arg| {
+ // `--clif` is consumed here and must not be forwarded to rustc.
+ if arg == "--clif" {
+ use_clif = true;
+ false
+ } else {
+ true
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let mut callbacks = CraneliftPassesCallbacks { use_clif };
+
+ let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+ if use_clif {
+ run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
++ Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+ })));
+ }
+ run_compiler.run()
+ });
+ std::process::exit(exit_code)
+}
--- /dev/null
- matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}),
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_span::DUMMY_SP;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::ErrorReported;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer, Scalar,
+};
+use rustc_middle::ty::{Const, ConstKind};
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+// Per-codegen-unit bookkeeping for constants: `todo` queues allocations and
+// statics that still need to be defined in the module, `done` records the
+// DataIds already defined so they are not emitted twice.
+#[derive(Default)]
+pub(crate) struct ConstantCx {
+ todo: Vec<TodoItem>,
+ done: FxHashSet<DataId>,
+}
+
+// A pending definition: either an interpreter allocation or a `static` item.
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+ Alloc(AllocId),
+ Static(DefId),
+}
+
+impl ConstantCx {
+ // Flush all queued allocations/statics into the module's data section.
+ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut impl Module) {
+ //println!("todo {:?}", self.todo);
+ define_all_allocs(tcx, module, &mut self);
+ //println!("done {:?}", self.done);
+ self.done.clear();
+ }
+}
+
+// Eagerly evaluate every constant required by the current MIR body so that
+// erroneous constants are reported as diagnostics before codegen proceeds.
+// Polymorphic (TooGeneric) constants are a compiler bug at this stage.
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Module>) {
+ for constant in &fx.mir.required_consts {
+ let const_ = fx.monomorphize(constant.literal);
+ match const_.val {
+ ConstKind::Value(_) => {}
+ ConstKind::Unevaluated(def, ref substs, promoted) => {
+ if let Err(err) =
+ fx.tcx
+ .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ {
+ match err {
+ ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+ fx.tcx
+ .sess
+ .span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(
+ constant.span,
+ "codegen encountered polymorphic constant: {:?}",
+ err
+ );
+ }
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ }
+ }
+}
+
+// Queue a `static` item for definition; actually emitted in ConstantCx::finalize.
+pub(crate) fn codegen_static(constants_cx: &mut ConstantCx, def_id: DefId) {
+ constants_cx.todo.push(TodoItem::Static(def_id));
+}
+
+// Produce a pointer to a thread-local static via the `tls_value` instruction.
+pub(crate) fn codegen_tls_ref<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ CValue::by_val(tls_ptr, layout)
+}
+
+// Produce a place referring to a non-thread-local static. TLS statics must go
+// through codegen_tls_ref (enforced by the assertion below).
+fn codegen_static_ref<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ assert!(!layout.is_unsized(), "unsized statics aren't supported");
+ assert!(
- module.define_data(data_id, &data_ctx).unwrap();
++ matches!(
++ fx.bcx.func.global_values[local_data_id],
++ GlobalValueData::Symbol { tls: false, .. }
++ ),
+ "tls static referenced without Rvalue::ThreadLocalRef"
+ );
+ CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
+// Materialize a MIR constant as a CValue. Statics become references, other
+// unevaluated constants are const-evaluated here; evaluation failures emit a
+// diagnostic and lower to an unreachable trap instead of aborting codegen.
+pub(crate) fn codegen_constant<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+ let const_ = fx.monomorphize(constant.literal);
+ let const_val = match const_.val {
+ ConstKind::Value(const_val) => const_val,
+ ConstKind::Unevaluated(def, ref substs, promoted) if fx.tcx.is_static(def.did) => {
+ assert!(substs.is_empty());
+ assert!(promoted.is_none());
+
+ return codegen_static_ref(
+ fx,
+ def.did,
+ fx.layout_of(fx.monomorphize(&constant.literal.ty)),
+ )
+ .to_cvalue(fx);
+ }
+ ConstKind::Unevaluated(def, ref substs, promoted) => {
+ match fx
+ .tcx
+ .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ {
+ Ok(const_val) => const_val,
+ Err(_) => {
+ // Promoteds failing to evaluate were already reported by
+ // check_constants; only report non-promoted failures here.
+ if promoted.is_none() {
+ fx.tcx
+ .sess
+ .span_err(constant.span, "erroneous constant encountered");
+ }
+ return crate::trap::trap_unreachable_ret_value(
+ fx,
+ fx.layout_of(const_.ty),
+ "erroneous constant encountered",
+ );
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ };
+
+ codegen_const_value(fx, const_val, const_.ty)
+}
+
+pub(crate) fn codegen_const_value<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ const_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+ let layout = fx.layout_of(ty);
+ assert!(!layout.is_unsized(), "sized const value");
+
+ if layout.is_zst() {
+ return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+ }
+
+ match const_val {
+ ConstValue::Scalar(x) => {
+ if fx.clif_type(layout.ty).is_none() {
+ let (size, align) = (layout.size, layout.align.pref);
+ let mut alloc = Allocation::from_bytes(
+ std::iter::repeat(0)
+ .take(size.bytes_usize())
+ .collect::<Vec<u8>>(),
+ align,
+ );
+ let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
+ alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
+ let alloc = fx.tcx.intern_const_alloc(alloc);
+ return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
+ }
+
+ match x {
+ Scalar::Int(int) => CValue::const_val(fx, layout, int),
+ Scalar::Ptr(ptr) => {
+ let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
+ let base_addr = match alloc_kind {
+ Some(GlobalAlloc::Memory(alloc)) => {
+ fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
+ let data_id = data_id_for_alloc_id(
+ &mut fx.cx.module,
+ ptr.alloc_id,
+ alloc.mutability,
+ );
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ Some(GlobalAlloc::Function(instance)) => {
+ let func_id =
+ crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
+ let local_func_id =
+ fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+ }
+ Some(GlobalAlloc::Static(def_id)) => {
+ assert!(fx.tcx.is_static(def_id));
+ let data_id =
+ data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ None => bug!("missing allocation {:?}", ptr.alloc_id),
+ };
+ let val = if ptr.offset.bytes() != 0 {
+ fx.bcx
+ .ins()
+ .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
+ } else {
+ base_addr
+ };
+ CValue::by_val(val, layout)
+ }
+ }
+ }
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
+ ConstValue::Slice { data, start, end } => {
+ let ptr = pointer_for_allocation(fx, data)
+ .offset_i64(fx, i64::try_from(start).unwrap())
+ .get_addr(fx);
+ let len = fx.bcx.ins().iconst(
+ fx.pointer_type,
+ i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
+ );
+ CValue::by_val_pair(ptr, len, layout)
+ }
+ }
+}
+
+fn pointer_for_allocation<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ alloc: &'tcx Allocation,
+) -> crate::pointer::Pointer {
+ let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ fx.cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
+ let data_id = data_id_for_alloc_id(&mut fx.cx.module, alloc_id, alloc.mutability);
+
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ #[cfg(debug_assertions)]
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ crate::pointer::Pointer::new(global_ptr)
+}
+
+fn data_id_for_alloc_id(
+ module: &mut impl Module,
+ alloc_id: AllocId,
+ mutability: rustc_hir::Mutability,
+) -> DataId {
+ module
+ .declare_data(
+ &format!(".L__alloc_{:x}", alloc_id.0),
+ Linkage::Local,
+ mutability == rustc_hir::Mutability::Mut,
+ false,
+ )
+ .unwrap()
+}
+
+fn data_id_for_static(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ def_id: DefId,
+ definition: bool,
+) -> DataId {
+ let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+ let linkage = if definition {
+ crate::linkage::get_static_linkage(tcx, def_id)
+ } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
+ Linkage::Preemptible
+ } else {
+ Linkage::Import
+ };
+
+ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+ let symbol_name = tcx.symbol_name(instance).name;
+ let ty = instance.ty(tcx, ParamEnv::reveal_all());
+ let is_mutable = if tcx.is_mutable_static(def_id) {
+ true
+ } else {
+ !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ };
+ let align = tcx
+ .layout_of(ParamEnv::reveal_all().and(ty))
+ .unwrap()
+ .align
+ .pref
+ .bytes();
+
+ let attrs = tcx.codegen_fn_attrs(def_id);
+
+ let data_id = module
+ .declare_data(
+ &*symbol_name,
+ linkage,
+ is_mutable,
+ attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+ )
+ .unwrap();
+
+ if rlinkage.is_some() {
+ // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+
+ let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+ let ref_data_id = module
+ .declare_data(&ref_name, Linkage::Local, false, false)
+ .unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(align);
+ let data = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.define(
+ std::iter::repeat(0)
+ .take(pointer_ty(tcx).bytes() as usize)
+ .collect(),
+ );
+ data_ctx.write_data_addr(0, data, 0);
+ match module.define_data(ref_data_id, &data_ctx) {
+ // Every time the static is referenced there will be another definition of this global,
+ // so duplicate definitions are expected and allowed.
+ Err(ModuleError::DuplicateDefinition(_)) => {}
+ res => res.unwrap(),
+ }
+ ref_data_id
+ } else {
+ data_id
+ }
+}
+
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut impl Module, cx: &mut ConstantCx) {
+ while let Some(todo_item) = cx.todo.pop() {
+ let (data_id, alloc, section_name) = match todo_item {
+ TodoItem::Alloc(alloc_id) => {
+ //println!("alloc_id {}", alloc_id);
+ let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
+ GlobalAlloc::Memory(alloc) => alloc,
+ GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
+ };
+ let data_id = data_id_for_alloc_id(module, alloc_id, alloc.mutability);
+ (data_id, alloc, None)
+ }
+ TodoItem::Static(def_id) => {
+ //println!("static {:?}", def_id);
+
+ let section_name = tcx
+ .codegen_fn_attrs(def_id)
+ .link_section
+ .map(|s| s.as_str());
+
+ let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+ let data_id = data_id_for_static(tcx, module, def_id, true);
+ (data_id, alloc, section_name)
+ }
+ };
+
+ //("data_id {}", data_id);
+ if cx.done.contains(&data_id) {
+ continue;
+ }
+
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(alloc.align.bytes());
+
+ if let Some(section_name) = section_name {
+ // FIXME set correct segment for Mach-O files
+ data_ctx.set_segment_section("", &*section_name);
+ }
+
+ let bytes = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+ .to_vec();
+ data_ctx.define(bytes.into_boxed_slice());
+
+ for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
+ let addend = {
+ let endianness = tcx.data_layout.endian;
+ let offset = offset.bytes() as usize;
+ let ptr_size = tcx.data_layout.pointer_size;
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
+ read_target_uint(endianness, bytes).unwrap()
+ };
+
+ let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
+ let data_id = match reloc_target_alloc {
+ GlobalAlloc::Function(instance) => {
+ assert_eq!(addend, 0);
+ let func_id = crate::abi::import_function(tcx, module, instance);
+ let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+ continue;
+ }
+ GlobalAlloc::Memory(target_alloc) => {
+ cx.todo.push(TodoItem::Alloc(reloc));
+ data_id_for_alloc_id(module, reloc, target_alloc.mutability)
+ }
+ GlobalAlloc::Static(def_id) => {
+ if tcx
+ .codegen_fn_attrs(def_id)
+ .flags
+ .contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
+ }
+
+ // Don't push a `TodoItem::Static` here, as it will cause statics used by
+ // multiple crates to be duplicated between them. It isn't necessary anyway,
+ // as it will get pushed by `codegen_static` when necessary.
+ data_id_for_static(tcx, module, def_id, false)
+ }
+ };
+
+ let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+ }
+
++ // FIXME don't duplicate definitions in lazy jit mode
++ let _ = module.define_data(data_id, &data_ctx);
+ cx.done.insert(data_id);
+ }
+
+ assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+ fx: &FunctionCx<'_, 'tcx, impl Module>,
+ operand: &Operand<'tcx>,
+) -> Option<&'tcx Const<'tcx>> {
+ match operand {
+ Operand::Copy(_) | Operand::Move(_) => None,
+ Operand::Constant(const_) => Some(
+ fx.monomorphize(const_.literal)
+ .eval(fx.tcx, ParamEnv::reveal_all()),
+ ),
+ }
+}
--- /dev/null
- pub(super) fn relocate_for_jit(
- mut self,
- jit_module: &cranelift_simplejit::SimpleJITModule,
- ) -> Vec<u8> {
+//! Write the debuginfo into an object file.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use crate::backend::WriteDebugInfo;
+
+use super::DebugContext;
+
+impl DebugContext<'_> {
+ pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
+ let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+ let root = self.dwarf.unit.root();
+ let root = self.dwarf.unit.get_mut(root);
+ root.set(
+ gimli::DW_AT_ranges,
+ AttributeValue::RangeListRef(unit_range_list_id),
+ );
+
+ let mut sections = Sections::new(WriterRelocate::new(self.endian));
+ self.dwarf.write(&mut sections).unwrap();
+
+ let mut section_map = FxHashMap::default();
+ let _: Result<()> = sections.for_each_mut(|id, section| {
+ if !section.writer.slice().is_empty() {
+ let section_id = product.add_debug_section(id, section.writer.take());
+ section_map.insert(id, section_id);
+ }
+ Ok(())
+ });
+
+ let _: Result<()> = sections.for_each(|id, section| {
+ if let Some(section_id) = section_map.get(&id) {
+ for reloc in &section.relocs {
+ product.add_debug_reloc(&section_map, section_id, reloc);
+ }
+ }
+ Ok(())
+ });
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+ pub(crate) offset: u32,
+ pub(crate) size: u8,
+ pub(crate) name: DebugRelocName,
+ pub(crate) addend: i64,
+ pub(crate) kind: object::RelocationKind,
+}
+
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+ Section(SectionId),
+ Symbol(usize),
+}
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+ pub(super) relocs: Vec<DebugReloc>,
+ pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+ pub(super) fn new(endian: RunTimeEndian) -> Self {
+ WriterRelocate {
+ relocs: Vec::new(),
+ writer: EndianVec::new(endian),
+ }
+ }
+
+ /// Perform the collected relocations to be usable for JIT usage.
+ #[cfg(feature = "jit")]
++ pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
+ use std::convert::TryInto;
+
+ for reloc in self.relocs.drain(..) {
+ match reloc.name {
+ super::DebugRelocName::Section(_) => unreachable!(),
+ super::DebugRelocName::Symbol(sym) => {
+ let addr = jit_module.get_finalized_function(
+ cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
+ );
+ let val = (addr as u64 as i64 + reloc.addend) as u64;
+ self.writer
+ .write_udata_at(reloc.offset as usize, val, reloc.size)
+ .unwrap();
+ }
+ }
+ }
+ self.writer.into_vec()
+ }
+}
+
+impl Writer for WriterRelocate {
+ type Endian = RunTimeEndian;
+
+ fn endian(&self) -> Self::Endian {
+ self.writer.endian()
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+
+ fn write(&mut self, bytes: &[u8]) -> Result<()> {
+ self.writer.write(bytes)
+ }
+
+ fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+ self.writer.write_at(offset, bytes)
+ }
+
+ fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+ match address {
+ Address::Constant(val) => self.write_udata(val, size),
+ Address::Symbol { symbol, addend } => {
+ let offset = self.len() as u64;
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend: addend as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+ }
+ }
+
+ fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+ let offset = self.len() as u32;
+ self.relocs.push(DebugReloc {
+ offset,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+
+ fn write_offset_at(
+ &mut self,
+ offset: usize,
+ val: usize,
+ section: SectionId,
+ size: u8,
+ ) -> Result<()> {
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata_at(offset, 0, size)
+ }
+
+ fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+ match address {
+ // Address::Constant arm copied from gimli
+ Address::Constant(val) => {
+ // Indirect doesn't matter here.
+ let val = match eh_pe.application() {
+ gimli::DW_EH_PE_absptr => val,
+ gimli::DW_EH_PE_pcrel => {
+ // TODO: better handling of sign
+ let offset = self.len() as u64;
+ offset.wrapping_sub(val)
+ }
+ _ => {
+ return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+ }
+ };
+ self.write_eh_pointer_data(val, eh_pe.format(), size)
+ }
+ Address::Symbol { symbol, addend } => match eh_pe.application() {
+ gimli::DW_EH_PE_pcrel => {
+ let size = match eh_pe.format() {
+ gimli::DW_EH_PE_sdata4 => 4,
+ _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ };
+ self.relocs.push(DebugReloc {
+ offset: self.len() as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend,
+ kind: object::RelocationKind::Relative,
+ });
+ self.write_udata(0, size)
+ }
+ _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ },
+ }
+ }
+}
--- /dev/null
- pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+
+use crate::backend::WriteDebugInfo;
+
+pub(crate) struct UnwindContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ frame_table: FrameTable,
+ cie_id: Option<CieId>,
+}
+
+impl<'tcx> UnwindContext<'tcx> {
- if isa.flags().is_pic() {
++ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
+ let mut frame_table = FrameTable::default();
+
+ let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
- jit_module: &cranelift_simplejit::SimpleJITModule,
++ if pic_eh_frame {
+ cie.fde_address_encoding =
+ gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+ }
+ Some(frame_table.add_cie(cie))
+ } else {
+ None
+ };
+
+ UnwindContext {
+ tcx,
+ frame_table,
+ cie_id,
+ }
+ }
+
+ pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+ let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+ unwind_info
+ } else {
+ return;
+ };
+
+ match unwind_info {
+ UnwindInfo::SystemV(unwind_info) => {
+ self.frame_table.add_fde(
+ self.cie_id.unwrap(),
+ unwind_info.to_fde(Address::Symbol {
+ symbol: func_id.as_u32() as usize,
+ addend: 0,
+ }),
+ );
+ }
+ UnwindInfo::WindowsX64(_) => {
+ // FIXME implement this
+ }
+ unwind_info => unimplemented!("{:?}", unwind_info),
+ }
+ }
+
+ pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+ self.tcx,
+ )));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if !eh_frame.0.writer.slice().is_empty() {
+ let id = eh_frame.id();
+ let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+ let mut section_map = FxHashMap::default();
+ section_map.insert(id, section_id);
+
+ for reloc in &eh_frame.0.relocs {
+ product.add_debug_reloc(&section_map, &section_id, reloc);
+ }
+ }
+ }
+
+ #[cfg(feature = "jit")]
+ pub(crate) unsafe fn register_jit(
+ self,
++ jit_module: &cranelift_jit::JITModule,
+ ) -> Option<UnwindRegistry> {
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+ self.tcx,
+ )));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if eh_frame.0.writer.slice().is_empty() {
+ return None;
+ }
+
+ let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
+
+ // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+ eh_frame.extend(&[0, 0, 0, 0]);
+
+ let mut registrations = Vec::new();
+
+ // =======================================================================
+ // Everything after this line up to the end of the file is loosely based on
+ // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+ #[cfg(target_os = "macos")]
+ {
+ // On macOS, `__register_frame` takes a pointer to a single FDE
+ let start = eh_frame.as_ptr();
+ let end = start.add(eh_frame.len());
+ let mut current = start;
+
+ // Walk all of the entries in the frame table and register them
+ while current < end {
+ let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+ // Skip over the CIE
+ if current != start {
+ __register_frame(current);
+ registrations.push(current as usize);
+ }
+
+ // Move to the next table entry (+4 because the length itself is not inclusive)
+ current = current.add(len + 4);
+ }
+ }
+ #[cfg(not(target_os = "macos"))]
+ {
+ // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+ let ptr = eh_frame.as_ptr();
+ __register_frame(ptr);
+ registrations.push(ptr as usize);
+ }
+
+ Some(UnwindRegistry {
+ _frame_table: eh_frame,
+ registrations,
+ })
+ }
+}
+
+/// Represents a registry of function unwind information for System V ABI.
+pub(crate) struct UnwindRegistry {
+ _frame_table: Vec<u8>,
+ registrations: Vec<usize>,
+}
+
+extern "C" {
+ // libunwind import
+ fn __register_frame(fde: *const u8);
+ fn __deregister_frame(fde: *const u8);
+}
+
+impl Drop for UnwindRegistry {
+ fn drop(&mut self) {
+ unsafe {
+ // libgcc stores the frame entries as a linked list in decreasing sort order
+ // based on the PC value of the registered entry.
+ //
+ // As we store the registrations in increasing order, it would be O(N^2) to
+ // deregister in that order.
+ //
+ // To ensure that we just pop off the first element in the list upon every
+ // deregistration, walk our list of registrations backwards.
+ for fde in self.registrations.iter().rev() {
+ __deregister_frame(*fde as *const _);
+ }
+ }
+ }
+}
--- /dev/null
- use rustc_middle::mir::mono::CodegenUnit;
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::linker::LinkerInfo;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
- let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
++use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+
+use cranelift_object::{ObjectModule, ObjectProduct};
+
+use crate::prelude::*;
+
+use crate::backend::AddConstructor;
+
+fn new_module(tcx: TyCtxt<'_>, name: String) -> ObjectModule {
+ let module = crate::backend::make_module(tcx.sess, name);
+ assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
+ module
+}
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+fn emit_module(
+ tcx: TyCtxt<'_>,
+ name: String,
+ kind: ModuleKind,
+ module: ObjectModule,
+ debug: Option<DebugContext<'_>>,
+ unwind_context: UnwindContext<'_>,
+ map_product: impl FnOnce(ObjectProduct) -> ObjectProduct,
+) -> ModuleCodegenResult {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let product = map_product(product);
+
+ let tmp_file = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Object, Some(&name));
+ let obj = product.object.write().unwrap();
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess
+ .fatal(&format!("error writing object file: {}", err));
+ }
+
+ let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
+ None
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ tcx.sess,
+ &name,
+ &Some(tmp_file.clone()),
+ )
+ };
+
+ ModuleCodegenResult(
+ CompiledModule {
+ name,
+ kind,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ },
+ work_product,
+ )
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+ work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+ let incr_comp_session_dir = tcx.sess.incr_comp_session_dir();
+ let mut object = None;
+ let work_product = cgu.work_product(tcx);
+ if let Some(saved_file) = &work_product.saved_file {
+ let obj_out = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Object, Some(&cgu.name().as_str()));
+ object = Some(obj_out.clone());
+ let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+ tcx.sess.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err
+ ));
+ }
+ }
+
+ work_products.insert(cgu.work_product_id(), work_product);
+
+ CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object,
+ dwarf_object: None,
+ bytecode: None,
+ }
+}
+
+fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodegenResult {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let mut module = new_module(tcx, cgu_name.as_str().to_string());
+
+ // Initialize the global atomic mutex using a constructor for proc-macros.
+ // FIXME implement atomic instructions in Cranelift.
+ let mut init_atomics_mutex_from_constructor = None;
+ if tcx
+ .sess
+ .crate_types()
+ .contains(&rustc_session::config::CrateType::ProcMacro)
+ {
+ if mono_items.iter().any(|(mono_item, _)| match mono_item {
+ rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
+ .symbol_name(Instance::mono(tcx, *def_id))
+ .name
+ .contains("__rustc_proc_macro_decls_"),
+ _ => false,
+ }) {
+ init_atomics_mutex_from_constructor =
+ Some(crate::atomic_shim::init_global_lock_constructor(
+ &mut module,
+ &format!("{}_init_atomics_mutex", cgu_name.as_str()),
+ ));
+ }
+ }
+
- super::codegen_mono_item(&mut cx, mono_item, linkage);
++ let mut cx = crate::CodegenCx::new(
++ tcx,
++ module,
++ tcx.sess.opts.debuginfo != DebugInfo::None,
++ true,
++ );
+ super::predefine_mono_items(&mut cx, &mono_items);
+ for (mono_item, (linkage, visibility)) in mono_items {
+ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
- let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa());
++ match mono_item {
++ MonoItem::Fn(inst) => {
++ cx.tcx.sess.time("codegen fn", || {
++ crate::base::codegen_fn(&mut cx, inst, linkage)
++ });
++ }
++ MonoItem::Static(def_id) => {
++ crate::constant::codegen_static(&mut cx.constants_cx, def_id)
++ }
++ MonoItem::GlobalAsm(hir_id) => {
++ let item = cx.tcx.hir().expect_item(hir_id);
++ if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
++ cx.global_asm.push_str(&*asm.as_str());
++ cx.global_asm.push_str("\n\n");
++ } else {
++ bug!("Expected GlobalAsm found {:?}", item);
++ }
++ }
++ }
+ }
+ let (mut module, global_asm, debug, mut unwind_context) =
+ tcx.sess.time("finalize CodegenCx", || cx.finalize());
+ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context, false);
+
+ let codegen_result = emit_module(
+ tcx,
+ cgu.name().as_str().to_string(),
+ ModuleKind::Regular,
+ module,
+ debug,
+ unwind_context,
+ |mut product| {
+ if let Some(func_id) = init_atomics_mutex_from_constructor {
+ product.add_constructor(func_id);
+ }
+
+ product
+ },
+ );
+
+ codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
+
+ codegen_result
+}
+
+pub(super) fn run_aot(
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+ let mut work_products = FxHashMap::default();
+
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(LOCAL_CRATE).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let modules = super::time(tcx, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ tcx.sess
+ .cgu_reuse_tracker
+ .set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
+ }
+
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ cgu.name(),
+ module_codegen,
+ rustc_middle::dep_graph::hash_result,
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+
+ module
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let mut allocator_module = new_module(tcx, "allocator_shim".to_string());
++ let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let ModuleCodegenResult(module, work_product) = emit_module(
+ tcx,
+ "allocator_shim".to_string(),
+ ModuleKind::Allocator,
+ allocator_module,
+ None,
+ allocator_unwind_context,
+ |product| product,
+ );
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+ Some(module)
+ } else {
+ None
+ };
+
+ rustc_incremental::assert_dep_graph(tcx);
+ rustc_incremental::save_dep_graph(tcx);
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let obj = crate::backend::with_object(tcx.sess, &metadata_cgu_name, |object| {
+ crate::metadata::write_metadata(tcx, object);
+ });
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess
+ .fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ if tcx.sess.opts.output_types.should_codegen() {
+ rustc_incremental::assert_module_sources::assert_module_sources(tcx);
+ }
+
+ Box::new((
+ CodegenResults {
+ crate_name: tcx.crate_name(LOCAL_CRATE),
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ windows_subsystem: None, // Windows is not yet supported
+ linker_info: LinkerInfo::new(tcx),
+ crate_info: CrateInfo::new(tcx),
+ },
+ work_products,
+ ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+ use std::io::Write;
+ use std::process::{Command, Stdio};
+
+ if global_asm.is_empty() {
+ return;
+ }
+
+ if cfg!(not(feature = "inline_asm"))
+ || tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ {
+ if global_asm.contains("__rust_probestack") {
+ return;
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
+ } else {
+ tcx.sess
+ .fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ }
+ }
+
+ let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+ let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| {
+ if let Some(index) = line.find("//") {
+ &line[0..index]
+ } else {
+ line
+ }
+ })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child
+ .stdin
+ .take()
+ .unwrap()
+ .write_all(global_asm.as_bytes())
+ .unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ tcx.sess
+ .fatal(&format!("Failed to assemble `{}`", global_asm));
+ }
+
+ // Link the global asm and main object file together
+ let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+ std::fs::rename(&output_object_file, &main_object_file).unwrap();
+ let status = Command::new(linker)
+ .arg("-r") // Create a new object file
+ .arg("-o")
+ .arg(output_object_file)
+ .arg(&main_object_file)
+ .arg(&global_asm_object_file)
+ .status()
+ .unwrap();
+ if !status.success() {
+ tcx.sess.fatal(&format!(
+ "Failed to link `{}` and `{}` together",
+ main_object_file.display(),
+ global_asm_object_file.display(),
+ ));
+ }
+
+ std::fs::remove_file(global_asm_object_file).unwrap();
+ std::fs::remove_file(main_object_file).unwrap();
+}
+
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx
+ .dep_graph
+ .previous_work_product(work_product_id)
+ .is_none()
+ {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+ // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+ CguReuse::PreLto
+ } else {
+ CguReuse::No
+ }
+}
--- /dev/null
- use cranelift_simplejit::{SimpleJITBuilder, SimpleJITModule};
+//! The JIT driver uses [`cranelift_simplejit`] to JIT execute programs without writing any object
+//! files.
+
++use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+
+use rustc_codegen_ssa::CrateInfo;
++use rustc_middle::mir::mono::MonoItem;
+
- pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
++use cranelift_jit::{JITBuilder, JITModule};
+
+use crate::prelude::*;
++use crate::{CodegenCx, CodegenMode};
+
- let mut jit_builder = SimpleJITBuilder::with_isa(
- crate::build_isa(tcx.sess, false),
++thread_local! {
++ pub static CURRENT_MODULE: RefCell<Option<JITModule>> = RefCell::new(None);
++}
++
++pub(super) fn run_jit(tcx: TyCtxt<'_>, codegen_mode: CodegenMode) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`.");
+ }
+
+ #[cfg(unix)]
+ unsafe {
+ // When not using our custom driver rustc will open us without the RTLD_GLOBAL flag, so
+ // __cg_clif_global_atomic_mutex will not be exported. We fix this by opening ourself again
+ // as global.
+ // FIXME remove once atomic_shim is gone
+
+ let mut dl_info: libc::Dl_info = std::mem::zeroed();
+ assert_ne!(
+ libc::dladdr(run_jit as *const libc::c_void, &mut dl_info),
+ 0
+ );
+ assert_ne!(
+ libc::dlopen(dl_info.dli_fname, libc::RTLD_NOW | libc::RTLD_GLOBAL),
+ std::ptr::null_mut(),
+ );
+ }
+
+ let imported_symbols = load_imported_symbols_for_jit(tcx);
+
- let mut jit_module = SimpleJITModule::new(jit_builder);
++ let mut jit_builder = JITBuilder::with_isa(
++ crate::build_isa(tcx.sess),
+ cranelift_module::default_libcall_names(),
+ );
++ jit_builder.hotswap(matches!(codegen_mode, CodegenMode::JitLazy));
+ jit_builder.symbols(imported_symbols);
- let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
++ let mut jit_module = JITModule::new(jit_builder);
+ assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
+
+ let sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(
+ jit_module.target_config().pointer_type(), /*isize*/
+ )],
+ call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
+ };
+ let main_func_id = jit_module
+ .declare_function("main", Linkage::Import, &sig)
+ .unwrap();
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
+ let mono_items = cgus
+ .iter()
+ .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+ .flatten()
+ .collect::<FxHashMap<_, (_, _)>>()
+ .into_iter()
+ .collect::<Vec<(_, (_, _))>>();
+
- let (mut jit_module, global_asm, _debug, mut unwind_context) =
- super::time(tcx, "codegen mono items", || {
- super::predefine_mono_items(&mut cx, &mono_items);
- for (mono_item, (linkage, visibility)) in mono_items {
- let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
- super::codegen_mono_item(&mut cx, mono_item, linkage);
++ let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
+
- tcx.sess.time("finalize CodegenCx", || cx.finalize())
- });
++ super::time(tcx, "codegen mono items", || {
++ super::predefine_mono_items(&mut cx, &mono_items);
++ for (mono_item, (linkage, visibility)) in mono_items {
++ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
++ match mono_item {
++ MonoItem::Fn(inst) => match codegen_mode {
++ CodegenMode::Aot => unreachable!(),
++ CodegenMode::Jit => {
++ cx.tcx.sess.time("codegen fn", || {
++ crate::base::codegen_fn(&mut cx, inst, linkage)
++ });
++ }
++ CodegenMode::JitLazy => codegen_shim(&mut cx, inst),
++ },
++ MonoItem::Static(def_id) => {
++ crate::constant::codegen_static(&mut cx.constants_cx, def_id);
++ }
++ MonoItem::GlobalAsm(hir_id) => {
++ let item = cx.tcx.hir().expect_item(hir_id);
++ tcx.sess
++ .span_fatal(item.span, "Global asm is not supported in JIT mode");
++ }
+ }
- tcx.sess.fatal("Global asm is not supported in JIT mode");
++ }
++ });
++
++ let (mut jit_module, global_asm, _debug, mut unwind_context) =
++ tcx.sess.time("finalize CodegenCx", || cx.finalize());
++ jit_module.finalize_definitions();
++
+ if !global_asm.is_empty() {
- println!("Rustc codegen cranelift will JIT run the executable, because --jit was passed");
++ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
++
+ crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context, true);
+ crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+
+ let _unwind_register_guard = unsafe { unwind_context.register_jit(&jit_module) };
+
+ let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
+
++ println!("Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed");
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_main) };
+
+ let args = ::std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(args.split(' '))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
++ CURRENT_MODULE
++ .with(|current_module| assert!(current_module.borrow_mut().replace(jit_module).is_none()));
++
+ let ret = f(args.len() as c_int, argv.as_ptr());
+
+ std::process::exit(ret);
+}
+
++#[no_mangle]
++extern "C" fn __clif_jit_fn(instance_ptr: *const Instance<'static>) -> *const u8 {
++ rustc_middle::ty::tls::with(|tcx| {
++ // lift is used to ensure the correct lifetime for instance.
++ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
++
++ CURRENT_MODULE.with(|jit_module| {
++ let mut jit_module = jit_module.borrow_mut();
++ let jit_module = jit_module.as_mut().unwrap();
++ let mut cx = crate::CodegenCx::new(tcx, jit_module, false, false);
++
++ let (name, sig) = crate::abi::get_function_name_and_sig(
++ tcx,
++ cx.module.isa().triple(),
++ instance,
++ true,
++ );
++ let func_id = cx
++ .module
++ .declare_function(&name, Linkage::Export, &sig)
++ .unwrap();
++ cx.module.prepare_for_function_redefine(func_id).unwrap();
++
++ tcx.sess.time("codegen fn", || {
++ crate::base::codegen_fn(&mut cx, instance, Linkage::Export)
++ });
++
++ let (jit_module, global_asm, _debug_context, unwind_context) = cx.finalize();
++ assert!(global_asm.is_empty());
++ jit_module.finalize_definitions();
++ std::mem::forget(unsafe { unwind_context.register_jit(&jit_module) });
++ jit_module.get_finalized_function(func_id)
++ })
++ })
++}
++
+fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let crate_info = CrateInfo::new(tcx);
+ let formats = tcx.dependency_formats(LOCAL_CRATE);
+ let data = &formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &(cnum, _) in &crate_info.used_crates_dynamic {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = tcx.crate_name(cnum);
+ let mut err = tcx
+ .sess
+ .struct_err(&format!("Can't load static lib {}", name.as_str()));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let mut imported_symbols = Vec::new();
+ for path in dylib_paths {
+ use object::{Object, ObjectSymbol};
+ let lib = libloading::Library::new(&path).unwrap();
+ let obj = std::fs::read(path).unwrap();
+ let obj = object::File::parse(&obj).unwrap();
+ imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+ let name = symbol.name().unwrap().to_string();
+ if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+ return None;
+ }
+ let dlsym_name = if cfg!(target_os = "macos") {
+ // On macOS `dlsym` expects the name without leading `_`.
+ assert!(name.starts_with('_'), "{:?}", name);
+ &name[1..]
+ } else {
+ &name
+ };
+ let symbol: libloading::Symbol<'_, *const u8> =
+ unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+ Some((name, *symbol))
+ }));
+ std::mem::forget(lib)
+ }
+
+ tcx.sess.abort_if_errors();
+
+ imported_symbols
+}
++
++pub(super) fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx, impl Module>, inst: Instance<'tcx>) {
++ let tcx = cx.tcx;
++
++ let pointer_type = cx.module.target_config().pointer_type();
++
++ let (name, sig) =
++ crate::abi::get_function_name_and_sig(tcx, cx.module.isa().triple(), inst, true);
++ let func_id = cx
++ .module
++ .declare_function(&name, Linkage::Export, &sig)
++ .unwrap();
++
++ let instance_ptr = Box::into_raw(Box::new(inst));
++
++ let jit_fn = cx
++ .module
++ .declare_function(
++ "__clif_jit_fn",
++ Linkage::Import,
++ &Signature {
++ call_conv: cx.module.target_config().default_call_conv,
++ params: vec![AbiParam::new(pointer_type)],
++ returns: vec![AbiParam::new(pointer_type)],
++ },
++ )
++ .unwrap();
++
++ let mut trampoline = Function::with_name_signature(ExternalName::default(), sig.clone());
++ let mut builder_ctx = FunctionBuilderContext::new();
++ let mut trampoline_builder = FunctionBuilder::new(&mut trampoline, &mut builder_ctx);
++
++ let jit_fn = cx
++ .module
++ .declare_func_in_func(jit_fn, trampoline_builder.func);
++ let sig_ref = trampoline_builder.func.import_signature(sig);
++
++ let entry_block = trampoline_builder.create_block();
++ trampoline_builder.append_block_params_for_function_params(entry_block);
++ let fn_args = trampoline_builder
++ .func
++ .dfg
++ .block_params(entry_block)
++ .to_vec();
++
++ trampoline_builder.switch_to_block(entry_block);
++ let instance_ptr = trampoline_builder
++ .ins()
++ .iconst(pointer_type, instance_ptr as u64 as i64);
++ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr]);
++ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
++ let call_inst = trampoline_builder
++ .ins()
++ .call_indirect(sig_ref, jitted_fn, &fn_args);
++ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
++ trampoline_builder.ins().return_(&ret_vals);
++
++ cx.module
++ .define_function(
++ func_id,
++ &mut Context::for_function(trampoline),
++ &mut cranelift_codegen::binemit::NullTrapSink {},
++ )
++ .unwrap();
++}
--- /dev/null
- if config.use_jit {
- let is_executable = tcx
- .sess
- .crate_types()
- .contains(&rustc_session::config::CrateType::Executable);
- if !is_executable {
- tcx.sess.fatal("can't jit non-executable crate");
- }
+//! Drivers are responsible for calling [`codegen_mono_item`] and performing any further actions
+//! like JIT executing or writing object files.
+
+use std::any::Any;
+
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
++use crate::CodegenMode;
+
+mod aot;
+#[cfg(feature = "jit")]
+mod jit;
+
+pub(crate) fn codegen_crate(
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ config: crate::BackendConfig,
+) -> Box<dyn Any> {
+ tcx.sess.abort_if_errors();
+
- #[cfg(feature = "jit")]
- let _: ! = jit::run_jit(tcx);
++ match config.codegen_mode {
++ CodegenMode::Aot => aot::run_aot(tcx, metadata, need_metadata_module),
++ CodegenMode::Jit | CodegenMode::JitLazy => {
++ let is_executable = tcx
++ .sess
++ .crate_types()
++ .contains(&rustc_session::config::CrateType::Executable);
++ if !is_executable {
++ tcx.sess.fatal("can't jit non-executable crate");
++ }
+
- #[cfg(not(feature = "jit"))]
- tcx.sess
- .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
++ #[cfg(feature = "jit")]
++ let _: ! = jit::run_jit(tcx, config.codegen_mode);
+
-
- aot::run_aot(tcx, metadata, need_metadata_module)
++ #[cfg(not(feature = "jit"))]
++ tcx.sess
++ .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
++ }
+ }
- fn codegen_mono_item<'tcx, M: Module>(
- cx: &mut crate::CodegenCx<'tcx, M>,
- mono_item: MonoItem<'tcx>,
- linkage: Linkage,
- ) {
- match mono_item {
- MonoItem::Fn(inst) => {
- cx.tcx
- .sess
- .time("codegen fn", || crate::base::codegen_fn(cx, inst, linkage));
- }
- MonoItem::Static(def_id) => crate::constant::codegen_static(&mut cx.constants_cx, def_id),
- MonoItem::GlobalAsm(hir_id) => {
- let item = cx.tcx.hir().expect_item(hir_id);
- if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
- cx.global_asm.push_str(&*asm.as_str());
- cx.global_asm.push_str("\n\n");
- } else {
- bug!("Expected GlobalAsm found {:?}", item);
- }
- }
- }
- }
-
+}
+
+fn predefine_mono_items<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx, impl Module>,
+ mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
+ cx.tcx.sess.time("predefine functions", || {
+ for &(mono_item, (linkage, visibility)) in mono_items {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let (name, sig) = get_function_name_and_sig(
+ cx.tcx,
+ cx.module.isa().triple(),
+ instance,
+ false,
+ );
+ let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
+ cx.module.declare_function(&name, linkage, &sig).unwrap();
+ }
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+ }
+ }
+ });
+}
+
+fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
+ if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
+ .as_ref()
+ .map(|val| &**val)
+ == Ok("1")
+ {
+ println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+ let before = std::time::Instant::now();
+ let res = tcx.sess.time(name, f);
+ let after = std::time::Instant::now();
+ println!(
+ "[{:<30}: {}] end time: {:?}",
+ tcx.crate_name(LOCAL_CRATE),
+ name,
+ after - before
+ );
+ res
+ } else {
+ tcx.sess.time(name, f)
+ }
+}
--- /dev/null
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, a.layout());
- let lane_ty = fx.clif_type(lane_layout.ty).unwrap();
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ intrinsic: &str,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
+ let ret = destination.unwrap().0;
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ };
+
+ // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+ llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
++ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
++ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+ for lane in (0..lane_count).rev() {
+ let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ };
+ llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
+ let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
+ let flt_cc = match kind_const.val.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+ }
+ 3 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+ }
+ 4 => FloatCC::NotEqual,
+ 5 => {
+ unimplemented!("not less than");
+ }
+ 6 => {
+ unimplemented!("not less than or equal");
+ }
+ kind => unreachable!("kind {:?}", kind),
+ };
+
+ simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ });
+ };
+ llvm.x86.sse2.psrli.d, (c a, o imm8) {
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+ let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ };
+ CValue::by_val(res_lane, res_lane_layout)
+ });
+ };
+ llvm.x86.sse2.pslli.d, (c a, o imm8) {
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.pslli.d imm8 not const");
+ simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+ let res_lane = match imm8.val.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ };
+ CValue::by_val(res_lane, res_lane_layout)
+ });
+ };
+ llvm.x86.sse2.storeu.dq, (v mem_addr, c a) {
+ // FIXME correctly handle the misalignment instead of assuming an aligned store
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
--- /dev/null
- fn lane_type_and_count<'tcx>(
- tcx: TyCtxt<'tcx>,
- layout: TyAndLayout<'tcx>,
- ) -> (TyAndLayout<'tcx>, u16) {
- assert!(layout.ty.is_simd());
- let lane_count = match layout.fields {
- rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
- _ => unreachable!("lane_type_and_count({:?})", layout),
- };
- let lane_layout = layout
- .field(
- &ty::layout::LayoutCx {
- tcx,
- param_env: ParamEnv::reveal_all(),
- },
- 0,
- )
- .unwrap();
- (lane_layout, lane_count)
- }
-
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use crate::prelude::*;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+macro intrinsic_pat {
+ (_) => {
+ _
+ },
+ ($name:ident) => {
+ stringify!($name)
+ },
+ ($name:literal) => {
+ stringify!($name)
+ },
+ ($x:ident . $($xs:tt).*) => {
+ concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
+ }
+}
+
+macro intrinsic_arg {
+ (o $fx:expr, $arg:ident) => {
+ $arg
+ },
+ (c $fx:expr, $arg:ident) => {
+ codegen_operand($fx, $arg)
+ },
+ (v $fx:expr, $arg:ident) => {
+ codegen_operand($fx, $arg).load_scalar($fx)
+ }
+}
+
+macro intrinsic_substs {
+ ($substs:expr, $index:expr,) => {},
+ ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
+ let $first = $substs.type_at($index);
+ intrinsic_substs!($substs, $index+1, $($rest),*);
+ }
+}
+
+macro intrinsic_match {
+ ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
+ _ => $unknown:block;
+ $(
+ $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
+ )*) => {
+ let _ = $substs; // Silence warning when substs is unused.
+ match $intrinsic {
+ $(
+ $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
+ #[allow(unused_parens, non_snake_case)]
+ {
+ $(
+ intrinsic_substs!($substs, 0, $($subst),*);
+ )?
+ if let [$($arg),*] = $args {
+ let ($($arg,)*) = (
+ $(intrinsic_arg!($a $fx, $arg),)*
+ );
+ #[warn(unused_parens, non_snake_case)]
+ {
+ $content
+ }
+ } else {
+ bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+ }
+ }
+ }
+ )*
+ _ => $unknown,
+ }
+ }
+}
+
+macro call_intrinsic_match {
+ ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
+ $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
+ )*) => {
+ match $intrinsic {
+ $(
+ stringify!($name) => {
+ assert!($substs.is_noop());
+ if let [$(ref $arg),*] = *$args {
+ let ($($arg,)*) = (
+ $(codegen_operand($fx, $arg),)*
+ );
+ let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
+ $ret.write_cvalue($fx, res);
+
+ if let Some((_, dest)) = $destination {
+ let ret_block = $fx.get_block(dest);
+ $fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ unreachable!();
+ }
+ } else {
+ bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+ }
+ }
+ )*
+ _ => {}
+ }
+ }
+}
+
+macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
+ crate::atomic_shim::lock_global_lock($fx);
+
+ let clif_ty = $fx.clif_type($T).unwrap();
+ let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+ let new = $fx.bcx.ins().$op(old, $src);
+ $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+ $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+
+ crate::atomic_shim::unlock_global_lock($fx);
+}
+
+macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
+ crate::atomic_shim::lock_global_lock($fx);
+
+ // Read old
+ let clif_ty = $fx.clif_type($T).unwrap();
+ let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
+
+ // Compare
+ let is_eq = $fx.bcx.ins().icmp(IntCC::SignedGreaterThan, old, $src);
+ let new = $fx.bcx.ins().select(is_eq, old, $src);
+
+ // Write new
+ $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
+
+ let ret_val = CValue::by_val(old, $ret.layout());
+ $ret.write_cvalue($fx, ret_val);
+
+ crate::atomic_shim::unlock_global_lock($fx);
+}
+
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ match $ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ $fx.tcx.sess.span_err(
+ $span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ $intrinsic, $ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+}
+
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+ if !$ty.is_simd() {
+ $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+ return;
+ }
+}
+
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match &layout.abi {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+fn simd_for_each_lane<'tcx, M: Module>(
+ fx: &mut FunctionCx<'_, 'tcx, M>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(
+ &mut FunctionCx<'_, 'tcx, M>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
+ Value,
+ ) -> CValue<'tcx>,
+) {
+ let layout = val.layout();
+
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
++ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++ let lane_layout = fx.layout_of(lane_ty);
++ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
++ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
+ let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+
+ ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane<'tcx, M: Module>(
+ fx: &mut FunctionCx<'_, 'tcx, M>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(
+ &mut FunctionCx<'_, 'tcx, M>,
+ TyAndLayout<'tcx>,
+ TyAndLayout<'tcx>,
+ Value,
+ Value,
+ ) -> CValue<'tcx>,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
- let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, val.layout());
++ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++ let lane_layout = fx.layout_of(lane_ty);
++ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
++ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane in 0..lane_count {
+ let lane = mir::Field::new(lane.try_into().unwrap());
+ let x_lane = x.value_field(fx, lane).load_scalar(fx);
+ let y_lane = y.value_field(fx, lane).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+
+ ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_reduce<'tcx, M: Module>(
+ fx: &mut FunctionCx<'_, 'tcx, M>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, TyAndLayout<'tcx>, Value, Value) -> Value,
+) {
- .value_field(fx, mir::Field::new(lane_idx.into()))
++ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
++ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ for lane_idx in 1..lane_count {
+ let lane = val
- let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, val.layout());
++ .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
+ .load_scalar(fx);
+ res_val = f(fx, lane_layout, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+fn simd_reduce_bool<'tcx, M: Module>(
+ fx: &mut FunctionCx<'_, 'tcx, M>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: impl Fn(&mut FunctionCx<'_, 'tcx, M>, Value, Value) -> Value,
+) {
- .value_field(fx, mir::Field::new(lane_idx.into()))
++ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val
- "unreachable" => {
- trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
- }
++ .value_field(fx, mir::Field::new(lane_idx.try_into().unwrap()))
+ .load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ layout: TyAndLayout<'tcx>,
+ val: Value,
+) -> CValue<'tcx> {
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ CValue::by_val(res, layout)
+}
+
+macro simd_cmp {
+ ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ let vector_ty = clif_vector_type($fx.tcx, $x.layout());
+
+ if let Some(vector_ty) = vector_ty {
+ let x = $x.load_scalar($fx);
+ let y = $y.load_scalar($fx);
+ let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
+
+ // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
+ let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
+
+ $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
+ } else {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ },
+ );
+ }
+ },
+ ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ // FIXME use vector icmp when possible
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+ },
+ );
+ },
+}
+
+macro simd_int_binop {
+ ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
+ },
+ ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+ },
+}
+
+macro simd_int_flt_binop {
+ ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
+ },
+ ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+ },
+}
+
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind() {
+ ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+ span: Span,
+) {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+
+ let intrinsic = fx.tcx.item_name(def_id).as_str();
+ let intrinsic = &intrinsic[..];
+
+ let ret = match destination {
+ Some((place, _)) => place,
+ None => {
+ // Insert non-returning intrinsics here
+ match intrinsic {
+ "abort" => {
+ trap_abort(fx, "Called intrinsic::abort.");
+ }
- discriminant_value, (c ptr) {
- let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
- let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
- let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
- ret.write_cvalue(fx, discr);
- };
+ "transmute" => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
+ }
+ _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+ }
+ return;
+ }
+ };
+
+ if intrinsic.starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+ let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ call_intrinsic_match! {
+ fx, intrinsic, substs, ret, destination, args,
+ expf32(flt) -> f32 => expf,
+ expf64(flt) -> f64 => exp,
+ exp2f32(flt) -> f32 => exp2f,
+ exp2f64(flt) -> f64 => exp2,
+ sqrtf32(flt) -> f32 => sqrtf,
+ sqrtf64(flt) -> f64 => sqrt,
+ powif32(a, x) -> f32 => __powisf2, // compiler-builtins
+ powif64(a, x) -> f64 => __powidf2, // compiler-builtins
+ powf32(a, x) -> f32 => powf,
+ powf64(a, x) -> f64 => pow,
+ logf32(flt) -> f32 => logf,
+ logf64(flt) -> f64 => log,
+ log2f32(flt) -> f32 => log2f,
+ log2f64(flt) -> f64 => log2,
+ log10f32(flt) -> f32 => log10f,
+ log10f64(flt) -> f64 => log10,
+ fabsf32(flt) -> f32 => fabsf,
+ fabsf64(flt) -> f64 => fabs,
+ fmaf32(x, y, z) -> f32 => fmaf,
+ fmaf64(x, y, z) -> f64 => fma,
+ copysignf32(x, y) -> f32 => copysignf,
+ copysignf64(x, y) -> f64 => copysign,
+
+ // rounding variants
+ // FIXME use clif insts
+ floorf32(flt) -> f32 => floorf,
+ floorf64(flt) -> f64 => floor,
+ ceilf32(flt) -> f32 => ceilf,
+ ceilf64(flt) -> f64 => ceil,
+ truncf32(flt) -> f32 => truncf,
+ truncf64(flt) -> f64 => trunc,
+ roundf32(flt) -> f32 => roundf,
+ roundf64(flt) -> f64 => round,
+
+ // trigonometry
+ sinf32(flt) -> f32 => sinf,
+ sinf64(flt) -> f64 => sin,
+ cosf32(flt) -> f32 => cosf,
+ cosf64(flt) -> f64 => cos,
+ tanf32(flt) -> f32 => tanf,
+ tanf64(flt) -> f64 => tan,
+ }
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+ };
+
+ assume, (c _a) {};
+ likely | unlikely, (c a) {
+ ret.write_cvalue(fx, a);
+ };
+ breakpoint, () {
+ fx.bcx.ins().debugtrap();
+ };
+ copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ if intrinsic.contains("nonoverlapping") {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+ }
+ };
+ // NOTE: the volatile variants have src and dst swapped
+ volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic.contains("nonoverlapping") {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.cx.module.target_config(), dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.cx.module.target_config(), dst, src, byte_amount);
+ }
+ };
- _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
- assert_eq!(x.layout().ty, y.layout().ty);
- let bin_op = match intrinsic {
- "wrapping_add" => BinOp::Add,
- "wrapping_sub" => BinOp::Sub,
- "wrapping_mul" => BinOp::Mul,
- _ => unreachable!("intrinsic {}", intrinsic),
- };
- let res = crate::num::codegen_int_binop(
- fx,
- bin_op,
- x,
- y,
- );
- ret.write_cvalue(fx, res);
- };
+ size_of_val, <T> (c ptr) {
+ let layout = fx.layout_of(T);
+ let size = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ };
+ min_align_of_val, <T> (c ptr) {
+ let layout = fx.layout_of(T);
+ let align = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ };
+
+ _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ "unchecked_add" => BinOp::Add,
+ "unchecked_sub" => BinOp::Sub,
+ "unchecked_div" | "exact_div" => BinOp::Div,
+ "unchecked_rem" => BinOp::Rem,
+ "unchecked_shl" => BinOp::Shl,
+ "unchecked_shr" => BinOp::Shr,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ "add_with_overflow" => BinOp::Add,
+ "sub_with_overflow" => BinOp::Sub,
+ "mul_with_overflow" => BinOp::Mul,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ };
- size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
+ _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ "saturating_add" => BinOp::Add,
+ "saturating_sub" => BinOp::Sub,
+ _ => unreachable!("intrinsic {}", intrinsic),
+ };
+
+ let signed = type_sign(T);
+
+ let checked_res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ lhs,
+ rhs,
+ );
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+ let clif_ty = fx.clif_type(T).unwrap();
+
+ // `select.i8` is not implemented by Cranelift.
+ let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
+ let val = match (intrinsic, signed) {
+ ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
+ ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
+ ("saturating_add", true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ ("saturating_sub", true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ let res = CValue::by_val(val, fx.layout_of(T));
+
+ ret.write_cvalue(fx, res);
+ };
+ rotate_left, <T>(v x, v y) {
+ let layout = fx.layout_of(T);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
+ rotate_right, <T>(v x, v y) {
+ let layout = fx.layout_of(T);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
+
+ // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+ // doesn't have UB, both are codegen'ed the same way.
+ offset | arith_offset, (c base, v offset) {
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ };
+
+ transmute, (c from) {
+ ret.write_cvalue_transmute(fx, from);
+ };
+ write_bytes | volatile_set_memory, (c dst, v val, v count) {
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.cx.module.target_config(), dst_ptr, val, count);
+ };
+ ctlz | ctlz_nonzero, <T> (v arg) {
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+ // FIXME verify this algorithm is correct
+ let (lsb, msb) = fx.bcx.ins().isplit(arg);
+ let lsb_lz = fx.bcx.ins().clz(lsb);
+ let msb_lz = fx.bcx.ins().clz(msb);
+ let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
+ let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
+ let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
+ fx.bcx.ins().uextend(types::I128, res)
+ } else {
+ fx.bcx.ins().clz(arg)
+ };
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ cttz | cttz_nonzero, <T> (v arg) {
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+ // FIXME verify this algorithm is correct
+ let (lsb, msb) = fx.bcx.ins().isplit(arg);
+ let lsb_tz = fx.bcx.ins().ctz(lsb);
+ let msb_tz = fx.bcx.ins().ctz(msb);
+ let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
+ let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
+ let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
+ fx.bcx.ins().uextend(types::I128, res)
+ } else {
+ fx.bcx.ins().ctz(arg)
+ };
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ ctpop, <T> (v arg) {
+ let res = fx.bcx.ins().popcnt(arg);
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ bitreverse, <T> (v arg) {
+ let res = fx.bcx.ins().bitrev(arg);
+ let res = CValue::by_val(res, fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ bswap, <T> (v arg) {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ };
+ let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+ ret.write_cvalue(fx, res);
+ };
+ assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+ let layout = fx.layout_of(T);
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+ span,
+ ));
+ return;
+ }
+
+ if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+ with_no_trimmed_paths(|| crate::base::codegen_panic(
+ fx,
+ &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+ span,
+ ));
+ return;
+ }
+ };
+
+ volatile_load | unaligned_volatile_load, (c ptr) {
+ // Cranelift treats loads as volatile by default
+ // FIXME ignore during stack2reg optimization
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout =
+ fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ };
+ volatile_store | unaligned_volatile_store, (v ptr, c val) {
+ // Cranelift treats stores as volatile by default
+ // FIXME ignore during stack2reg optimization
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ };
+
++ pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(
+ fx,
+ const_val,
+ ret.layout().ty,
+ );
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_offset_from, <T> (v ptr, v base) {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
+ let pointee_size: u64 = fx.layout_of(T).size.bytes();
+ let diff = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_eq, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_ne, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ caller_location, () {
+ let caller_location = fx.get_caller_location(span);
+ ret.write_cvalue(fx, caller_location);
+ };
+
+ _ if intrinsic.starts_with("atomic_fence"), () {
+ crate::atomic_shim::lock_global_lock(fx);
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
+ crate::atomic_shim::lock_global_lock(fx);
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_load"), (c ptr) {
+ crate::atomic_shim::lock_global_lock(fx);
+
+ let inner_layout =
+ fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+ validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
+ crate::atomic_shim::lock_global_lock(fx);
+
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, T);
+
+ crate::atomic_shim::lock_global_lock(fx);
+
+ // Read old
+ let clif_ty = fx.clif_type(T).unwrap();
+ let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+ ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+ // Write new
+ let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
+ dest.write_cvalue(fx, src);
+
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+ validate_atomic_type!(fx, intrinsic, span, T);
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ crate::atomic_shim::lock_global_lock(fx);
+
+ // Read old
+ let clif_ty = fx.clif_type(T).unwrap();
+ let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+
+ // Compare
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+ let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
+
+ // Write new
+ fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
+
+ let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+
+ _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let amount = amount.load_scalar(fx);
+ atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let amount = amount.load_scalar(fx);
+ atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, T);
+
+ let src = src.load_scalar(fx);
+
+ crate::atomic_shim::lock_global_lock(fx);
+
+ let clif_ty = fx.clif_type(T).unwrap();
+ let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
+ let and = fx.bcx.ins().band(old, src);
+ let new = fx.bcx.ins().bnot(and);
+ fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
+ ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
+ };
+
+ _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
+ };
+ _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
+ validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+ let src = src.load_scalar(fx);
+ atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
+ };
+
+ minnumf32, (v a, v b) {
+ let val = fx.bcx.ins().fmin(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ minnumf64, (v a, v b) {
+ let val = fx.bcx.ins().fmin(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf32, (v a, v b) {
+ let val = fx.bcx.ins().fmax(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf64, (v a, v b) {
+ let val = fx.bcx.ins().fmax(a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+
+ try, (v f, v data, v _catch_fn) {
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: CallConv::triple_default(fx.triple()),
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ };
+
+ fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+ let res = crate::num::codegen_float_binop(fx, match intrinsic {
+ "fadd_fast" => BinOp::Add,
+ "fsub_fast" => BinOp::Sub,
+ "fmul_fast" => BinOp::Mul,
+ "fdiv_fast" => BinOp::Div,
+ "frem_fast" => BinOp::Rem,
+ _ => unreachable!(),
+ }, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ float_to_int_unchecked, (v f) {
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
--- /dev/null
- let (lane_type, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use super::*;
+use crate::prelude::*;
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+
+ let intrinsic = fx.tcx.item_name(def_id).as_str();
+ let intrinsic = &intrinsic[..];
+
+ intrinsic_match! {
+ fx, intrinsic, substs, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ };
+
+ simd_cast, (c a) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+ let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
+
+ let from_signed = type_sign(lane_layout.ty);
+ let to_signed = type_sign(ret_lane_layout.ty);
+
+ let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
+ CValue::by_val(ret_lane, ret_lane_layout)
+ });
+ };
+
+ simd_eq, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, Equal|Equal(x, y) -> ret);
+ };
+ simd_ne, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
+ };
+ simd_lt, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
+ };
+ simd_le, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
+ };
+ simd_gt, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
+ };
+ simd_ge, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_cmp!(
+ fx,
+ UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
+ (x, y) -> ret
+ );
+ };
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+
+ let n: u16 = intrinsic["simd_shuffle".len()..].parse().unwrap();
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
- assert_eq!(lane_type, ret_lane_type);
- assert_eq!(n, ret_lane_count);
++ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
- assert!(idx < total_len, "idx {} out of range 0..{}", idx, total_len);
++ assert_eq!(lane_ty, ret_lane_ty);
++ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const.val {
+ ty::ConstKind::Value(ConstValue::ByRef { alloc, offset }) => {
+ let ptr = Pointer::new(AllocId(0 /* dummy */), offset);
+ let size = Size::from_bytes(4 * u64::from(ret_lane_count) /* size_of([u32; ret_lane_count]) */);
+ alloc.get_bytes(fx, ptr, size).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count).map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4*i.. 4*i + 4],
+ ).expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ }).collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
- let in_lane = if in_idx < lane_count {
++ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
- y.value_field(fx, mir::Field::new((in_idx - lane_count).into()))
++ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_field(fx, mir::Field::new(in_idx.into()))
+ } else {
- let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, base.layout());
++ y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
+ };
+ let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ };
+
+ simd_insert, (c base, o idx, c val) {
+ // FIXME validate
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(
+ span,
+ "Index argument for `simd_insert` is not a constant",
+ );
+ };
+
+ let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
- let (_lane_type, lane_count) = lane_type_and_count(fx.tcx, v.layout());
++ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ };
+
+ simd_extract, (c v, o idx) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(
+ span,
+ "Index argument for `simd_extract` is not a constant",
+ );
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
+ let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
- let (_lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
++ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret.write_cvalue(fx, ret_lane);
+ };
+
+ simd_add, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
+ };
+ simd_sub, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
+ };
+ simd_mul, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
+ };
+ simd_div, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
+ };
+ simd_shl, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, ishl(x, y) -> ret);
+ };
+ simd_shr, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
+ };
+ simd_and, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, band(x, y) -> ret);
+ };
+ simd_or, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, bor(x, y) -> ret);
+ };
+ simd_xor, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_int_binop!(fx, bxor(x, y) -> ret);
+ };
+
+ simd_fma, (c a, c b, c c) {
+ validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ let layout = a.layout();
+
- let lane = mir::Field::new(lane.into());
++ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
++ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
++ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+
+ for lane in 0..lane_count {
++ let lane = mir::Field::new(lane.try_into().unwrap());
+ let a_lane = a.value_field(fx, lane).load_scalar(fx);
+ let b_lane = b.value_field(fx, lane).load_scalar(fx);
+ let c_lane = c.value_field(fx, lane).load_scalar(fx);
+
+ let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
+ let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
+
+ ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+ }
+ };
+
+ simd_fmin, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_flt_binop!(fx, fmin(x, y) -> ret);
+ };
+ simd_fmax, (c x, c y) {
+ validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+ simd_flt_binop!(fx, fmax(x, y) -> ret);
+ };
+
+ simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ if lane_layout.ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ };
+
+ simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+ if lane_layout.ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ };
+
+ simd_reduce_all, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
+ };
+
+ simd_reduce_any, (c v) {
+ validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+ simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
+ };
+
+ // simd_fabs
+ // simd_saturating_add
+ // simd_bitmask
+ // simd_select
+ // simd_rem
+ }
+}
--- /dev/null
- hash_drain_filter
+#![feature(
+ rustc_private,
+ decl_macro,
+ type_alias_impl_trait,
+ associated_type_bounds,
+ never_type,
+ try_blocks,
- fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool) -> Self {
- let unwind_context = UnwindContext::new(tcx, module.isa());
++ hash_drain_filter,
++ str_split_once
+)]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+#[cfg(feature = "jit")]
+extern crate libc;
+extern crate snap;
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_symbol_mangling;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
++use std::str::FromStr;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorReported;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+
+use cranelift_codegen::settings::{self, Configurable};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod atomic_shim;
+mod backend;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod metadata;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+ pub(crate) use std::convert::{TryFrom, TryInto};
+
+ pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy};
+ pub(crate) use rustc_span::Span;
+
+ pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
+ pub(crate) use rustc_middle::mir::{self, *};
+ pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
+ pub(crate) use rustc_middle::ty::{
+ self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable,
+ };
+ pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
+
+ pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+ pub(crate) use rustc_index::vec::Idx;
+
+ pub(crate) use cranelift_codegen::entity::EntitySet;
+ pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+ pub(crate) use cranelift_codegen::ir::function::Function;
+ pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
+ AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+ StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
+ pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
+ pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+ pub(crate) use cranelift_module::{self, DataContext, DataId, FuncId, Linkage, Module};
+
+ pub(crate) use crate::abi::*;
+ pub(crate) use crate::base::{codegen_operand, codegen_place};
+ pub(crate) use crate::cast::*;
+ pub(crate) use crate::common::*;
+ pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+ pub(crate) use crate::pointer::Pointer;
+ pub(crate) use crate::trap::*;
+ pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
/// Guard that lazily renders a message and prints it only when the current
/// thread is unwinding from a panic; a normal drop stays silent.
struct PrintOnPanic<F: Fn() -> String>(F);

impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
    fn drop(&mut self) {
        // Only pay for rendering the message when a panic is actually in flight.
        if std::thread::panicking() {
            let msg = (self.0)();
            println!("{}", msg);
        }
    }
}
+
+struct CodegenCx<'tcx, M: Module> {
+ tcx: TyCtxt<'tcx>,
+ module: M,
+ global_asm: String,
+ constants_cx: ConstantCx,
+ cached_context: Context,
+ vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
+ debug_context: Option<DebugContext<'tcx>>,
+ unwind_context: UnwindContext<'tcx>,
+}
+
+impl<'tcx, M: Module> CodegenCx<'tcx, M> {
- pub use_jit: bool,
++ fn new(tcx: TyCtxt<'tcx>, module: M, debug_info: bool, pic_eh_frame: bool) -> Self {
++ let unwind_context = UnwindContext::new(tcx, module.isa(), pic_eh_frame);
+ let debug_context = if debug_info {
+ Some(DebugContext::new(tcx, module.isa()))
+ } else {
+ None
+ };
+ CodegenCx {
+ tcx,
+ module,
+ global_asm: String::new(),
+ constants_cx: ConstantCx::default(),
+ cached_context: Context::new(),
+ vtables: FxHashMap::default(),
+ debug_context,
+ unwind_context,
+ }
+ }
+
+ fn finalize(mut self) -> (M, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
+ self.constants_cx.finalize(self.tcx, &mut self.module);
+ (
+ self.module,
+ self.global_asm,
+ self.debug_context,
+ self.unwind_context,
+ )
+ }
+}
+
/// Selects how the backend emits code: ahead-of-time object files or one of
/// the two JIT flavors (eager or lazy function compilation).
// PartialEq/Eq added: this is a plain value-like enum and callers/tests need
// to compare modes; deriving is backward-compatible.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CodegenMode {
    Aot,
    Jit,
    JitLazy,
}

impl Default for CodegenMode {
    /// AOT compilation is what rustc normally expects from a backend.
    fn default() -> Self {
        CodegenMode::Aot
    }
}

impl FromStr for CodegenMode {
    type Err = String;

    /// Parses the value of the `mode=` backend option.
    ///
    /// Accepts `aot`, `jit` and `jit-lazy`; anything else is rejected with a
    /// descriptive error message.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "aot" => Ok(CodegenMode::Aot),
            "jit" => Ok(CodegenMode::Jit),
            "jit-lazy" => Ok(CodegenMode::JitLazy),
            _ => Err(format!("Unknown codegen mode `{}`", s)),
        }
    }
}
++
++#[derive(Copy, Clone, Debug, Default)]
+pub struct BackendConfig {
- pub config: BackendConfig,
++ pub codegen_mode: CodegenMode,
++}
++
++impl BackendConfig {
++ fn from_opts(opts: &[String]) -> Result<Self, String> {
++ let mut config = BackendConfig::default();
++ for opt in opts {
++ if let Some((name, value)) = opt.split_once('=') {
++ match name {
++ "mode" => config.codegen_mode = value.parse()?,
++ _ => return Err(format!("Unknown option `{}`", name)),
++ }
++ } else {
++ return Err(format!("Invalid option `{}`", opt));
++ }
++ }
++ Ok(config)
++ }
+}
+
+pub struct CraneliftCodegenBackend {
- let res = driver::codegen_crate(tcx, metadata, need_metadata_module, self.config);
++ pub config: Option<BackendConfig>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+ fn init(&self, sess: &Session) {
+ if sess.lto() != rustc_session::config::Lto::No && sess.opts.cg.embed_bitcode {
+ sess.warn("LTO is not supported. You may get a linker error.");
+ }
+ }
+
+ fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
+ Box::new(crate::metadata::CraneliftMetadataLoader)
+ }
+
+ fn provide(&self, _providers: &mut Providers) {}
+ fn provide_extern(&self, _providers: &mut Providers) {}
+
+ fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
+ vec![]
+ }
+
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
- fn build_isa(sess: &Session, enable_pic: bool) -> Box<dyn isa::TargetIsa + 'static> {
++ let config = if let Some(config) = self.config {
++ config
++ } else {
++ BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
++ .unwrap_or_else(|err| tcx.sess.fatal(&err))
++ };
++ let res = driver::codegen_crate(tcx, metadata, need_metadata_module, config);
+
+ rustc_symbol_mangling::test::report_symbol_names(tcx);
+
+ res
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ _sess: &Session,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+ Ok(*ongoing_codegen
+ .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+ .unwrap())
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorReported> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ let _timer = sess.prof.generic_activity("link_crate");
+
+ sess.time("linking", || {
+ let target_cpu = crate::target_triple(sess).to_string();
+ link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+ sess,
+ &codegen_results,
+ outputs,
+ &codegen_results.crate_name.as_str(),
+ &target_cpu,
+ );
+ });
+
+ Ok(())
+ }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+ sess.target.llvm_target.parse().unwrap()
+}
+
- if enable_pic {
- flags_builder.enable("is_pic").unwrap();
- } else {
- flags_builder.set("is_pic", "false").unwrap();
- }
++fn build_isa(sess: &Session) -> Box<dyn isa::TargetIsa + 'static> {
+ use target_lexicon::BinaryFormat;
+
+ let target_triple = crate::target_triple(sess);
+
+ let mut flags_builder = settings::builder();
- // FIXME(CraneStation/cranelift#732) fix LICM in presence of jump tables
- /*
++ flags_builder.enable("is_pic").unwrap();
+ flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+ flags_builder
+ .set(
+ "enable_verifier",
+ if cfg!(debug_assertions) {
+ "true"
+ } else {
+ "false"
+ },
+ )
+ .unwrap();
+
+ let tls_model = match target_triple.binary_format {
+ BinaryFormat::Elf => "elf_gd",
+ BinaryFormat::Macho => "macho",
+ BinaryFormat::Coff => "coff",
+ _ => "none",
+ };
+ flags_builder.set("tls_model", tls_model).unwrap();
+
+ flags_builder.set("enable_simd", "true").unwrap();
+
- }*/
+ use rustc_session::config::OptLevel;
+ match sess.opts.optimize {
+ OptLevel::No => {
+ flags_builder.set("opt_level", "none").unwrap();
+ }
+ OptLevel::Less | OptLevel::Default => {}
+ OptLevel::Aggressive => {
+ flags_builder.set("opt_level", "speed_and_size").unwrap();
+ }
+ OptLevel::Size | OptLevel::SizeMin => {
+ sess.warn("Optimizing for size is not supported. Just ignoring the request");
+ }
- Box::new(CraneliftCodegenBackend {
- config: BackendConfig { use_jit: false },
- })
++ }
+
+ let flags = settings::Flags::new(flags_builder);
+
+ let mut isa_builder = cranelift_codegen::isa::lookup(target_triple).unwrap();
+ // Don't use "haswell", as it implies `has_lzcnt`.macOS CI is still at Ivy Bridge EP, so `lzcnt`
+ // is interpreted as `bsr`.
+ isa_builder.enable("nehalem").unwrap();
+ isa_builder.finish(flags)
+}
+
+/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
++ Box::new(CraneliftCodegenBackend { config: None })
+}
--- /dev/null
- types::I8 | types::I32 => {
+//! Peephole optimizations that can be performed while creating clif ir.
+
+use cranelift_codegen::ir::{
+ condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
+};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return it's input, otherwise return the
+/// given value.
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ InstructionData::Unary {
+ opcode: Opcode::Bint,
+ arg,
+ } => arg,
+ _ => arg,
+ }
+ } else {
+ arg
+ }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
+/// otherwise return the given value and false.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ // This is the lowering of `Rvalue::Not`
+ InstructionData::IntCompareImm {
+ opcode: Opcode::IcmpImm,
+ cond: IntCC::Equal,
+ arg,
+ imm,
+ } if imm.bits() == 0 => (arg, true),
+ _ => (arg, false),
+ }
+ } else {
+ (arg, false)
+ }
+}
+
+pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if bcx.func.dfg.value_type(arg).is_bool() {
+ return arg;
+ }
+
+ (|| {
+ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ arg_inst
+ } else {
+ return None;
+ };
+
+ match bcx.func.dfg[arg_inst] {
+ // This is the lowering of Rvalue::Not
+ InstructionData::Load {
+ opcode: Opcode::Load,
+ arg: ptr,
+ flags,
+ offset,
+ } => {
+ // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
+ // uextend.i32`. Just `uload8` is much faster.
+ match bcx.func.dfg.ctrl_typevar(arg_inst) {
+ types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
+ types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ })()
+ .unwrap_or_else(|| {
+ match bcx.func.dfg.value_type(arg) {
++ types::I8 | types::I16 => {
+ // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
+ bcx.ins().uextend(types::I32, arg)
+ }
+ _ => arg,
+ }
+ })
+}
++
++/// Returns whether the branch is statically known to be taken or `None` if it isn't statically known.
++pub(crate) fn maybe_known_branch_taken(
++ bcx: &FunctionBuilder<'_>,
++ arg: Value,
++ test_zero: bool,
++) -> Option<bool> {
++ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
++ arg_inst
++ } else {
++ return None;
++ };
++
++ match bcx.func.dfg[arg_inst] {
++ InstructionData::UnaryBool {
++ opcode: Opcode::Bconst,
++ imm,
++ } => {
++ if test_zero {
++ Some(!imm)
++ } else {
++ Some(imm)
++ }
++ }
++ InstructionData::UnaryImm {
++ opcode: Opcode::Iconst,
++ imm,
++ } => {
++ if test_zero {
++ Some(imm.bits() == 0)
++ } else {
++ Some(imm.bits() != 0)
++ }
++ }
++ _ => None,
++ }
++}
--- /dev/null
- pub(crate) fn write_clif_file<'tcx>(
- tcx: TyCtxt<'tcx>,
- postfix: &str,
- isa: Option<&dyn cranelift_codegen::isa::TargetIsa>,
- instance: Instance<'tcx>,
- context: &cranelift_codegen::Context,
- mut clif_comments: &CommentWriter,
- ) {
- use std::io::Write;
-
- if !cfg!(debug_assertions)
- && !tcx
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg loc.idx param pass mode ssa flags ty
+//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
+//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
+//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//!
+//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//! sig0 = (i64, i64, i64) system_v
+//! sig1 = (i64, i64, i64) system_v
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//! v3 = stack_addr.i64 ss0
+//! v4 = stack_addr.i64 ss1
+//! store v2, v4
+//! v5 = stack_addr.i64 ss2
+//! jump block1
+//!
+//! block1:
+//! nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//! v6 = load.i64 v4
+//! store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//! v7 = load.i64 v5
+//! call fn0(v0, v3, v7)
+//! jump block2
+//!
+//! block2:
+//! nop
+//! ;
+//! ; return
+//! return
+//! }
+//! ```
+
+use std::fmt;
++use std::io::Write;
+
+use cranelift_codegen::{
+ entity::SecondaryMap,
+ ir::{entities::AnyEntity, function::DisplayFunctionAnnotations},
+ write::{FuncWriter, PlainWriter},
+};
+
+use rustc_session::config::OutputType;
+
+use crate::prelude::*;
+
+#[derive(Debug)]
+pub(crate) struct CommentWriter {
+ global_comments: Vec<String>,
+ entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+ pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ let global_comments = if cfg!(debug_assertions) {
+ vec![
+ format!("symbol {}", tcx.symbol_name(instance).name),
+ format!("instance {:?}", instance),
+ format!(
+ "sig {:?}",
+ tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ crate::abi::fn_sig_for_fn_abi(tcx, instance)
+ )
+ ),
+ String::new(),
+ ]
+ } else {
+ vec![]
+ };
+
+ CommentWriter {
+ global_comments,
+ entity_comments: FxHashMap::default(),
+ }
+ }
+}
+
+#[cfg(debug_assertions)]
+impl CommentWriter {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.global_comments.push(comment.into());
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ use std::collections::hash_map::Entry;
+ match self.entity_comments.entry(entity.into()) {
+ Entry::Occupied(mut occ) => {
+ occ.get_mut().push('\n');
+ occ.get_mut().push_str(comment.as_ref());
+ }
+ Entry::Vacant(vac) => {
+ vac.insert(comment.into());
+ }
+ }
+ }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+ fn write_preamble(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ reg_info: Option<&isa::RegInfo>,
+ ) -> Result<bool, fmt::Error> {
+ for comment in &self.global_comments {
+ if !comment.is_empty() {
+ writeln!(w, "; {}", comment)?;
+ } else {
+ writeln!(w)?;
+ }
+ }
+ if !self.global_comments.is_empty() {
+ writeln!(w)?;
+ }
+
+ self.super_preamble(w, func, reg_info)
+ }
+
+ fn write_entity_definition(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ _func: &Function,
+ entity: AnyEntity,
+ value: &dyn fmt::Display,
+ ) -> fmt::Result {
+ write!(w, " {} = {}", entity, value)?;
+
+ if let Some(comment) = self.entity_comments.get(&entity) {
+ writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+ } else {
+ writeln!(w)
+ }
+ }
+
+ fn write_block_header(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ isa: Option<&dyn isa::TargetIsa>,
+ block: Block,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_block_header(w, func, isa, block, indent)
+ }
+
+ fn write_instruction(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ aliases: &SecondaryMap<Value, Vec<Value>>,
+ isa: Option<&dyn isa::TargetIsa>,
+ inst: Inst,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_instruction(w, func, aliases, isa, inst, indent)?;
+ if let Some(comment) = self.entity_comments.get(&inst.into()) {
+ writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+ }
+ Ok(())
+ }
+}
+
+#[cfg(debug_assertions)]
+impl<M: Module> FunctionCx<'_, '_, M> {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.clif_comments.add_global_comment(comment);
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ self.clif_comments.add_comment(entity, comment);
+ }
+}
+
- {
++pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
++ cfg!(debug_assertions)
++ || tcx
+ .sess
+ .opts
+ .output_types
+ .contains_key(&OutputType::LlvmAssembly)
- let value_ranges = isa.map(|isa| {
- context
- .build_value_labels_ranges(isa)
- .expect("value location ranges")
- });
-
++}
++
++pub(crate) fn write_ir_file<'tcx>(
++ tcx: TyCtxt<'tcx>,
++ name: &str,
++ write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
++) {
++ if !should_write_ir(tcx) {
+ return;
+ }
+
- let clif_file_name = clif_output_dir.join(format!(
- "{}.{}.clif",
- tcx.symbol_name(instance).name,
- postfix
- ));
-
- let mut clif = String::new();
- cranelift_codegen::write::decorate_function(
- &mut clif_comments,
- &mut clif,
- &context.func,
- &DisplayFunctionAnnotations {
- isa: Some(&*crate::build_isa(
- tcx.sess, true, /* PIC doesn't matter here */
- )),
- value_ranges: value_ranges.as_ref(),
- },
- )
- .unwrap();
+ let clif_output_dir = tcx.output_filenames(LOCAL_CRATE).with_extension("clif");
+
+ match std::fs::create_dir(&clif_output_dir) {
+ Ok(()) => {}
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+ res @ Err(_) => res.unwrap(),
+ }
+
- let target_triple = crate::target_triple(tcx.sess);
- writeln!(file, "test compile")?;
- writeln!(file, "set is_pic")?;
- writeln!(file, "set enable_simd")?;
- writeln!(file, "target {} haswell", target_triple)?;
- writeln!(file)?;
- file.write_all(clif.as_bytes())?;
++ let clif_file_name = clif_output_dir.join(name);
+
+ let res: std::io::Result<()> = try {
+ let mut file = std::fs::File::create(clif_file_name)?;
- tcx.sess.warn(&format!("err writing clif file: {}", err));
++ write(&mut file)?;
+ };
+ if let Err(err) = res {
++ tcx.sess.warn(&format!("error writing ir file: {}", err));
+ }
+}
+
++pub(crate) fn write_clif_file<'tcx>(
++ tcx: TyCtxt<'tcx>,
++ postfix: &str,
++ isa: Option<&dyn cranelift_codegen::isa::TargetIsa>,
++ instance: Instance<'tcx>,
++ context: &cranelift_codegen::Context,
++ mut clif_comments: &CommentWriter,
++) {
++ write_ir_file(
++ tcx,
++ &format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
++ |file| {
++ let value_ranges = isa.map(|isa| {
++ context
++ .build_value_labels_ranges(isa)
++ .expect("value location ranges")
++ });
++
++ let mut clif = String::new();
++ cranelift_codegen::write::decorate_function(
++ &mut clif_comments,
++ &mut clif,
++ &context.func,
++ &DisplayFunctionAnnotations {
++ isa: Some(&*crate::build_isa(tcx.sess)),
++ value_ranges: value_ranges.as_ref(),
++ },
++ )
++ .unwrap();
++
++ writeln!(file, "test compile")?;
++ writeln!(file, "set is_pic")?;
++ writeln!(file, "set enable_simd")?;
++ writeln!(file, "target {} haswell", crate::target_triple(tcx.sess))?;
++ writeln!(file)?;
++ file.write_all(clif.as_bytes())?;
++ Ok(())
++ },
++ );
++}
++
+impl<M: Module> fmt::Debug for FunctionCx<'_, '_, M> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.local_map)?;
+
+ let mut clif = String::new();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &self.clif_comments,
+ &mut clif,
+ &self.bcx.func,
+ &DisplayFunctionAnnotations::default(),
+ )
+ .unwrap();
+ writeln!(f, "\n{}", clif)
+ }
+}
--- /dev/null
- fx.cx.module.define_data(data_id, &data_ctx).unwrap();
+//! Codegen vtables and vtable accesses.
+//!
+//! See librustc_codegen_llvm/meth.rs for reference
+// FIXME dedup this logic between miri, cg_llvm and cg_clif
+
+use crate::prelude::*;
+
+const DROP_FN_INDEX: usize = 0;
+const SIZE_INDEX: usize = 1;
+const ALIGN_INDEX: usize = 2;
+
+fn vtable_memflags() -> MemFlags {
+ let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+ flags.set_readonly(); // A vtable is always read-only.
+ flags
+}
+
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (DROP_FN_INDEX * usize_size) as i32,
+ )
+}
+
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (SIZE_INDEX * usize_size) as i32,
+ )
+}
+
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Module>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ (ALIGN_INDEX * usize_size) as i32,
+ )
+}
+
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ arg: CValue<'tcx>,
+ idx: usize,
+) -> (Value, Value) {
+ let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ arg.load_scalar_pair(fx)
+ } else {
+ let (ptr, vtable) = arg.try_to_ptr().unwrap();
+ (ptr.get_addr(fx), vtable.unwrap())
+ };
+
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
+ let func_ref = fx.bcx.ins().load(
+ pointer_ty(fx.tcx),
+ vtable_memflags(),
+ vtable,
+ ((idx + 3) * usize_size as usize) as i32,
+ );
+ (ptr, func_ref)
+}
+
+pub(crate) fn get_vtable<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ layout: TyAndLayout<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+ let data_id = if let Some(data_id) = fx.cx.vtables.get(&(layout.ty, trait_ref)) {
+ *data_id
+ } else {
+ let data_id = build_vtable(fx, layout, trait_ref);
+ fx.cx.vtables.insert((layout.ty, trait_ref), data_id);
+ data_id
+ };
+
+ let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}
+
+fn build_vtable<'tcx>(
+ fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ layout: TyAndLayout<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> DataId {
+ let tcx = fx.tcx;
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+
+ let drop_in_place_fn = import_function(
+ tcx,
+ &mut fx.cx.module,
+ Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
+ );
+
+ let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
+
+ let methods_root;
+ let methods = if let Some(trait_ref) = trait_ref {
+ methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, layout.ty));
+ methods_root.iter()
+ } else {
+ (&[]).iter()
+ };
+ let methods = methods.cloned().map(|opt_mth| {
+ opt_mth.map(|(def_id, substs)| {
+ import_function(
+ tcx,
+ &mut fx.cx.module,
+ Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .polymorphize(fx.tcx),
+ )
+ })
+ });
+ components.extend(methods);
+
+ let mut data_ctx = DataContext::new();
+ let mut data = ::std::iter::repeat(0u8)
+ .take(components.len() * usize_size)
+ .collect::<Vec<u8>>()
+ .into_boxed_slice();
+
+ write_usize(fx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
+ write_usize(fx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
+ data_ctx.define(data);
+
+ for (i, component) in components.into_iter().enumerate() {
+ if let Some(func_id) = component {
+ let func_ref = fx.cx.module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
+ }
+ }
+
+ data_ctx.set_align(fx.tcx.data_layout.pointer_align.pref.bytes());
+
+ let data_id = fx
+ .cx
+ .module
+ .declare_data(
+ &format!(
+ "__vtable.{}.for.{:?}.{}",
+ trait_ref
+ .as_ref()
+ .map(|trait_ref| format!("{:?}", trait_ref.skip_binder()).into())
+ .unwrap_or(std::borrow::Cow::Borrowed("???")),
+ layout.ty,
+ fx.cx.vtables.len(),
+ ),
+ Linkage::Local,
+ false,
+ false,
+ )
+ .unwrap();
+
++ // FIXME don't duplicate definitions in lazy jit mode
++ let _ = fx.cx.module.define_data(data_id, &data_ctx);
+
+ data_id
+}
+
+fn write_usize(tcx: TyCtxt<'_>, buf: &mut [u8], idx: usize, num: u64) {
+ let pointer_size = tcx
+ .layout_of(ParamEnv::reveal_all().and(tcx.types.usize))
+ .unwrap()
+ .size
+ .bytes() as usize;
+ let target = &mut buf[idx * pointer_size..(idx + 1) * pointer_size];
+
+ match tcx.data_layout.endian {
+ rustc_target::abi::Endian::Little => match pointer_size {
+ 4 => target.copy_from_slice(&(num as u32).to_le_bytes()),
+ 8 => target.copy_from_slice(&(num as u64).to_le_bytes()),
+ _ => todo!("pointer size {} is not yet supported", pointer_size),
+ },
+ rustc_target::abi::Endian::Big => match pointer_size {
+ 4 => target.copy_from_slice(&(num as u32).to_be_bytes()),
+ 8 => target.copy_from_slice(&(num as u64).to_be_bytes()),
+ _ => todo!("pointer size {} is not yet supported", pointer_size),
+ },
+ }
+}