--- /dev/null
- version = "1.0.51"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anyhow"
- checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203"
++version = "1.0.53"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
- checksum = "cc0cb7df82c8cf8f2e6a8dd394a0932a71369c160cc9b027dca414fced242513"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "71447555acc6c875c52c407d572fc1327dc5c34cba72b4b2e7ad048aa4e4fd19"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
- checksum = "fe4463c15fa42eee909e61e5eac4866b7c6d22d0d8c621e57a0c5380753bfa8c"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "ec9a10261891a7a919b0d4f6aa73582e88441d9a8f6173c88efbe4a5a362ea67"
+dependencies = [
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "gimli",
+ "log",
+ "regalloc",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
- checksum = "793f6a94a053a55404ea16e1700202a88101672b8cd6b4df63e13cde950852bf"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cranelift-entity",
++checksum = "815755d76fcbcf6e17ab888545b28ab775f917cb12ce0797e60cd41a2288692c"
+dependencies = [
+ "cranelift-codegen-shared",
- version = "0.78.0"
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
- checksum = "44aa1846df275bce5eb30379d65964c7afc63c05a117076e62a119c25fe174be"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "23ea92f2a67335a2e4d3c9c65624c3b14ae287d595b0650822c41824febab66b"
+
+[[package]]
+name = "cranelift-entity"
- checksum = "a3a45d8d6318bf8fc518154d9298eab2a8154ec068a8885ff113f6db8d69bb3a"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "bd25847875e388c500ad3624b4d2e14067955c93185194a7222246a25b91c975"
+
+[[package]]
+name = "cranelift-frontend"
- checksum = "e07339bd461766deb7605169de039e01954768ff730fa1254e149001884a8525"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "308bcfb7eb47bdf5ff6e1ace262af4ed39ec19f204c751fffb037e0e82a0c8bf"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-jit"
- checksum = "0e8f0d60fb5d67f7a1e5c49db38ba96d1c846921faef02085fc5590b74781747"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "f560b3a314b8d15facf411e5d29b917c3e787a2bbc3fcdc5183bc0c5b7d4fe01"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "winapi",
+]
+
+[[package]]
+name = "cranelift-module"
- checksum = "825ac7e0959cbe7ddc9cc21209f0319e611a57f9fcb2b723861fe7ef2017e651"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- "cranelift-entity",
- "log",
++checksum = "3a57aba9e603d694d1430ff38bd914bae23ef9c2e44b25a65e318905807e654c"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
- version = "0.78.0"
+]
+
+[[package]]
+name = "cranelift-native"
- checksum = "03e2fca76ff57e0532936a71e3fc267eae6a19a86656716479c66e7f912e3d7b"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.78.0"
++checksum = "12cdc799aee673be2317e631d4569a1ba0a7e77a07a7ce45557086d2e02e9514"
+dependencies = [
+ "cranelift-codegen",
+ "libc",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
- checksum = "55500d0fc9bb05c0944fc4506649249d28f55bd4fe95b87f0e55bf41058f0e6d"
++version = "0.81.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.3.0"
++checksum = "502a7333836052fcdf4425d7f7a21264d99f862d32b9c3a0e47cd920487a9b60"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "crc32fast"
- checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836"
++version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.25.0"
++checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "gimli"
- checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
++version = "0.26.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.112"
++checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+
+[[package]]
+name = "indexmap"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
- checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
++version = "0.2.116"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.0.32"
++checksum = "565dbd88872dbe4cc8a46e527f26483c1d1f7afa6b884a3bd6cd893d4f98da74"
+
+[[package]]
+name = "libloading"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "object"
+version = "0.27.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+ "memchr",
+]
+
+[[package]]
+name = "regalloc"
- checksum = "a6304468554ed921da3d32c355ea107b8d13d7b8996c3adfb7aab48d3bc321f4"
++version = "0.0.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.7.0"
++checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02"
+dependencies = [
+ "log",
+ "rustc-hash",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "smallvec"
- checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309"
++version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- /dev/null
- cranelift-codegen = { version = "0.78.0", features = ["unwind", "all-arch"] }
- cranelift-frontend = "0.78.0"
- cranelift-module = "0.78.0"
- cranelift-native = "0.78.0"
- cranelift-jit = { version = "0.78.0", optional = true }
- cranelift-object = "0.78.0"
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
- gimli = { version = "0.25.0", default-features = false, features = ["write"]}
++cranelift-codegen = { version = "0.81.0", features = ["unwind", "all-arch"] }
++cranelift-frontend = "0.81.0"
++cranelift-module = "0.81.0"
++cranelift-native = "0.81.0"
++cranelift-jit = { version = "0.81.0", optional = true }
++cranelift-object = "0.81.0"
+target-lexicon = "0.12.0"
++gimli = { version = "0.26.0", default-features = false, features = ["write"]}
+object = { version = "0.27.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.8.0"
+libloading = { version = "0.6.0", optional = true }
+smallvec = "1.6.1"
+
+[patch.crates-io]
+# Uncomment to use local checkout of cranelift
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-native = { path = "../wasmtime/cranelift/native" }
+#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#gimli = { path = "../" }
+
+[features]
+# Enable features not ready to be enabled when compiling as part of rustc
+unstable-features = ["jit", "inline_asm"]
+jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.release.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[package.metadata.rust-analyzer]
+rustc_private = true
--- /dev/null
- version = "1.0.1"
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "autocfg"
- checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
++version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "1.0.72"
++checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "cc"
- checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee"
++version = "1.0.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.1.66"
++checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
- version = "0.11.2"
++version = "0.1.70"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6fe28e0bf9357092740362502f5cc7955d8dc125ebda71dec72336c2e15c62e"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
- checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
++version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.1.19"
++checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
- checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
++version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.2.112"
++checksum = "1ab7905ea95c6d9af62940f9d7dd9596d54c334ae2c15300c482051292d5637f"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
- checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
++version = "0.2.119"
+source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.9.0+wasi-snapshot-preview1"
++checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2"
+dependencies = [
+ "compiler_builtins",
+ "memchr",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "std_detect",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "std_detect"
+version = "0.1.5"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
- checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
++version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
--- /dev/null
- crate::utils::spawn_and_wait(cmd);
+use std::env;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+pub(crate) fn build_backend(
+ channel: &str,
+ host_triple: &str,
+ use_unstable_features: bool,
+) -> PathBuf {
+ let mut cmd = Command::new("cargo");
+ cmd.arg("build").arg("--target").arg(host_triple);
+
+ cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
+
+ let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
+
+ if env::var("CI").as_ref().map(|val| &**val) == Ok("true") {
+ // Deny warnings on CI
+ rustflags += " -Dwarnings";
+
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ cmd.env("CARGO_BUILD_INCREMENTAL", "false");
+ }
+
+ if use_unstable_features {
+ cmd.arg("--features").arg("unstable-features");
+ }
+
+ match channel {
+ "debug" => {}
+ "release" => {
+ cmd.arg("--release");
+ }
+ _ => unreachable!(),
+ }
+
+ // Set the rpath to make the cg_clif executable find librustc_codegen_cranelift without changing
+ // LD_LIBRARY_PATH
+ if cfg!(unix) {
+ if cfg!(target_os = "macos") {
+ rustflags += " -Csplit-debuginfo=unpacked \
+ -Clink-arg=-Wl,-rpath,@loader_path/../lib \
+ -Zosx-rpath-install-name";
+ } else {
+ rustflags += " -Clink-arg=-Wl,-rpath=$ORIGIN/../lib ";
+ }
+ }
+
+ cmd.env("RUSTFLAGS", rustflags);
+
+ eprintln!("[BUILD] rustc_codegen_cranelift");
++ super::utils::spawn_and_wait(cmd);
+
+ Path::new("target").join(host_triple).join(channel)
+}
--- /dev/null
- use crate::rustc_info::{get_file_name, get_rustc_version};
- use crate::utils::{spawn_and_wait, try_hard_link};
- use crate::SysrootKind;
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{self, Command};
+
- let default_sysroot = crate::rustc_info::get_default_sysroot();
++use super::rustc_info::{get_file_name, get_rustc_version};
++use super::utils::{spawn_and_wait, try_hard_link};
++use super::SysrootKind;
+
+pub(crate) fn build_sysroot(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
+ cg_clif_build_dir: PathBuf,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ if target_dir.exists() {
+ fs::remove_dir_all(target_dir).unwrap();
+ }
+ fs::create_dir_all(target_dir.join("bin")).unwrap();
+ fs::create_dir_all(target_dir.join("lib")).unwrap();
+
+ // Copy the backend
+ for file in ["cg_clif", "cg_clif_build_sysroot"] {
+ try_hard_link(
+ cg_clif_build_dir.join(get_file_name(file, "bin")),
+ target_dir.join("bin").join(get_file_name(file, "bin")),
+ );
+ }
+
+ let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
+ try_hard_link(
+ cg_clif_build_dir.join(&cg_clif_dylib),
+ target_dir
+ .join(if cfg!(windows) {
+ // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
+ // binaries.
+ "bin"
+ } else {
+ "lib"
+ })
+ .join(cg_clif_dylib),
+ );
+
+ // Build and copy cargo wrapper
+ let mut build_cargo_wrapper_cmd = Command::new("rustc");
+ build_cargo_wrapper_cmd
+ .arg("scripts/cargo-clif.rs")
+ .arg("-o")
+ .arg(target_dir.join("cargo-clif"))
+ .arg("-g");
+ spawn_and_wait(build_cargo_wrapper_cmd);
+
- if !crate::config::get_bool("keep_sysroot") {
++ let default_sysroot = super::rustc_info::get_default_sysroot();
+
+ let rustlib = target_dir.join("lib").join("rustlib");
+ let host_rustlib_lib = rustlib.join(host_triple).join("lib");
+ let target_rustlib_lib = rustlib.join(target_triple).join("lib");
+ fs::create_dir_all(&host_rustlib_lib).unwrap();
+ fs::create_dir_all(&target_rustlib_lib).unwrap();
+
+ if target_triple == "x86_64-pc-windows-gnu" {
+ if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
+ eprintln!(
+ "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
+ to compile a sysroot for it.",
+ );
+ process::exit(1);
+ }
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
+ continue; // only copy object files
+ }
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+
+ match sysroot_kind {
+ SysrootKind::None => {} // Nothing to do
+ SysrootKind::Llvm => {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ let file_name_str = file.file_name().unwrap().to_str().unwrap();
+ if (file_name_str.contains("rustc_")
+ && !file_name_str.contains("rustc_std_workspace_")
+ && !file_name_str.contains("rustc_demangle"))
+ || file_name_str.contains("chalk")
+ || file_name_str.contains("tracing")
+ || file_name_str.contains("regex")
+ {
+ // These are large crates that are part of the rustc-dev component and are not
+ // necessary to run regular programs.
+ continue;
+ }
+ try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
+ }
+
+ if target_triple != host_triple {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+ }
+ SysrootKind::Clif => {
+ build_clif_sysroot_for_triple(channel, target_dir, host_triple, None);
+
+ if host_triple != target_triple {
+ // When cross-compiling it is often necessary to manually pick the right linker
+ let linker = if target_triple == "aarch64-unknown-linux-gnu" {
+ Some("aarch64-linux-gnu-gcc")
+ } else {
+ None
+ };
+ build_clif_sysroot_for_triple(channel, target_dir, target_triple, linker);
+ }
+
+ // Copy std for the host to the lib dir. This is necessary for the jit mode to find
+ // libstd.
+ for file in fs::read_dir(host_rustlib_lib).unwrap() {
+ let file = file.unwrap().path();
+ if file.file_name().unwrap().to_str().unwrap().contains("std-") {
+ try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
+ }
+ }
+ }
+ }
+}
+
+fn build_clif_sysroot_for_triple(
+ channel: &str,
+ target_dir: &Path,
+ triple: &str,
+ linker: Option<&str>,
+) {
+ match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
+ Err(e) => {
+ eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
+ eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
+ process::exit(1);
+ }
+ Ok(source_version) => {
+ let rustc_version = get_rustc_version();
+ if source_version != rustc_version {
+ eprintln!("The patched sysroot source is outdated");
+ eprintln!("Source version: {}", source_version.trim());
+ eprintln!("Rustc version: {}", rustc_version.trim());
+ eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
+ process::exit(1);
+ }
+ }
+ }
+
+ let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
+
++ if !super::config::get_bool("keep_sysroot") {
+ // Cleanup the target dir with the exception of build scripts and the incremental cache
+ for dir in ["build", "deps", "examples", "native"] {
+ if build_dir.join(dir).exists() {
+ fs::remove_dir_all(build_dir.join(dir)).unwrap();
+ }
+ }
+ }
+
+ // Build sysroot
+ let mut build_cmd = Command::new("cargo");
+ build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
+ let mut rustflags = "--clif -Zforce-unstable-if-unmarked".to_string();
+ if channel == "release" {
+ build_cmd.arg("--release");
+ rustflags.push_str(" -Zmir-opt-level=3");
+ }
+ if let Some(linker) = linker {
+ use std::fmt::Write;
+ write!(rustflags, " -Clinker={}", linker).unwrap();
+ }
+ build_cmd.env("RUSTFLAGS", rustflags);
+ build_cmd.env(
+ "RUSTC",
+ env::current_dir().unwrap().join(target_dir).join("bin").join("cg_clif_build_sysroot"),
+ );
+ build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
+ spawn_and_wait(build_cmd);
+
+ // Copy all relevant files to the sysroot
+ for entry in
+ fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
+ .unwrap()
+ {
+ let entry = entry.unwrap();
+ if let Some(ext) = entry.path().extension() {
+ if ext == "rmeta" || ext == "d" || ext == "dSYM" {
+ continue;
+ }
+ } else {
+ continue;
+ };
+ try_hard_link(
+ entry.path(),
+ target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
+ );
+ }
+}
--- /dev/null
--- /dev/null
++use std::env;
++use std::path::PathBuf;
++use std::process;
++
++mod build_backend;
++mod build_sysroot;
++mod config;
++mod prepare;
++mod rustc_info;
++mod utils;
++
++fn usage() {
++ eprintln!("Usage:");
++ eprintln!(" ./y.rs prepare");
++ eprintln!(
++ " ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
++ );
++}
++
++macro_rules! arg_error {
++ ($($err:tt)*) => {{
++ eprintln!($($err)*);
++ usage();
++ std::process::exit(1);
++ }};
++}
++
++enum Command {
++ Build,
++}
++
++#[derive(Copy, Clone)]
++pub(crate) enum SysrootKind {
++ None,
++ Clif,
++ Llvm,
++}
++
++pub fn main() {
++ env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
++ env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
++ // The target dir is expected in the default location. Guard against the user changing it.
++ env::set_var("CARGO_TARGET_DIR", "target");
++
++ let mut args = env::args().skip(1);
++ let command = match args.next().as_deref() {
++ Some("prepare") => {
++ if args.next().is_some() {
++ arg_error!("./x.rs prepare doesn't expect arguments");
++ }
++ prepare::prepare();
++ process::exit(0);
++ }
++ Some("build") => Command::Build,
++ Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
++ Some(command) => arg_error!("Unknown command {}", command),
++ None => {
++ usage();
++ process::exit(0);
++ }
++ };
++
++ let mut target_dir = PathBuf::from("build");
++ let mut channel = "release";
++ let mut sysroot_kind = SysrootKind::Clif;
++ let mut use_unstable_features = true;
++ while let Some(arg) = args.next().as_deref() {
++ match arg {
++ "--target-dir" => {
++ target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
++ arg_error!("--target-dir requires argument");
++ }))
++ }
++ "--debug" => channel = "debug",
++ "--sysroot" => {
++ sysroot_kind = match args.next().as_deref() {
++ Some("none") => SysrootKind::None,
++ Some("clif") => SysrootKind::Clif,
++ Some("llvm") => SysrootKind::Llvm,
++ Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
++ None => arg_error!("--sysroot requires argument"),
++ }
++ }
++ "--no-unstable-features" => use_unstable_features = false,
++ flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
++ arg => arg_error!("Unexpected argument {}", arg),
++ }
++ }
++
++ let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
++ host_triple
++ } else if let Some(host_triple) = config::get_value("host") {
++ host_triple
++ } else {
++ rustc_info::get_host_triple()
++ };
++ let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
++ if target_triple != "" {
++ target_triple
++ } else {
++ host_triple.clone() // Empty target triple can happen on GHA
++ }
++ } else if let Some(target_triple) = config::get_value("target") {
++ target_triple
++ } else {
++ host_triple.clone()
++ };
++
++ if target_triple.ends_with("-msvc") {
++ eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
++ eprintln!("Switch to the MinGW toolchain for Windows support.");
++ eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
++ eprintln!("set the global default target to MinGW");
++ process::exit(1);
++ }
++
++ let cg_clif_build_dir =
++ build_backend::build_backend(channel, &host_triple, use_unstable_features);
++ build_sysroot::build_sysroot(
++ channel,
++ sysroot_kind,
++ &target_dir,
++ cg_clif_build_dir,
++ &host_triple,
++ &target_triple,
++ );
++}
--- /dev/null
- use crate::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
- use crate::utils::{copy_dir_recursively, spawn_and_wait};
+use std::env;
+use std::ffi::OsStr;
+use std::ffi::OsString;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
- "0.1.66",
++use super::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
++use super::utils::{copy_dir_recursively, spawn_and_wait};
+
+pub(crate) fn prepare() {
+ prepare_sysroot();
+
+ eprintln!("[INSTALL] hyperfine");
+ Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
+
+ clone_repo(
+ "rand",
+ "https://github.com/rust-random/rand.git",
+ "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
+ );
+ apply_patches("rand", Path::new("rand"));
+
+ clone_repo(
+ "regex",
+ "https://github.com/rust-lang/regex.git",
+ "341f207c1071f7290e3f228c710817c280c8dca1",
+ );
+
+ clone_repo(
+ "portable-simd",
+ "https://github.com/rust-lang/portable-simd",
+ "b8d6b6844602f80af79cd96401339ec594d472d8",
+ );
+ apply_patches("portable-simd", Path::new("portable-simd"));
+
+ clone_repo(
+ "simple-raytracer",
+ "https://github.com/ebobby/simple-raytracer",
+ "804a7a21b9e673a482797aa289a18ed480e4d813",
+ );
+
+ eprintln!("[LLVM BUILD] simple-raytracer");
+ let mut build_cmd = Command::new("cargo");
+ build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
+ spawn_and_wait(build_cmd);
+ fs::copy(
+ Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
+ // FIXME use get_file_name here too once testing is migrated to rust
+ "simple-raytracer/raytracer_cg_llvm",
+ )
+ .unwrap();
+}
+
+fn prepare_sysroot() {
+ let rustc_path = get_rustc_path();
+ let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
+ let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
+
+ assert!(sysroot_src_orig.exists());
+
+ if sysroot_src.exists() {
+ fs::remove_dir_all(&sysroot_src).unwrap();
+ }
+ fs::create_dir_all(sysroot_src.join("library")).unwrap();
+ eprintln!("[COPY] sysroot src");
+ copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
+
+ let rustc_version = get_rustc_version();
+ fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
+
+ eprintln!("[GIT] init");
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
+ spawn_and_wait(git_init_cmd);
+
+ let mut git_add_cmd = Command::new("git");
+ git_add_cmd.arg("add").arg(".").current_dir(&sysroot_src);
+ spawn_and_wait(git_add_cmd);
+
+ let mut git_commit_cmd = Command::new("git");
+ git_commit_cmd
+ .arg("commit")
+ .arg("-m")
+ .arg("Initial commit")
+ .arg("-q")
+ .current_dir(&sysroot_src);
+ spawn_and_wait(git_commit_cmd);
+
+ apply_patches("sysroot", &sysroot_src);
+
+ clone_repo(
+ "build_sysroot/compiler-builtins",
+ "https://github.com/rust-lang/compiler-builtins.git",
++ "0.1.70",
+ );
+ apply_patches("compiler-builtins", Path::new("build_sysroot/compiler-builtins"));
+}
+
+fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
+ eprintln!("[CLONE] {}", repo);
+ // Ignore exit code as the repo may already have been checked out
+ Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
+
+ let mut clean_cmd = Command::new("git");
+ clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
+ spawn_and_wait(clean_cmd);
+
+ let mut checkout_cmd = Command::new("git");
+ checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
+ spawn_and_wait(checkout_cmd);
+}
+
+fn get_patches(crate_name: &str) -> Vec<OsString> {
+ let mut patches: Vec<_> = fs::read_dir("patches")
+ .unwrap()
+ .map(|entry| entry.unwrap().path())
+ .filter(|path| path.extension() == Some(OsStr::new("patch")))
+ .map(|path| path.file_name().unwrap().to_owned())
+ .filter(|file_name| {
+ file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
+ })
+ .collect();
+ patches.sort();
+ patches
+}
+
+fn apply_patches(crate_name: &str, target_dir: &Path) {
+ for patch in get_patches(crate_name) {
+ eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
+ let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
+ let mut apply_patch_cmd = Command::new("git");
+ apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
+ spawn_and_wait(apply_patch_cmd);
+ }
+}
--- /dev/null
- rm -rf target/ build/ perf.data{,.old}
+#!/usr/bin/env bash
+set -e
+
+rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
++rm -rf target/ build/ perf.data{,.old} y.bin
+rm -rf rand/ regex/ simple-raytracer/ portable-simd/
--- /dev/null
- pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 18:30:55 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ src/mem/mod.rs | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/src/mem/mod.rs b/src/mem/mod.rs
+index 107762c..2d1ae10 100644
+--- a/src/mem/mod.rs
++++ b/src/mem/mod.rs
+@@ -137,10 +137,6 @@ intrinsics! {
- - pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++ pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+ memcpy_element_unordered_atomic(dest, src, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
- pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
++- pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+- memcpy_element_unordered_atomic(dest, src, bytes);
+- }
+
+ #[cfg(target_has_atomic_load_store = "8")]
- pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
++ pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
+@@ -158,10 +154,6 @@ intrinsics! {
- - pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++ pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+ memmove_element_unordered_atomic(dest, src, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
- pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
++- pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+- memmove_element_unordered_atomic(dest, src, bytes);
+- }
+
+ #[cfg(target_has_atomic_load_store = "8")]
- pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
++ pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
+@@ -179,8 +171,4 @@ intrinsics! {
- - pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
++ pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
+ memset_element_unordered_atomic(s, c, bytes);
+ }
+- #[cfg(target_has_atomic_load_store = "128")]
++- pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
+- memset_element_unordered_atomic(s, c, bytes);
+- }
+ }
+--
+2.26.2.7.g19db9cfb68
+
--- /dev/null
- +edition = "2018"
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs | 2 ++
+ library/core/tests/slice.rs | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
- assert!(ys == zs);
+++edition = "2021"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+ mod dragon;
+ mod grisu;
+ }
+-mod random;
+
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
+index 1a6be3a..42dbd59 100644
+--- a/library/core/tests/ptr.rs
++++ b/library/core/tests/ptr.rs
+@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
++ };
+ }
+
++/*
+ #[test]
+ #[allow(warnings)]
+ // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
+@@ -277,6 +277,7 @@ pub fn test_variadic_fnptr() {
+ let mut s = SipHasher::new();
+ assert_eq!(p.hash(&mut s), q.hash(&mut s));
+ }
++*/
+
+ #[test]
+ fn write_unaligned_drop() {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+ }
+ }
+
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
--- /dev/null
- library/std/src/time/monotonic.rs | 6 +++--
+From ad7ffe71baba46865f2e65266ab025920dfdc20b Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 18:45:28 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ library/core/src/panic/unwind_safe.rs | 6 -----
+ library/core/src/sync/atomic.rs | 38 ---------------------------
+ library/core/tests/atomic.rs | 4 ---
- diff --git a/library/std/src/time/monotonic.rs b/library/std/src/time/monotonic.rs
- index fa96b7a..2854f9c 100644
- --- a/library/std/src/time/monotonic.rs
- +++ b/library/std/src/time/monotonic.rs
- @@ -5,7 +5,7 @@ pub(super) fn monotonize(raw: time::Instant) -> time::Instant {
- inner::monotonize(raw)
- }
-
- -#[cfg(any(all(target_has_atomic = "64", not(target_has_atomic = "128")), target_arch = "aarch64"))]
- +#[cfg(target_has_atomic = "64")]
- pub mod inner {
- use crate::sync::atomic::AtomicU64;
- use crate::sync::atomic::Ordering::*;
- @@ -70,6 +70,7 @@ pub mod inner {
- }
- }
-
- +/*
- #[cfg(all(target_has_atomic = "128", not(target_arch = "aarch64")))]
- pub mod inner {
- use crate::sync::atomic::AtomicU128;
- @@ -94,8 +95,9 @@ pub mod inner {
- ZERO.checked_add_duration(&Duration::new(secs, nanos)).unwrap()
- }
- }
- +*/
-
- -#[cfg(not(any(target_has_atomic = "64", target_has_atomic = "128")))]
- +#[cfg(not(target_has_atomic = "64"))]
- pub mod inner {
- use crate::cmp;
- use crate::sys::time;
+ 4 files changed, 4 insertions(+), 50 deletions(-)
+
+diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
+index 092b7cf..158cf71 100644
+--- a/library/core/src/panic/unwind_safe.rs
++++ b/library/core/src/panic/unwind_safe.rs
+@@ -216,9 +216,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicI32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicI64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicI128 {}
+
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+@@ -235,9 +232,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicU32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicU64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicU128 {}
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
+index d9de37e..8293fce 100644
+--- a/library/core/src/sync/atomic.rs
++++ b/library/core/src/sync/atomic.rs
+@@ -2234,44 +2234,6 @@ atomic_int! {
+ "AtomicU64::new(0)",
+ u64 AtomicU64 ATOMIC_U64_INIT
+ }
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- "i128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_min, atomic_max,
+- 16,
+- "AtomicI128::new(0)",
+- i128 AtomicI128 ATOMIC_I128_INIT
+-}
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "32976"),
+- "u128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_umin, atomic_umax,
+- 16,
+- "AtomicU128::new(0)",
+- u128 AtomicU128 ATOMIC_U128_INIT
+-}
+
+ macro_rules! atomic_int_ptr_sized {
+ ( $($target_pointer_width:literal $align:literal)* ) => { $(
+diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
+index b735957..ea728b6 100644
+--- a/library/core/tests/atomic.rs
++++ b/library/core/tests/atomic.rs
+@@ -185,10 +185,6 @@ fn atomic_alignment() {
+ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+ #[cfg(target_has_atomic = "ptr")]
+--
+2.26.2.7.g19db9cfb68
+
--- /dev/null
- channel = "nightly-2021-12-30"
+[toolchain]
++channel = "nightly-2022-02-23"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
--- /dev/null
- cargo install ripgrep
+#!/bin/bash
+set -e
+
+cd $(dirname "$0")/../
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Test suite of rustc"
+pushd rust
+
++command -v rg >/dev/null 2>&1 || cargo install ripgrep
+
+rm -r src/test/ui/{extern/,panics/,unsized-locals/,lto/,simd*,linkage*,unwind-*.rs} || true
+for test in $(rg --files-with-matches "asm!|catch_unwind|should_panic|lto|// needs-asm-support" src/test/ui); do
+ rm $test
+done
+
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+done
+
+git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+# these all depend on unwinding support
+rm src/test/ui/backtrace.rs
+rm src/test/ui/array-slice-vec/box-of-array-of-drop-*.rs
+rm src/test/ui/array-slice-vec/slice-panic-*.rs
+rm src/test/ui/array-slice-vec/nested-vec-3.rs
+rm src/test/ui/cleanup-rvalue-temp-during-incomplete-alloc.rs
+rm src/test/ui/issues/issue-26655.rs
+rm src/test/ui/issues/issue-29485.rs
+rm src/test/ui/issues/issue-30018-panic.rs
+rm src/test/ui/process/multi-panic.rs
+rm src/test/ui/sepcomp/sepcomp-unwind.rs
+rm src/test/ui/structs-enums/unit-like-struct-drop-run.rs
+rm src/test/ui/drop/terminate-in-initializer.rs
+rm src/test/ui/threads-sendsync/task-stderr.rs
+rm src/test/ui/numbers-arithmetic/int-abs-overflow.rs
+rm src/test/ui/drop/drop-trait-enum.rs
+rm src/test/ui/numbers-arithmetic/issue-8460.rs
+rm src/test/ui/runtime/rt-explody-panic-payloads.rs
+rm src/test/incremental/change_crate_dep_kind.rs
+rm src/test/ui/threads-sendsync/unwind-resource.rs
+
+rm src/test/ui/issues/issue-28950.rs # depends on stack size optimizations
+rm src/test/ui/codegen/init-large-type.rs # same
+rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm src/test/ui/issues/issue-33992.rs # unsupported linkages
+rm src/test/ui/issues/issue-51947.rs # same
+rm src/test/incremental/hashes/function_interfaces.rs # same
+rm src/test/incremental/hashes/statics.rs # same
+rm src/test/ui/numbers-arithmetic/saturating-float-casts.rs # intrinsic gives different but valid result
+rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
+rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
+rm src/test/ui/consts/issue-33537.rs # same
+rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
+rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
+rm src/test/ui/generator/size-moved-locals.rs # same
+rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
+rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+
+rm src/test/incremental/hashes/inline_asm.rs # inline asm
+rm src/test/incremental/issue-72386.rs # same
+rm src/test/incremental/lto.rs # requires lto
+rm src/test/incremental/dirty_clean.rs # TODO
+
+rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
+rm -r src/test/run-make/unstable-flag-required # same
+rm -r src/test/run-make/rustdoc-* # same
+rm -r src/test/run-make/emit-named-files # requires full --emit support
+
+rm -r src/test/run-pass-valgrind/unsized-locals
+
+rm src/test/ui/json-bom-plus-crlf-multifile.rs # differing warning
+rm src/test/ui/json-bom-plus-crlf.rs # same
+rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # same
+rm src/test/ui/match/issue-82392.rs # differing error
+rm src/test/ui/consts/min_const_fn/address_of_const.rs # same
+rm src/test/ui/consts/issue-miri-1910.rs # same
++rm src/test/ui/generic-associated-types/bugs/issue-80626.rs # same
++rm src/test/ui/generic-associated-types/bugs/issue-89008.rs # same
+rm src/test/ui/type-alias-impl-trait/cross_crate_ice*.rs # requires removed aux dep
+
+rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
+rm src/test/ui/cfg/cfg-panic.rs
+rm -r src/test/ui/hygiene/
+
+rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
+rm src/test/codegen-units/polymorphization/unused_type_parameters.rs # same
+
+rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
+rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+rm src/test/ui/command/command-current-dir.rs # can't find libstd.so
+
+rm src/test/ui/abi/stack-protector.rs # requires stack protector support
+
+rm src/test/incremental/issue-80691-bad-eval-cache.rs # wrong exit code
+rm src/test/incremental/spike-neg1.rs # errors out for some reason
+rm src/test/incremental/spike-neg2.rs # same
++rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs
+
+rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+
+echo "[TEST] rustc test suite"
+RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
+popd
--- /dev/null
- use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
- module
- .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
- .unwrap();
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ codegen_inner(module, unwind_context, kind, tcx.lang_items().oom().is_some());
+ true
+ } else {
+ false
+ }
+}
+
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
+ for method in ALLOCATOR_METHODS {
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
+
+ let caller_name = format!("__rust_{}", method.name);
+ let callee_name = kind.fn_name(method.name);
+
+ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = arg_tys
+ .into_iter()
+ .map(|ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(callee_func_ref, &args);
+ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+ bcx.ins().return_(&results);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
- module
- .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
- .unwrap();
++ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+ }
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
+
+ let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
+
+ let func_id =
+ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = (&[usize_ty, usize_ty])
+ .iter()
+ .map(|&ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ bcx.ins().call(callee_func_ref, &args);
+
+ bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
++ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+}
--- /dev/null
- use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+//! Codegen of a single function
+
- let mut clif_comments = fx.clif_comments;
+use rustc_ast::InlineAsmOptions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiOf;
+
++use indexmap::IndexSet;
++
+use crate::constant::ConstantCx;
+use crate::prelude::*;
++use crate::pretty_clif::CommentWriter;
+
+pub(crate) fn codegen_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx>,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) {
+ let tcx = cx.tcx;
+
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+ let _mir_guard = crate::PrintOnPanic(|| {
+ let mut buf = Vec::new();
+ rustc_middle::mir::write_mir_pretty(tcx, Some(instance.def_id()), &mut buf).unwrap();
+ String::from_utf8_lossy(&buf).into_owned()
+ });
+
+ // Declare function
+ let symbol_name = tcx.symbol_name(instance);
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
+
+ cx.cached_context.clear();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+ func.name = ExternalName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let target_config = module.target_config();
+ let pointer_type = target_config.pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let mut fx = FunctionCx {
+ cx,
+ module,
+ tcx,
+ target_config,
+ pointer_type,
+ constants_cx: ConstantCx::new(),
+
+ instance,
+ symbol_name,
+ mir,
+ fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+
+ clif_comments,
+ source_info_set: indexmap::IndexSet::new(),
+ next_ssa_var: 0,
+ };
+
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+
+ if !crate::constant::check_constants(&mut fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
+ } else if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+ } else {
+ tcx.sess.time("codegen clif ir", || {
+ tcx.sess
+ .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+ codegen_fn_content(&mut fx);
+ });
+ }
+
+ // Recover all necessary data from fx, before accessing func will prevent future access to it.
+ let instance = fx.instance;
- // Store function in context
- let context = &mut cx.cached_context;
- context.func = func;
-
++ let clif_comments = fx.clif_comments;
+ let source_info_set = fx.source_info_set;
+ let local_map = fx.local_map;
+
+ fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
- &context,
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "unopt",
+ module.isa(),
+ instance,
- verify_func(tcx, &clif_comments, &context.func);
++ &func,
+ &clif_comments,
+ );
+
+ // Verify function
- module
- .define_function(func_id, context, &mut NullTrapSink {}, &mut NullStackMapSink {})
- .unwrap()
++ verify_func(tcx, &clif_comments, &func);
++
++ compile_fn(
++ cx,
++ module,
++ instance,
++ symbol_name.name,
++ func_id,
++ func,
++ clif_comments,
++ source_info_set,
++ local_map,
++ );
++}
++
++fn compile_fn<'tcx>(
++ cx: &mut crate::CodegenCx<'tcx>,
++ module: &mut dyn Module,
++ instance: Instance<'tcx>,
++ symbol_name: &str,
++ func_id: FuncId,
++ func: Function,
++ mut clif_comments: CommentWriter,
++ source_info_set: IndexSet<SourceInfo>,
++ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
++) {
++ let tcx = cx.tcx;
++
++ // Store function in context
++ let context = &mut cx.cached_context;
++ context.clear();
++ context.func = func;
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(module.isa()).unwrap();
+ context.dce(module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ // Perform rust specific optimizations
+ tcx.sess.time("optimize clif ir", || {
+ crate::optimize::optimize_function(
+ tcx,
+ module.isa(),
+ instance,
+ context,
+ &mut clif_comments,
+ );
+ });
+
+ // Define function
+ tcx.sess.time("define function", || {
+ context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
- &context,
++ module.define_function(func_id, context).unwrap()
+ });
+
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "opt",
+ module.isa(),
+ instance,
- symbol_name.name,
++ &context.func,
+ &clif_comments,
+ );
+
+ if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ tcx,
+ || format!("{}.vcode", tcx.symbol_name(instance).name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
+
+ // Define debuginfo for function
+ let isa = module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ tcx.sess.time("generate debug info", || {
+ if let Some(debug_context) = debug_context {
+ debug_context.define_function(
+ instance,
+ func_id,
-
- // Clear context to make it usable for the next function
- context.clear();
++ symbol_name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
+ }
+ unwind_context.add_function(func_id, &context, isa);
+ });
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
+ for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+ // so for cleanup blocks.
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ if fx.clif_comments.enabled() {
+ let mut terminator_head = "\n".to_string();
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ let source_info = bb_data.terminator().source_info;
+ fx.set_debug_loc(source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ // FIXME Mark failure block as cold once Cranelift supports it
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx.get_caller_location(source_info.span).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, source_info.span);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(fx, *fn_span, func, args, *destination)
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ cleanup: _,
+ } => {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "cranelift doesn't support unwinding from inline assembly.",
+ );
+ }
+
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ source_info.span,
+ template,
+ operands,
+ *options,
+ );
+
+ match *destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ crate::trap::trap_unreachable(
+ fx,
+ "[corruption] Returned from noreturn inline asm",
+ );
+ }
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+ }
+ TerminatorKind::Unreachable => {
+ trap_unreachable(fx, "[corruption] Hit unreachable code.");
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, source_info.span, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
+
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ #[cfg(disabled)]
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ if fx.clif_comments.enabled() {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
+ // enum -> discriminant value
+ assert!(adt_def.is_enum());
+ match to_ty.kind() {
+ ty::Uint(_) | ty::Int(_) => {}
+ _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
+ }
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+
+ let discriminant = crate::discriminant::codegen_get_discriminant(
+ fx,
+ operand,
+ fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
+ )
+ .load_scalar(fx);
+
+ let res = crate::cast::clif_intcast(
+ fx,
+ discriminant,
+ to_clif_ty,
+ to_ty.is_signed(),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ let discr =
+ crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+ lval.write_cvalue(fx, discr);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .val()
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if operand.layout().size.bytes() == 0 {
+ // Do nothing for ZST's
+ } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx.call_memset(fx.target_config, addr, val, times);
+ } else {
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let content_ty = fx.monomorphize(content_ty);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+ let operand = codegen_operand(fx, operand);
+ let operand = operand.load_scalar(fx);
+ lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
+ }
+ Rvalue::NullaryOp(null_op, ty) => {
+ assert!(
+ lval.layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+ );
+ let layout = fx.layout_of(fx.monomorphize(ty));
+ let val = match null_op {
+ NullOp::SizeOf => layout.size.bytes(),
+ NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::CopyNonOverlapping(inner) => {
+ let dst = codegen_operand(fx, &inner.dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, &inner.src).load_scalar(fx);
+ let count = codegen_operand(fx, &inner.count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ }
+}
+
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
+
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
+
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ }
+}
+
+pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
+ let location = fx.get_caller_location(span).load_scalar(fx);
+
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id =
+ fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ fx.lib_call(
+ &*symbol_name,
+ vec![
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ args,
+ );
+
+ crate::trap::trap_unreachable(fx, "panic lang item returned");
+}
--- /dev/null
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+use rustc_session::Session;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_object::{ObjectBuilder, ObjectModule};
+
+use crate::{prelude::*, BackendConfig};
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectModule {
+ let mut builder =
+ ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+ // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
+
+fn emit_module(
+ tcx: TyCtxt<'_>,
+ backend_config: &BackendConfig,
+ name: String,
+ kind: ModuleKind,
+ module: ObjectModule,
+ debug: Option<DebugContext<'_>>,
+ unwind_context: UnwindContext,
+) -> ModuleCodegenResult {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
+ let obj = product.object.write().unwrap();
++
++ tcx.sess.prof.artifact_size("object_file", name.clone(), obj.len().try_into().unwrap());
++
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing object file: {}", err));
+ }
+
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ tcx.sess,
+ &name,
+ &Some(tmp_file.clone()),
+ )
+ };
+
+ ModuleCodegenResult(
+ CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
+ work_product,
+ )
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+ work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+ let mut object = None;
+ let work_product = cgu.work_product(tcx);
+ if let Some(saved_file) = &work_product.saved_file {
+ let obj_out =
+ tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ object = Some(obj_out.clone());
+ let source_file = rustc_incremental::in_incr_comp_dir_sess(&tcx.sess, &saved_file);
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+ tcx.sess.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err
+ ));
+ }
+ }
+
+ work_products.insert(cgu.work_product_id(), work_product);
+
+ CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object,
+ dwarf_object: None,
+ bytecode: None,
+ }
+}
+
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
+) -> ModuleCodegenResult {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut module = make_module(tcx.sess, isa, cgu_name.as_str().to_string());
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ module.isa(),
+ tcx.sess.opts.debuginfo != DebugInfo::None,
+ cgu_name,
+ );
+ super::predefine_mono_items(tcx, &mut module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => {
+ cx.tcx
+ .sess
+ .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
+ }
+ MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
+ if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ cx.global_asm.push_str("\n.intel_syntax noprefix\n");
+ } else {
+ cx.global_asm.push_str("\n.att_syntax\n");
+ }
+ for piece in asm.template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
+ }
+ }
+ cx.global_asm.push_str("\n.att_syntax\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+ }
+ }
+ }
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut module,
+ &mut cx.unwind_context,
+ false,
+ cgu.is_primary(),
+ );
+
+ let debug_context = cx.debug_context;
+ let unwind_context = cx.unwind_context;
+ let codegen_result = tcx.sess.time("write object file", || {
+ emit_module(
+ tcx,
+ &backend_config,
+ cgu.name().as_str().to_string(),
+ ModuleKind::Regular,
+ module,
+ debug_context,
+ unwind_context,
+ )
+ });
+
+ codegen_global_asm(tcx, cgu.name().as_str(), &cx.global_asm);
+
+ codegen_result
+}
+
+pub(crate) fn run_aot(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+ let mut work_products = FxHashMap::default();
+
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(()).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ _ if backend_config.disable_incr_cache => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
+ }
+
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ (backend_config.clone(), cgu.name()),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+
+ module
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut allocator_module = make_module(tcx.sess, isa, "allocator_shim".to_string());
+ assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
+ let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let ModuleCodegenResult(module, work_product) = emit_module(
+ tcx,
+ &backend_config,
+ "allocator_shim".to_string(),
+ ModuleKind::Allocator,
+ allocator_module,
+ None,
+ allocator_unwind_context,
+ );
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+ Some(module)
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let symbol_name = rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx);
+ let obj = create_compressed_metadata_file(tcx.sess, &metadata, &symbol_name);
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ // FIXME handle `-Ctarget-cpu=native`
+ let target_cpu =
+ tcx.sess.opts.cg.target_cpu.as_ref().unwrap_or(&tcx.sess.target.cpu).to_owned();
+ Box::new((
+ CodegenResults {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ },
+ work_products,
+ ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+ use std::io::Write;
+ use std::process::{Command, Stdio};
+
+ if global_asm.is_empty() {
+ return;
+ }
+
+ if cfg!(not(feature = "inline_asm"))
+ || tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ {
+ if global_asm.contains("__rust_probestack") {
+ return;
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
+ } else {
+ tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ }
+ }
+
+ let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+ let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+ }
+
+ // Link the global asm and main object file together
+ let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+ std::fs::rename(&output_object_file, &main_object_file).unwrap();
+ let status = Command::new(linker)
+ .arg("-r") // Create a new object file
+ .arg("-o")
+ .arg(output_object_file)
+ .arg(&main_object_file)
+ .arg(&global_asm_object_file)
+ .status()
+ .unwrap();
+ if !status.success() {
+ tcx.sess.fatal(&format!(
+ "Failed to link `{}` and `{}` together",
+ main_object_file.display(),
+ global_asm_object_file.display(),
+ ));
+ }
+
+ std::fs::remove_file(global_asm_object_file).unwrap();
+ std::fs::remove_file(main_object_file).unwrap();
+}
+
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+}
--- /dev/null
- use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::lazy::SyncOnceCell;
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
- module
- .define_function(
- func_id,
- &mut cx.cached_context,
- &mut NullTrapSink {},
- &mut NullStackMapSink {},
- )
- .unwrap();
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+struct JitState {
+ backend_config: BackendConfig,
+ jit_module: JITModule,
+}
+
+thread_local! {
+ static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: SyncOnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> =
+ SyncOnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+ /// Request that the specified `Instance` be lazily jitted.
+ ///
+ /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+ /// this message is sent.
+ JitFn {
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+ tx: mpsc::Sender<*const u8>,
+ },
+}
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+ /// Send the message.
+ fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+ thread_local! {
+ /// The Sender owned by the local thread
+ static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
+ GLOBAL_MESSAGE_SENDER
+ .get().unwrap()
+ .lock().unwrap()
+ .clone();
+ }
+ LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+ }
+}
+
+fn create_jit_module<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ backend_config: &BackendConfig,
+ hotswap: bool,
+) -> (JITModule, CodegenCx<'tcx>) {
+ let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
+ let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
+
+ let isa = crate::build_isa(tcx.sess, backend_config);
+ let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+ jit_builder.hotswap(hotswap);
+ crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
+ jit_builder.symbols(imported_symbols);
+ let mut jit_module = JITModule::new(jit_builder);
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+
+ crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut jit_module,
+ &mut cx.unwind_context,
+ true,
+ true,
+ );
+
+ (jit_module, cx)
+}
+
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+ }
+
+ if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ let (mut jit_module, mut cx) = create_jit_module(
+ tcx,
+ &backend_config,
+ matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+ );
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
+ let mono_items = cgus
+ .iter()
+ .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+ .flatten()
+ .collect::<FxHashMap<_, (_, _)>>()
+ .into_iter()
+ .collect::<Vec<(_, (_, _))>>();
+
+ super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
+ cx.tcx.sess.time("codegen fn", || {
+ crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
+ });
+ }
+ CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
+ if !cx.global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
+
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+
+ let start_sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: jit_module.target_config().default_call_conv,
+ };
+ let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+ let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ assert!(lazy_jit_state.is_none());
+ *lazy_jit_state = Some(JitState { backend_config, jit_module });
+ });
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_start) };
+
+ let (tx, rx) = mpsc::channel();
+ GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+ // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+ // (eg to lazily JIT further functions as required)
+ std::thread::spawn(move || {
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+ std::process::exit(ret);
+ });
+
+ // Handle messages
+ loop {
+ match rx.recv().unwrap() {
+ // lazy JIT compilation request - compile requested instance and return pointer to result
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+ tx.send(jit_fn(instance_ptr, trampoline_ptr))
+ .expect("jitted runtime hung up before response to lazy JIT request was sent");
+ }
+ }
+ }
+}
+
+#[no_mangle]
+extern "C" fn __clif_jit_fn(
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+) -> *const u8 {
+ // send the JIT request to the rustc thread, with a channel for the response
+ let (tx, rx) = mpsc::channel();
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+ .send()
+ .expect("rustc thread hung up before lazy JIT request was sent");
+
+ // block on JIT compilation result
+ rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+ // lift is used to ensure the correct lifetime for instance.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+ let jit_module = &mut lazy_jit_state.jit_module;
+ let backend_config = lazy_jit_state.backend_config.clone();
+
+ let name = tcx.symbol_name(instance).name;
+ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+ let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let current_ptr = jit_module.read_got_entry(func_id);
+
+ // If the function's GOT entry has already been updated to point at something other
+ // than the shim trampoline, don't re-jit but just return the new pointer instead.
+ // This does not need synchronization as this code is executed only by a sole rustc
+ // thread.
+ if current_ptr != trampoline_ptr {
+ return current_ptr;
+ }
+
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config,
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+ tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
+
+ assert!(cx.global_asm.is_empty());
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
+fn load_imported_symbols_for_jit(
+ sess: &Session,
+ crate_info: CrateInfo,
+) -> Vec<(String, *const u8)> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let data = &crate_info
+ .dependency_formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &cnum in &crate_info.used_crates {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = &crate_info.crate_name[&cnum];
+ let mut err = sess.struct_err(&format!("Can't load static lib {}", name.as_str()));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let mut imported_symbols = Vec::new();
+ for path in dylib_paths {
+ use object::{Object, ObjectSymbol};
+ let lib = libloading::Library::new(&path).unwrap();
+ let obj = std::fs::read(path).unwrap();
+ let obj = object::File::parse(&*obj).unwrap();
+ imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+ let name = symbol.name().unwrap().to_string();
+ if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+ return None;
+ }
+ if name.starts_with("rust_metadata_") {
+ // The metadata is part of a section that is not loaded by the dynamic linker in
+ // case of cg_llvm.
+ return None;
+ }
+ let dlsym_name = if cfg!(target_os = "macos") {
+ // On macOS `dlsym` expects the name without leading `_`.
+ assert!(name.starts_with('_'), "{:?}", name);
+ &name[1..]
+ } else {
+ &name
+ };
+ let symbol: libloading::Symbol<'_, *const u8> =
+ unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+ Some((name, *symbol))
+ }));
+ std::mem::forget(lib)
+ }
+
+ sess.abort_if_errors();
+
+ imported_symbols
+}
+
+fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
+ let tcx = cx.tcx;
+
+ let pointer_type = module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name;
+ let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
+ let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ cx.cached_context.clear();
+ let trampoline = &mut cx.cached_context.func;
+ trampoline.signature = sig.clone();
+
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+ let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+ let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+ trampoline_builder.switch_to_block(entry_block);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
++ module.define_function(func_id, &mut cx.cached_context).unwrap();
+}
--- /dev/null
- substs: SubstsRef<'tcx>,
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: &str,
- fx, intrinsic, substs, args,
++ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
+ let ret = destination.unwrap().0;
+
+ intrinsic_match! {
- let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
- let flt_cc = match kind_const.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
++ fx, intrinsic, args,
+ _ => {
+ fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ };
+
+ // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+ "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+ for lane in (0..lane_count).rev() {
+ let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ };
+ "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
- simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
++ let kind = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
++ let flt_cc = match kind.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind)) {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+ }
+ 3 => {
+ unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+ }
+ 4 => FloatCC::NotEqual,
+ 5 => {
+ unimplemented!("not less than");
+ }
+ 6 => {
+ unimplemented!("not less than or equal");
+ }
+ kind => unreachable!("kind {:?}", kind),
+ };
+
- _ => unreachable!("{:?}", lane_layout.ty),
++ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
++ let res_lane = match lane_ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
- bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
++ _ => unreachable!("{:?}", lane_ty),
+ };
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
- let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
++ bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+ });
+ };
+ "llvm.x86.sse2.psrli.d", (c a, o imm8) {
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
- };
- CValue::by_val(res_lane, res_lane_layout)
++ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++ match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
- let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
++ }
+ });
+ };
+ "llvm.x86.sse2.pslli.d", (c a, o imm8) {
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
- };
- CValue::by_val(res_lane, res_lane_layout)
++ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
++ match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
++ }
+ });
+ };
+ "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
+ // FIXME correctly handle the unalignment
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ };
+ "llvm.x86.addcarry.64", (v c_in, c a, c b) {
+ llvm_add_sub(
+ fx,
+ BinOp::Add,
+ ret,
+ c_in,
+ a,
+ b
+ );
+ };
+ "llvm.x86.subborrow.64", (v b_in, c a, c b) {
+ llvm_add_sub(
+ fx,
+ BinOp::Sub,
+ ret,
+ b_in,
+ a,
+ b
+ );
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+fn llvm_add_sub<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ ret: CPlace<'tcx>,
+ cb_in: Value,
+ a: CValue<'tcx>,
+ b: CValue<'tcx>,
+) {
+ assert_eq!(
+ a.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+ );
+ assert_eq!(
+ b.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+ );
+
+ // c + carry -> c + first intermediate carry or borrow respectively
+ let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+ let c = int0.value_field(fx, mir::Field::new(0));
+ let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+ // c + carry -> c + second intermediate carry or borrow respectively
+ let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+ let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let (c, cb1) = int1.load_scalar_pair(fx);
+
+ // carry0 | carry1 -> carry or borrow respectively
+ let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+ let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
+}
--- /dev/null
- use rustc_span::symbol::{kw, sym};
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
- (o $fx:expr, $arg:ident) => {
- $arg
- },
++use rustc_middle::ty::subst::SubstsRef;
++use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+macro intrinsic_pat {
+ (_) => {
+ _
+ },
+ ($name:ident) => {
+ sym::$name
+ },
+ (kw.$name:ident) => {
+ kw::$name
+ },
+ ($name:literal) => {
+ $name
+ },
+}
+
+macro intrinsic_arg {
- codegen_operand($fx, $arg)
++ (o $fx:expr, $arg:ident) => {},
+ (c $fx:expr, $arg:ident) => {
- codegen_operand($fx, $arg).load_scalar($fx)
- }
- }
-
- macro intrinsic_substs {
- ($substs:expr, $index:expr,) => {},
- ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
- let $first = $substs.type_at($index);
- intrinsic_substs!($substs, $index+1, $($rest),*);
++ let $arg = codegen_operand($fx, $arg);
+ },
+ (v $fx:expr, $arg:ident) => {
- ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
++ let $arg = codegen_operand($fx, $arg).load_scalar($fx);
+ }
+}
+
+macro intrinsic_match {
- $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
++ ($fx:expr, $intrinsic:expr, $args:expr,
+ _ => $unknown:block;
+ $(
- let _ = $substs; // Silence warning when substs is unused.
++ $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
+ )*) => {
- #[allow(unused_parens, non_snake_case)]
- {
- $(
- intrinsic_substs!($substs, 0, $($subst),*);
- )?
- if let [$($arg),*] = $args {
- let ($($arg,)*) = (
- $(intrinsic_arg!($a $fx, $arg),)*
- );
- #[warn(unused_parens, non_snake_case)]
- {
- $content
- }
- } else {
- bug!("wrong number of args for intrinsic {:?}", $intrinsic);
- }
- }
- }
- )*
- _ => $unknown,
- }
- }
- }
-
- macro call_intrinsic_match {
- ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
- $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
- )*) => {
- match $intrinsic {
- $(
- sym::$name => {
- assert!($substs.is_empty());
- if let [$(ref $arg),*] = *$args {
- let ($($arg,)*) = (
- $(codegen_operand($fx, $arg),)*
- );
- let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
- $ret.write_cvalue($fx, res);
-
- if let Some((_, dest)) = $destination {
- let ret_block = $fx.get_block(dest);
- $fx.bcx.ins().jump(ret_block, &[]);
- return;
- } else {
- unreachable!();
- }
+ match $intrinsic {
+ $(
+ $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
- _ => {}
- }
- }
- }
-
- macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
- match $ty.kind() {
- ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
- _ => {
- $fx.tcx.sess.span_err(
- $span,
- &format!(
- "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
- $intrinsic, $ty
- ),
- );
- // Prevent verifier error
- crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
- return;
++ if let [$($arg),*] = $args {
++ $(intrinsic_arg!($a $fx, $arg);)*
++ $content
+ } else {
+ bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+ }
+ }
+ )*
- macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
- if !$ty.is_simd() {
- $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
- // Prevent verifier error
- crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
- return;
- }
++ _ => $unknown,
+ }
+ }
+}
+
- f: impl Fn(
- &mut FunctionCx<'_, '_, 'tcx>,
- TyAndLayout<'tcx>,
- TyAndLayout<'tcx>,
- Value,
- ) -> CValue<'tcx>,
++fn report_atomic_type_validation_error<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ intrinsic: Symbol,
++ span: Span,
++ ty: Ty<'tcx>,
++) {
++ fx.tcx.sess.span_err(
++ span,
++ &format!(
++ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
++ intrinsic, ty
++ ),
++ );
++ // Prevent verifier error
++ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
- let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
++ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
- f: impl Fn(
- &mut FunctionCx<'_, '_, 'tcx>,
- TyAndLayout<'tcx>,
- TyAndLayout<'tcx>,
- Value,
- Value,
- ) -> CValue<'tcx>,
++ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
++ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
- let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
++ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
- f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
++ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
++ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
- res_val = f(fx, lane_layout, res_val, lane);
++ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
- f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
++ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
- layout: TyAndLayout<'tcx>,
++ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
- ) -> CValue<'tcx> {
- let ty = fx.clif_type(layout.ty).unwrap();
++ ty: Ty<'tcx>,
+ val: Value,
- CValue::by_val(res, layout)
- }
-
- macro simd_cmp {
- ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- let vector_ty = clif_vector_type($fx.tcx, $x.layout());
-
- if let Some(vector_ty) = vector_ty {
- let x = $x.load_scalar($fx);
- let y = $y.load_scalar($fx);
- let val = if vector_ty.lane_type().is_float() {
- $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
- } else {
- $fx.bcx.ins().icmp(IntCC::$cc, x, y)
- };
-
- // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
- let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
-
- $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
- } else {
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
- ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
- },
- );
- }
- },
- ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- // FIXME use vector icmp when possible
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
- ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
- ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
- },
- );
- },
- }
-
- macro simd_int_binop {
- ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
- },
- ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
- ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- CValue::by_val(res_lane, ret_lane_layout)
- },
- );
- },
- }
-
- macro simd_int_flt_binop {
- ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
- },
- ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
- ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
- ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- CValue::by_val(res_lane, ret_lane_layout)
- },
- );
- },
- }
-
- macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
- simd_pair_for_each_lane(
- $fx,
- $x,
- $y,
- $ret,
- |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- CValue::by_val(res_lane, ret_lane_layout)
- },
- );
++) -> Value {
++ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
- self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
++ res
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: Option<(CPlace<'tcx>, BasicBlock)>,
+ span: Span,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
+ let ret = match destination {
+ Some((place, _)) => place,
+ None => {
+ // Insert non returning intrinsics here
+ match intrinsic {
+ sym::abort => {
+ trap_abort(fx, "Called intrinsic::abort.");
+ }
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
+ }
+ _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+ }
+ return;
+ }
+ };
+
+ if intrinsic.as_str().starts_with("simd_") {
- return;
++ self::simd::codegen_simd_intrinsic_call(fx, intrinsic, substs, args, ret, span);
+ let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+ fx.bcx.ins().jump(ret_block, &[]);
- let usize_layout = fx.layout_of(fx.tcx.types.usize);
++ } else if codegen_float_intrinsic_call(fx, intrinsic, args, ret) {
++ let ret_block = fx.get_block(destination.expect("Float intrinsics don't diverge").1);
++ fx.bcx.ins().jump(ret_block, &[]);
++ } else {
++ codegen_regular_intrinsic_call(
++ fx,
++ instance,
++ intrinsic,
++ substs,
++ args,
++ ret,
++ span,
++ destination,
++ );
+ }
++}
+
- call_intrinsic_match! {
- fx, intrinsic, substs, ret, destination, args,
- expf32(flt) -> f32 => expf,
- expf64(flt) -> f64 => exp,
- exp2f32(flt) -> f32 => exp2f,
- exp2f64(flt) -> f64 => exp2,
- sqrtf32(flt) -> f32 => sqrtf,
- sqrtf64(flt) -> f64 => sqrt,
- powif32(a, x) -> f32 => __powisf2, // compiler-builtins
- powif64(a, x) -> f64 => __powidf2, // compiler-builtins
- powf32(a, x) -> f32 => powf,
- powf64(a, x) -> f64 => pow,
- logf32(flt) -> f32 => logf,
- logf64(flt) -> f64 => log,
- log2f32(flt) -> f32 => log2f,
- log2f64(flt) -> f64 => log2,
- log10f32(flt) -> f32 => log10f,
- log10f64(flt) -> f64 => log10,
- fabsf32(flt) -> f32 => fabsf,
- fabsf64(flt) -> f64 => fabs,
- fmaf32(x, y, z) -> f32 => fmaf,
- fmaf64(x, y, z) -> f64 => fma,
- copysignf32(x, y) -> f32 => copysignf,
- copysignf64(x, y) -> f64 => copysign,
-
- // rounding variants
- // FIXME use clif insts
- floorf32(flt) -> f32 => floorf,
- floorf64(flt) -> f64 => floor,
- ceilf32(flt) -> f32 => ceilf,
- ceilf64(flt) -> f64 => ceil,
- truncf32(flt) -> f32 => truncf,
- truncf64(flt) -> f64 => trunc,
- roundf32(flt) -> f32 => roundf,
- roundf64(flt) -> f64 => round,
-
- // trigonometry
- sinf32(flt) -> f32 => sinf,
- sinf64(flt) -> f64 => sin,
- cosf32(flt) -> f32 => cosf,
- cosf64(flt) -> f64 => cos,
++fn codegen_float_intrinsic_call<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ intrinsic: Symbol,
++ args: &[mir::Operand<'tcx>],
++ ret: CPlace<'tcx>,
++) -> bool {
++ let (name, arg_count, ty) = match intrinsic {
++ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
++ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
++ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
++ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
++ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
++ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
++ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
++ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
++ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
++ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
++ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
++ sym::logf64 => ("log", 1, fx.tcx.types.f64),
++ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
++ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
++ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
++ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
++ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
++ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
++ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
++ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
++ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
++ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
++ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
++ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
++ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
++ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
++ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
++ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
++ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
++ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
++ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
++ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
++ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
++ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
++ _ => return false,
++ };
+
- fx, intrinsic, substs, args,
++ if args.len() != arg_count {
++ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
++ let (a, b, c);
++ let args = match args {
++ [x] => {
++ a = [codegen_operand(fx, x)];
++ &a as &[_]
++ }
++ [x, y] => {
++ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
++ &b
++ }
++ [x, y, z] => {
++ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
++ &c
++ }
++ _ => unreachable!(),
++ };
++
++ let res = fx.easy_call(name, &args, ty);
++ ret.write_cvalue(fx, res);
++
++ true
++}
++
++fn codegen_regular_intrinsic_call<'tcx>(
++ fx: &mut FunctionCx<'_, '_, 'tcx>,
++ instance: Instance<'tcx>,
++ intrinsic: Symbol,
++ substs: SubstsRef<'tcx>,
++ args: &[mir::Operand<'tcx>],
++ ret: CPlace<'tcx>,
++ span: Span,
++ destination: Option<(CPlace<'tcx>, BasicBlock)>,
++) {
++ let usize_layout = fx.layout_of(fx.tcx.types.usize);
++
+ intrinsic_match! {
- copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
++ fx, intrinsic, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+ };
+
+ assume, (c _a) {};
+ likely | unlikely, (c a) {
+ ret.write_cvalue(fx, a);
+ };
+ breakpoint, () {
+ fx.bcx.ins().debugtrap();
+ };
- volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
++ copy | copy_nonoverlapping, (v src, v dst, v count) {
++ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ };
+ // NOTE: the volatile variants have src and dst swapped
- size_of_val, <T> (c ptr) {
- let layout = fx.layout_of(T);
++ volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
++ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount = if elem_size != 1 {
+ fx.bcx.ins().imul_imm(count, elem_size as i64)
+ } else {
+ count
+ };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ };
- min_align_of_val, <T> (c ptr) {
- let layout = fx.layout_of(T);
++ size_of_val, (c ptr) {
++ let layout = fx.layout_of(substs.type_at(0));
+ let size = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ };
- unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
++ min_align_of_val, (c ptr) {
++ let layout = fx.layout_of(substs.type_at(0));
+ let align = if layout.is_unsized() {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ };
+
- saturating_add | saturating_sub, <T> (c lhs, c rhs) {
++ unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
+ | unchecked_shl | unchecked_shr, (c x, c y) {
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
++ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ };
- let signed = type_sign(T);
++ saturating_add | saturating_sub, (c lhs, c rhs) {
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
- let clif_ty = fx.clif_type(T).unwrap();
++ let signed = type_sign(lhs.layout().ty);
+
+ let checked_res = crate::num::codegen_checked_int_binop(
+ fx,
+ bin_op,
+ lhs,
+ rhs,
+ );
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
- let res = CValue::by_val(val, fx.layout_of(T));
++ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
+ let val = match (intrinsic, signed) {
+ (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (sym::saturating_add, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ (sym::saturating_sub, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
- rotate_left, <T>(v x, v y) {
- let layout = fx.layout_of(T);
++ let res = CValue::by_val(val, lhs.layout());
+
+ ret.write_cvalue(fx, res);
+ };
- rotate_right, <T>(v x, v y) {
- let layout = fx.layout_of(T);
++ rotate_left, (c x, v y) {
++ let layout = x.layout();
++ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
- ctlz | ctlz_nonzero, <T> (v arg) {
++ rotate_right, (c x, v y) {
++ let layout = x.layout();
++ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ };
+
+ // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+ // doesn't have UB both are codegen'ed the same way
+ offset | arith_offset, (c base, v offset) {
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ };
+
+ transmute, (c from) {
+ ret.write_cvalue_transmute(fx, from);
+ };
+ write_bytes | volatile_set_memory, (c dst, v val, v count) {
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+ };
- let res = fx.bcx.ins().clz(arg);
- let res = CValue::by_val(res, fx.layout_of(T));
++ ctlz | ctlz_nonzero, (c arg) {
++ let val = arg.load_scalar(fx);
+ // FIXME trap on `ctlz_nonzero` with zero arg.
- cttz | cttz_nonzero, <T> (v arg) {
++ let res = fx.bcx.ins().clz(val);
++ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ };
- let res = fx.bcx.ins().ctz(arg);
- let res = CValue::by_val(res, fx.layout_of(T));
++ cttz | cttz_nonzero, (c arg) {
++ let val = arg.load_scalar(fx);
+ // FIXME trap on `cttz_nonzero` with zero arg.
- ctpop, <T> (v arg) {
- let res = fx.bcx.ins().popcnt(arg);
- let res = CValue::by_val(res, fx.layout_of(T));
++ let res = fx.bcx.ins().ctz(val);
++ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ };
- bitreverse, <T> (v arg) {
- let res = fx.bcx.ins().bitrev(arg);
- let res = CValue::by_val(res, fx.layout_of(T));
++ ctpop, (c arg) {
++ let val = arg.load_scalar(fx);
++ let res = fx.bcx.ins().popcnt(val);
++ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ };
- bswap, <T> (v arg) {
++ bitreverse, (c arg) {
++ let val = arg.load_scalar(fx);
++ let res = fx.bcx.ins().bitrev(val);
++ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ };
- let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
++ bswap, (c arg) {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
- assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
- let layout = fx.layout_of(T);
++ let val = arg.load_scalar(fx);
++ let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+ ret.write_cvalue(fx, res);
+ };
- &format!("attempted to instantiate uninhabited type `{}`", T),
++ assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
++ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
- &format!("attempted to zero-initialize type `{}`, which is invalid", T),
++ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ span,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
- &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
++ &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
+ span,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
- ptr_offset_from, <T> (v ptr, v base) {
++ &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
+ span,
+ )
+ });
+ return;
+ }
+ };
+
+ volatile_load | unaligned_volatile_load, (c ptr) {
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout =
+ fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ };
+ volatile_store | unaligned_volatile_store, (v ptr, c val) {
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ };
+
+ pref_align_of | needs_drop | type_id | type_name | variant_count, () {
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(
+ fx,
+ const_val,
+ ret.layout().ty,
+ );
+ ret.write_cvalue(fx, val);
+ };
+
- let pointee_size: u64 = fx.layout_of(T).size.bytes();
++ ptr_offset_from, (v ptr, v base) {
++ let ty = substs.type_at(0);
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
- _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
- validate_atomic_type!(fx, intrinsic, span, T);
- let ty = fx.clif_type(T).unwrap();
++ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_eq, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ ptr_guaranteed_ne, (c a, c b) {
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
+ };
+
+ caller_location, () {
+ let caller_location = fx.get_caller_location(span);
+ ret.write_cvalue(fx, caller_location);
+ };
+
+ _ if intrinsic.as_str().starts_with("atomic_fence"), () {
+ fx.bcx.ins().fence();
+ };
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
+ };
- let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
++ _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
++ let ty = substs.type_at(0);
++ match ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, ty);
++ return;
++ }
++ }
++ let clif_ty = fx.clif_type(ty).unwrap();
+
- let val = CValue::by_val(val, fx.layout_of(T));
++ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
- validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
++ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ let ty = substs.type_at(0);
++ match ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, ty);
++ return;
++ }
++ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+ let layout = new.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+ let layout = new.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+ let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val)
+ };
+
+ _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
+ let layout = amount.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+ let layout = amount.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+ let layout = src.layout();
- validate_atomic_type!(fx, intrinsic, span, layout.ty);
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+ _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+ let layout = src.layout();
- raw_eq, <T>(v lhs_ref, v rhs_ref) {
- fn type_by_size(size: Size) -> Option<Type> {
- Type::int(size.bits().try_into().ok()?)
- }
-
- let size = fx.layout_of(T).layout.size;
++ match layout.ty.kind() {
++ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
++ _ => {
++ report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
++ return;
++ }
++ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ };
+
+ // In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
+ // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+ // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+ // a float against itself. Only in case of NaN is it not equal to itself.
+ minnumf32, (v a, v b) {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ minnumf64, (v a, v b) {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf32, (v a, v b) {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ };
+ maxnumf64, (v a, v b) {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ let val = fx.bcx.ins().select(a_is_nan, b, temp);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ };
+
+ kw.Try, (v f, v data, v _catch_fn) {
+ // FIXME once unwinding is supported, change this to actually catch panics
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ };
+
+ fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+ let res = crate::num::codegen_float_binop(fx, match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ }, x, y);
+ ret.write_cvalue(fx, res);
+ };
+ float_to_int_unchecked, (v f) {
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ };
+
- } else if let Some(clty) = type_by_size(size) {
++ raw_eq, (v lhs_ref, v rhs_ref) {
++ let size = fx.layout_of(substs.type_at(0)).layout.size;
+ // FIXME add and use emit_small_memcmp
+ let is_eq_value =
+ if size == Size::ZERO {
+ // No bytes means they're trivially equal
+ fx.bcx.ins().iconst(types::I8, 1)
++ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+ // Can't use `trusted` for these loads; they could be unaligned.
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+ let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+ fx.bcx.ins().bint(types::I8, eq)
+ } else {
+ // Just call `memcmp` (like slices do in core) when the
+ // size is too large or it's not a power-of-two.
+ let signed_bytes = i64::try_from(size.bytes()).unwrap();
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ref, rhs_ref, bytes_val];
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+ fx.bcx.ins().bint(types::I8, eq)
+ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+ };
+
++ const_allocate, (c _size, c _align) {
++ // returns a null pointer at runtime.
++ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
++ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
++ };
++
++ const_deallocate, (c _ptr, c _size, c _align) {
++ // nop at runtime.
++ };
++
+ black_box, (c a) {
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
+ };
+ }
+
+ if let Some((_, dest)) = destination {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+ }
+}
--- /dev/null
- instance: Instance<'tcx>,
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
++use rustc_middle::ty::subst::SubstsRef;
++use rustc_span::Symbol;
++
+use super::*;
+use crate::prelude::*;
+
++fn report_simd_type_validation_error(
++ fx: &mut FunctionCx<'_, '_, '_>,
++ intrinsic: Symbol,
++ span: Span,
++ ty: Ty<'_>,
++) {
++ fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
++ // Prevent verifier error
++ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
++}
++
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
- let def_id = instance.def_id();
- let substs = instance.substs;
-
- let intrinsic = fx.tcx.item_name(def_id);
-
++ intrinsic: Symbol,
++ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
- fx, intrinsic, substs, args,
+ intrinsic_match! {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
- let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
++ fx, intrinsic, args,
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ };
+
+ simd_cast, (c a) {
- let from_signed = type_sign(lane_layout.ty);
- let to_signed = type_sign(ret_lane_layout.ty);
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
++
++ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
++ let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
+
- let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
- CValue::by_val(ret_lane, ret_lane_layout)
++ let from_signed = type_sign(lane_ty);
++ let to_signed = type_sign(ret_lane_ty);
+
- simd_eq, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(fx, Equal|Equal(x, y) -> ret);
- };
- simd_ne, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
- };
- simd_lt, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
- };
- simd_le, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
- };
- simd_gt, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
- };
- simd_ge, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_cmp!(
- fx,
- UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
- (x, y) -> ret
- );
++ clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
+ });
+ };
+
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
++ simd_eq | simd_ne | simd_lt | simd_le | simd_gt | simd_ge, (c x, c y) {
++ if !x.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
++ return;
++ }
++
++ // FIXME use vector instructions when possible
++ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
++ let res_lane = match (lane_ty.kind(), intrinsic) {
++ (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
++ (ty::Uint(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
++ (ty::Uint(_), sym::simd_lt) => {
++ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
++ }
++ (ty::Uint(_), sym::simd_le) => {
++ fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
++ }
++ (ty::Uint(_), sym::simd_gt) => {
++ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
++ }
++ (ty::Uint(_), sym::simd_ge) => {
++ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
++ }
++
++ (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
++ (ty::Int(_), sym::simd_ne) => fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane),
++ (ty::Int(_), sym::simd_lt) => fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane),
++ (ty::Int(_), sym::simd_le) => {
++ fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
++ }
++ (ty::Int(_), sym::simd_gt) => {
++ fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
++ }
++ (ty::Int(_), sym::simd_ge) => {
++ fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
++ }
++
++ (ty::Float(_), sym::simd_eq) => fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane),
++ (ty::Float(_), sym::simd_ne) => fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane),
++ (ty::Float(_), sym::simd_lt) => fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane),
++ (ty::Float(_), sym::simd_le) => {
++ fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
++ }
++ (ty::Float(_), sym::simd_gt) => fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane),
++ (ty::Float(_), sym::simd_ge) => {
++ fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
++ }
++
++ _ => unreachable!(),
++ };
++
++ let ty = fx.clif_type(res_lane_ty).unwrap();
++
++ let res_lane = fx.bcx.ins().bint(ty, res_lane);
++ fx.bcx.ins().ineg(res_lane)
++ });
+ };
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
++ if !x.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
++ return;
++ }
+
+ // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
+ // If there is no suffix, use the index array length.
+ let n: u16 = if intrinsic == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ }).try_into().unwrap()
+ }
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ idx_ty,
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+ } else {
+ intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
+ };
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
+
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
+ let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
+ alloc.get_bytes(fx, alloc_range(offset, size)).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count).map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4*i.. 4*i + 4],
+ ).expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ }).collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ };
+
+ simd_insert, (c base, o idx, c val) {
+ // FIXME validate
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(
+ span,
+ "Index argument for `simd_insert` is not a constant",
+ );
+ };
+
+ let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ };
+
+ simd_extract, (c v, o idx) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
- let ret_lane = match lane_layout.ty.kind() {
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
+ let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(
+ span,
+ "Index argument for `simd_extract` is not a constant",
+ );
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
+ let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+ }
+
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
+ ret.write_cvalue(fx, ret_lane);
+ };
+
+ simd_neg, (c a) {
- };
- CValue::by_val(ret_lane, ret_lane_layout)
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
++
++ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
++ match lane_ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
- simd_fabs, (c a) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
- let ret_lane = fx.bcx.ins().fabs(lane);
- CValue::by_val(ret_lane, ret_lane_layout)
- });
- };
++ }
+ });
+ };
+
- simd_fsqrt, (c a) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
- let ret_lane = fx.bcx.ins().sqrt(lane);
- CValue::by_val(ret_lane, ret_lane_layout)
++ simd_add | simd_sub | simd_mul | simd_div | simd_rem
++ | simd_shl | simd_shr | simd_and | simd_or | simd_xor, (c x, c y) {
++ if !x.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
++ return;
++ }
+
- simd_add, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
- };
- simd_sub, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
- };
- simd_mul, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
- };
- simd_div, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
- };
- simd_rem, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
- let res_lane = match lane_layout.ty.kind() {
- ty::Uint(_) => fx.bcx.ins().urem(x_lane, y_lane),
- ty::Int(_) => fx.bcx.ins().srem(x_lane, y_lane),
- ty::Float(FloatTy::F32) => fx.lib_call(
- "fmodf",
- vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
- vec![AbiParam::new(types::F32)],
- &[x_lane, y_lane],
- )[0],
- ty::Float(FloatTy::F64) => fx.lib_call(
- "fmod",
- vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
- vec![AbiParam::new(types::F64)],
- &[x_lane, y_lane],
- )[0],
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- CValue::by_val(res_lane, ret_lane_layout)
- });
- };
- simd_shl, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_binop!(fx, ishl(x, y) -> ret);
- };
- simd_shr, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
- };
- simd_and, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_binop!(fx, band(x, y) -> ret);
- };
- simd_or, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_binop!(fx, bor(x, y) -> ret);
- };
- simd_xor, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_int_binop!(fx, bxor(x, y) -> ret);
- };
-
++ // FIXME use vector instructions when possible
++ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| match (
++ lane_ty.kind(),
++ intrinsic,
++ ) {
++ (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
++
++ (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
++ (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
++ (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
++ (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
++ (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
++
++ (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
++ (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
++ (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
++ (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
++ (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
++ "fmodf",
++ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
++ vec![AbiParam::new(types::F32)],
++ &[x_lane, y_lane],
++ )[0],
++ (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
++ "fmod",
++ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
++ vec![AbiParam::new(types::F64)],
++ &[x_lane, y_lane],
++ )[0],
++
++ (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
++ (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
++
++ (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
++ (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
++ (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
++ (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
++ (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
++
++ _ => unreachable!(),
+ });
+ };
+
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+ simd_fma, (c a, c b, c c) {
- simd_fmin, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_flt_binop!(fx, fmin(x, y) -> ret);
- };
- simd_fmax, (c x, c y) {
- validate_simd_type!(fx, intrinsic, span, x.layout().ty);
- simd_flt_binop!(fx, fmax(x, y) -> ret);
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ let layout = a.layout();
+
+ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+
+ for lane in 0..lane_count {
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+ let c_lane = c.value_lane(fx, lane).load_scalar(fx);
+
+ let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
+ let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ };
+
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
- let res_lane = match lane_layout.ty.kind() {
++ simd_fmin | simd_fmax, (c x, c y) {
++ if !x.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
++ return;
++ }
++
++ // FIXME use vector instructions when possible
++ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
++ match lane_ty.kind() {
++ ty::Float(_) => {},
++ _ => unreachable!("{:?}", lane_ty),
++ }
++ match intrinsic {
++ sym::simd_fmin => fx.bcx.ins().fmin(x_lane, y_lane),
++ sym::simd_fmax => fx.bcx.ins().fmax(x_lane, y_lane),
++ _ => unreachable!(),
++ }
++ });
+ };
+
+ simd_round, (c a) {
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- CValue::by_val(res_lane, ret_lane_layout)
- });
- };
- simd_ceil, (c a) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
- let ret_lane = fx.bcx.ins().ceil(lane);
- CValue::by_val(ret_lane, ret_lane_layout)
- });
- };
- simd_floor, (c a) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
- let ret_lane = fx.bcx.ins().floor(lane);
- CValue::by_val(ret_lane, ret_lane_layout)
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
++
++ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
++ match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
- simd_trunc, (c a) {
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
- simd_for_each_lane(fx, a, ret, |fx, _lane_layout, ret_lane_layout, lane| {
- let ret_lane = fx.bcx.ins().trunc(lane);
- CValue::by_val(ret_lane, ret_lane_layout)
++ _ => unreachable!("{:?}", lane_ty),
++ }
+ });
+ };
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
- if lane_layout.ty.is_floating_point() {
++
++ simd_fabs | simd_fsqrt | simd_ceil | simd_floor | simd_trunc, (c a) {
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
++
++ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
++ match lane_ty.kind() {
++ ty::Float(_) => {},
++ _ => unreachable!("{:?}", lane_ty),
++ }
++ match intrinsic {
++ sym::simd_fabs => fx.bcx.ins().fabs(lane),
++ sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
++ sym::simd_ceil => fx.bcx.ins().ceil(lane),
++ sym::simd_floor => fx.bcx.ins().floor(lane),
++ sym::simd_trunc => fx.bcx.ins().trunc(lane),
++ _ => unreachable!(),
++ }
+ });
+ };
+
+ simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, Some(acc), ret, |fx, lane_layout, a, b| {
- if lane_layout.ty.is_floating_point() {
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
++ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ };
+
+ simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
++ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ };
+
+ simd_reduce_all, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
+ };
+
+ simd_reduce_any, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().band(a, b));
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
+ };
+
+ simd_reduce_and, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bor(a, b));
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
+ };
+
+ simd_reduce_or, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, None, ret, |fx, _layout, a, b| fx.bcx.ins().bxor(a, b));
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
+ };
+
+ simd_reduce_xor, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
- let lt = match layout.ty.kind() {
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
+ };
+
+ simd_reduce_min, (c v) {
- validate_simd_type!(fx, intrinsic, span, v.layout().ty);
- simd_reduce(fx, v, None, ret, |fx, layout, a, b| {
- let gt = match layout.ty.kind() {
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
++ let lt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::LessThan, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(lt, a, b)
+ });
+ };
+
+ simd_reduce_max, (c v) {
- validate_simd_type!(fx, intrinsic, span, m.layout().ty);
- validate_simd_type!(fx, intrinsic, span, a.layout().ty);
++ if !v.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
++ return;
++ }
++
++ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
++ let gt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
+ ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::GreaterThan, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(gt, a, b)
+ });
+ };
+
+ simd_select, (c m, c a, c b) {
++ if !m.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
++ return;
++ }
++ if !a.layout().ty.is_simd() {
++ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
++ return;
++ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
+ let res_lane = CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ };
+
+ // simd_saturating_*
+ // simd_bitmask
+ // simd_scatter
+ // simd_gather
+ }
+}
--- /dev/null
- use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+use rustc_hir::LangItem;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::AssocKind;
+use rustc_session::config::EntryFnType;
+use rustc_span::symbol::Ident;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the rust runtime and call
+/// users main function.
+pub(crate) fn maybe_create_entry_wrapper(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ is_jit: bool,
+ is_primary_cgu: bool,
+) {
+ let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
+ Some((def_id, entry_ty)) => (
+ def_id,
+ match entry_ty {
+ EntryFnType::Main => true,
+ EntryFnType::Start => false,
+ },
+ ),
+ None => return,
+ };
+
+ if main_def_id.is_local() {
+ let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+ if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ return;
+ }
+ } else if !is_primary_cgu {
+ return;
+ }
+
+ create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
+
+ fn create_entry_fn(
+ tcx: TyCtxt<'_>,
+ m: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ rust_main_def_id: DefId,
+ ignore_lang_start_wrapper: bool,
+ is_main_fn: bool,
+ ) {
+ let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+ // Given that `main()` has no arguments,
+ // then its return type cannot have
+ // late-bound regions, since late-bound
+ // regions must appear in the argument
+ // listing.
+ let main_ret_ty = tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap());
+
+ let cmain_sig = Signature {
+ params: vec![
+ AbiParam::new(m.target_config().pointer_type()),
+ AbiParam::new(m.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(m.isa().triple()),
+ };
+
+ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+ let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+ let main_name = tcx.symbol_name(instance).name;
+ let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
+ let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
+ let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+ let result = if is_main_fn && ignore_lang_start_wrapper {
+ // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
+ // FIXME set program arguments somehow
+ let call_inst = bcx.ins().call(main_func_ref, &[]);
+ let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
+
+ let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
+ let report = tcx
+ .associated_items(termination_trait)
+ .find_by_name_and_kind(
+ tcx,
+ Ident::from_str("report"),
+ AssocKind::Fn,
+ termination_trait,
+ )
+ .unwrap();
+ let report = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ report.def_id,
+ tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
+ )
+ .unwrap()
+ .unwrap();
+
+ let report_name = tcx.symbol_name(report).name;
+ let report_sig = get_function_sig(tcx, m.isa().triple(), report);
+ let report_func_id =
+ m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
+ let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
+
+ // FIXME do proper abi handling instead of expecting the pass mode to be identical
+ // for returns and arguments.
+ let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
+ let res = bcx.func.dfg.inst_results(report_call_inst)[0];
+ match m.target_config().pointer_type() {
+ types::I32 => res,
+ types::I64 => bcx.ins().sextend(types::I64, res),
+ _ => unimplemented!("16bit systems are not yet supported"),
+ }
+ } else if is_main_fn {
+ let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+ let start_instance = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ start_def_id,
+ tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+ let start_func_id = import_function(tcx, m, start_instance);
+
+ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+ let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ } else {
+ // using user-defined start fn
+ let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ };
+
+ bcx.ins().return_(&[result]);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
- m.define_function(cmain_func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
- .unwrap();
++ m.define_function(cmain_func_id, &mut ctx).unwrap();
+ unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+ }
+}
--- /dev/null
- crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx, &*clif_comments);
+//! Various optimizations specific to cg_clif
+
+use cranelift_codegen::isa::TargetIsa;
+
+use crate::prelude::*;
+
+pub(crate) mod peephole;
+
+pub(crate) fn optimize_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ isa: &dyn TargetIsa,
+ instance: Instance<'tcx>,
+ ctx: &mut Context,
+ clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+ // FIXME classify optimizations over opt levels once we have more
+
++ crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx.func, &*clif_comments);
+ crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
--- /dev/null
- context: &cranelift_codegen::Context,
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg loc.idx param pass mode ssa flags ty
+//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
+//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
+//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//!
+//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//! sig0 = (i64, i64, i64) system_v
+//! sig1 = (i64, i64, i64) system_v
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//! v3 = stack_addr.i64 ss0
+//! v4 = stack_addr.i64 ss1
+//! store v2, v4
+//! v5 = stack_addr.i64 ss2
+//! jump block1
+//!
+//! block1:
+//! nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//! v6 = load.i64 v4
+//! store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//! v7 = load.i64 v5
+//! call fn0(v0, v3, v7)
+//! jump block2
+//!
+//! block2:
+//! nop
+//! ;
+//! ; return
+//! return
+//! }
+//! ```
+
+use std::fmt;
+use std::io::Write;
+
+use cranelift_codegen::{
+ entity::SecondaryMap,
+ ir::entities::AnyEntity,
+ write::{FuncWriter, PlainWriter},
+};
+
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_session::config::OutputType;
+
+use crate::prelude::*;
+
+#[derive(Debug)]
+pub(crate) struct CommentWriter {
+ enabled: bool,
+ global_comments: Vec<String>,
+ entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+ pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ let enabled = should_write_ir(tcx);
+ let global_comments = if enabled {
+ vec![
+ format!("symbol {}", tcx.symbol_name(instance).name),
+ format!("instance {:?}", instance),
+ format!(
+ "abi {:?}",
+ RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())
+ ),
+ String::new(),
+ ]
+ } else {
+ vec![]
+ };
+
+ CommentWriter { enabled, global_comments, entity_comments: FxHashMap::default() }
+ }
+}
+
+impl CommentWriter {
+ pub(crate) fn enabled(&self) -> bool {
+ self.enabled
+ }
+
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ debug_assert!(self.enabled);
+ self.global_comments.push(comment.into());
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ debug_assert!(self.enabled);
+
+ use std::collections::hash_map::Entry;
+ match self.entity_comments.entry(entity.into()) {
+ Entry::Occupied(mut occ) => {
+ occ.get_mut().push('\n');
+ occ.get_mut().push_str(comment.as_ref());
+ }
+ Entry::Vacant(vac) => {
+ vac.insert(comment.into());
+ }
+ }
+ }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+ fn write_preamble(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ ) -> Result<bool, fmt::Error> {
+ for comment in &self.global_comments {
+ if !comment.is_empty() {
+ writeln!(w, "; {}", comment)?;
+ } else {
+ writeln!(w)?;
+ }
+ }
+ if !self.global_comments.is_empty() {
+ writeln!(w)?;
+ }
+
+ self.super_preamble(w, func)
+ }
+
+ fn write_entity_definition(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ _func: &Function,
+ entity: AnyEntity,
+ value: &dyn fmt::Display,
+ ) -> fmt::Result {
+ write!(w, " {} = {}", entity, value)?;
+
+ if let Some(comment) = self.entity_comments.get(&entity) {
+ writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+ } else {
+ writeln!(w)
+ }
+ }
+
+ fn write_block_header(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ block: Block,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_block_header(w, func, block, indent)
+ }
+
+ fn write_instruction(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ aliases: &SecondaryMap<Value, Vec<Value>>,
+ inst: Inst,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_instruction(w, func, aliases, inst, indent)?;
+ if let Some(comment) = self.entity_comments.get(&inst.into()) {
+ writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+ }
+ Ok(())
+ }
+}
+
+impl FunctionCx<'_, '_, '_> {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.clif_comments.add_global_comment(comment);
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ self.clif_comments.add_comment(entity, comment);
+ }
+}
+
+pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+}
+
+pub(crate) fn write_ir_file(
+ tcx: TyCtxt<'_>,
+ name: impl FnOnce() -> String,
+ write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
+) {
+ if !should_write_ir(tcx) {
+ return;
+ }
+
+ let clif_output_dir = tcx.output_filenames(()).with_extension("clif");
+
+ match std::fs::create_dir(&clif_output_dir) {
+ Ok(()) => {}
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+ res @ Err(_) => res.unwrap(),
+ }
+
+ let clif_file_name = clif_output_dir.join(name());
+
+ let res = std::fs::File::create(clif_file_name).and_then(|mut file| write(&mut file));
+ if let Err(err) = res {
+ tcx.sess.warn(&format!("error writing ir file: {}", err));
+ }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ postfix: &str,
+ isa: &dyn cranelift_codegen::isa::TargetIsa,
+ instance: Instance<'tcx>,
- cranelift_codegen::write::decorate_function(
- &mut clif_comments,
- &mut clif,
- &context.func,
- )
- .unwrap();
++ func: &cranelift_codegen::ir::Function,
+ mut clif_comments: &CommentWriter,
+) {
+ write_ir_file(
+ tcx,
+ || format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
+ |file| {
+ let mut clif = String::new();
++ cranelift_codegen::write::decorate_function(&mut clif_comments, &mut clif, func)
++ .unwrap();
+
+ for flag in isa.flags().iter() {
+ writeln!(file, "set {}", flag)?;
+ }
+ write!(file, "target {}", isa.triple().architecture.to_string())?;
+ for isa_flag in isa.isa_flags().iter() {
+ write!(file, " {}", isa_flag)?;
+ }
+ writeln!(file, "\n")?;
+ writeln!(file)?;
+ file.write_all(clif.as_bytes())?;
+ Ok(())
+ },
+ );
+}
+
+impl fmt::Debug for FunctionCx<'_, '_, '_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.local_map)?;
+
+ let mut clif = String::new();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &self.clif_comments,
+ &mut clif,
+ &self.bcx.func,
+ )
+ .unwrap();
+ writeln!(f, "\n{}", clif)
+ }
+}
--- /dev/null
- use std::env;
- use std::path::PathBuf;
- use std::process;
-
- #[path = "build_system/build_backend.rs"]
- mod build_backend;
- #[path = "build_system/build_sysroot.rs"]
- mod build_sysroot;
- #[path = "build_system/config.rs"]
- mod config;
- #[path = "build_system/prepare.rs"]
- mod prepare;
- #[path = "build_system/rustc_info.rs"]
- mod rustc_info;
- #[path = "build_system/utils.rs"]
- mod utils;
-
- fn usage() {
- eprintln!("Usage:");
- eprintln!(" ./y.rs prepare");
- eprintln!(
- " ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
- );
- }
-
- macro_rules! arg_error {
- ($($err:tt)*) => {{
- eprintln!($($err)*);
- usage();
- std::process::exit(1);
- }};
- }
-
- enum Command {
- Build,
- }
-
- #[derive(Copy, Clone)]
- enum SysrootKind {
- None,
- Clif,
- Llvm,
- }
+#!/usr/bin/env bash
+#![allow()] /*This line is ignored by bash
+# This block is ignored by rustc
+set -e
+echo "[BUILD] y.rs" 1>&2
+rustc $0 -o ${0/.rs/.bin} -g
+exec ${0/.rs/.bin} $@
+*/
+
+//! The build system for cg_clif
+//!
+//! # Manual compilation
+//!
+//! If your system doesn't support shell scripts you can manually compile and run this file using
+//! for example:
+//!
+//! ```shell
+//! $ rustc y.rs -o y.bin
+//! $ ./y.bin
+//! ```
+//!
+//! # Naming
+//!
+//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
+
- env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
- env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
- // The target dir is expected in the default location. Guard against the user changing it.
- env::set_var("CARGO_TARGET_DIR", "target");
-
- let mut args = env::args().skip(1);
- let command = match args.next().as_deref() {
- Some("prepare") => {
- if args.next().is_some() {
- arg_error!("./x.rs prepare doesn't expect arguments");
- }
- prepare::prepare();
- process::exit(0);
- }
- Some("build") => Command::Build,
- Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
- Some(command) => arg_error!("Unknown command {}", command),
- None => {
- usage();
- process::exit(0);
- }
- };
-
- let mut target_dir = PathBuf::from("build");
- let mut channel = "release";
- let mut sysroot_kind = SysrootKind::Clif;
- let mut use_unstable_features = true;
- while let Some(arg) = args.next().as_deref() {
- match arg {
- "--target-dir" => {
- target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
- arg_error!("--target-dir requires argument");
- }))
- }
- "--debug" => channel = "debug",
- "--sysroot" => {
- sysroot_kind = match args.next().as_deref() {
- Some("none") => SysrootKind::None,
- Some("clif") => SysrootKind::Clif,
- Some("llvm") => SysrootKind::Llvm,
- Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
- None => arg_error!("--sysroot requires argument"),
- }
- }
- "--no-unstable-features" => use_unstable_features = false,
- flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
- arg => arg_error!("Unexpected argument {}", arg),
- }
- }
-
- let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
- host_triple
- } else if let Some(host_triple) = crate::config::get_value("host") {
- host_triple
- } else {
- rustc_info::get_host_triple()
- };
- let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
- if target_triple != "" {
- target_triple
- } else {
- host_triple.clone() // Empty target triple can happen on GHA
- }
- } else if let Some(target_triple) = crate::config::get_value("target") {
- target_triple
- } else {
- host_triple.clone()
- };
-
- if target_triple.ends_with("-msvc") {
- eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
- eprintln!("Switch to the MinGW toolchain for Windows support.");
- eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
- eprintln!("set the global default target to MinGW");
- process::exit(1);
- }
-
- let cg_clif_build_dir =
- build_backend::build_backend(channel, &host_triple, use_unstable_features);
- build_sysroot::build_sysroot(
- channel,
- sysroot_kind,
- &target_dir,
- cg_clif_build_dir,
- &host_triple,
- &target_triple,
- );
++#[path = "build_system/mod.rs"]
++mod build_system;
+
+fn main() {
++ build_system::main();
+}