git.lizzy.rs Git - rust.git/commitdiff
Merge commit '40dd3e2b7089b5e96714e064b731f6dbf17c61a9' into sync_cg_clif-2021-05-27
author bjorn3 <bjorn3@users.noreply.github.com>
Thu, 27 May 2021 11:08:14 +0000 (13:08 +0200)
committer bjorn3 <bjorn3@users.noreply.github.com>
Thu, 27 May 2021 11:08:14 +0000 (13:08 +0200)
24 files changed:
compiler/rustc_codegen_cranelift/Cargo.lock
compiler/rustc_codegen_cranelift/Cargo.toml
compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
compiler/rustc_codegen_cranelift/build_sysroot/prepare_sysroot_src.sh
compiler/rustc_codegen_cranelift/crate_patches/0001-compiler-builtins-Disable-128bit-atomic-operations.patch
compiler/rustc_codegen_cranelift/example/std_example.rs
compiler/rustc_codegen_cranelift/patches/0022-core-Disable-not-compiling-tests.patch
compiler/rustc_codegen_cranelift/rust-toolchain
compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
compiler/rustc_codegen_cranelift/scripts/tests.sh
compiler/rustc_codegen_cranelift/src/archive.rs
compiler/rustc_codegen_cranelift/src/base.rs
compiler/rustc_codegen_cranelift/src/common.rs
compiler/rustc_codegen_cranelift/src/config.rs
compiler/rustc_codegen_cranelift/src/constant.rs
compiler/rustc_codegen_cranelift/src/driver/aot.rs
compiler/rustc_codegen_cranelift/src/driver/jit.rs
compiler/rustc_codegen_cranelift/src/inline_asm.rs
compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
compiler/rustc_codegen_cranelift/src/lib.rs
compiler/rustc_codegen_cranelift/src/main_shim.rs
compiler/rustc_codegen_cranelift/src/trap.rs
compiler/rustc_codegen_cranelift/src/value_and_place.rs

index e6792def56796c2cc1ba0e328c73bb251a3404be,0000000000000000000000000000000000000000..a6f5925149b925a59fab9c83719be9c2aefd7a13
mode 100644,000000..100644
--- /dev/null
@@@ -1,388 -1,0 +1,296 @@@
- [[package]]
- name = "byteorder"
- version = "1.4.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "anyhow"
 +version = "1.0.38"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
 +
 +[[package]]
 +name = "ar"
 +version = "0.8.0"
 +source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.0.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 +
 +[[package]]
 +name = "bitflags"
 +version = "1.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 +
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
 +[[package]]
 +name = "cfg-if"
 +version = "1.0.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 +
 +[[package]]
 +name = "cranelift-bforest"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen"
-  "byteorder",
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
-  "thiserror",
 + "cranelift-bforest",
 + "cranelift-codegen-meta",
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 + "gimli",
 + "log",
 + "regalloc",
 + "smallvec",
 + "target-lexicon",
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-meta"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "cranelift-codegen-shared",
 + "cranelift-entity",
 +]
 +
 +[[package]]
 +name = "cranelift-codegen-shared"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +
 +[[package]]
 +name = "cranelift-entity"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +
 +[[package]]
 +name = "cranelift-frontend"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "cranelift-codegen",
 + "log",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-jit"
-  "errno",
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "cranelift-module",
 + "cranelift-native",
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
 + "libc",
 + "log",
 + "region",
 + "target-lexicon",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "cranelift-module"
-  "thiserror",
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-entity",
 + "log",
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
 +]
 +
 +[[package]]
 +name = "cranelift-native"
- version = "0.73.0"
- source = "git+https://github.com/bytecodealliance/wasmtime/?branch=main#45bee40f338c631bff4a799288101ba328c7ad36"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "cranelift-codegen",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "cranelift-object"
- [[package]]
- name = "errno"
- version = "0.2.7"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe"
- dependencies = [
-  "errno-dragonfly",
-  "libc",
-  "winapi",
- ]
- [[package]]
- name = "errno-dragonfly"
- version = "0.1.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
- dependencies = [
-  "gcc",
-  "libc",
- ]
- [[package]]
- name = "gcc"
- version = "0.3.55"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
++version = "0.74.0"
++source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#76c6b83f6a21a12a11d4f890490f8acb6329a600"
 +dependencies = [
 + "anyhow",
 + "cranelift-codegen",
 + "cranelift-module",
 + "log",
 + "object",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "crc32fast"
 +version = "1.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
 +dependencies = [
 + "cfg-if",
 +]
 +
- version = "0.23.0"
 +[[package]]
 +name = "gimli"
- checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
++version = "0.24.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.23.0"
++checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
 +dependencies = [
 + "indexmap",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.9.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
 +
 +[[package]]
 +name = "indexmap"
 +version = "1.6.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
 +dependencies = [
 + "autocfg",
 + "hashbrown",
 +]
 +
 +[[package]]
 +name = "libc"
 +version = "0.2.86"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
 +
 +[[package]]
 +name = "libloading"
 +version = "0.6.7"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
 +dependencies = [
 + "cfg-if",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "log"
 +version = "0.4.14"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
 +dependencies = [
 + "cfg-if",
 +]
 +
 +[[package]]
 +name = "mach"
 +version = "0.3.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
 +dependencies = [
 + "libc",
 +]
 +
 +[[package]]
 +name = "object"
- checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4"
++version = "0.24.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- [[package]]
- name = "proc-macro2"
- version = "1.0.24"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
- dependencies = [
-  "unicode-xid",
- ]
- [[package]]
- name = "quote"
- version = "1.0.9"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
- dependencies = [
-  "proc-macro2",
- ]
++checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170"
 +dependencies = [
 + "crc32fast",
 + "indexmap",
 +]
 +
- [[package]]
- name = "syn"
- version = "1.0.60"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081"
- dependencies = [
-  "proc-macro2",
-  "quote",
-  "unicode-xid",
- ]
 +[[package]]
 +name = "regalloc"
 +version = "0.0.31"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
 +dependencies = [
 + "log",
 + "rustc-hash",
 + "smallvec",
 +]
 +
 +[[package]]
 +name = "region"
 +version = "2.2.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
 +dependencies = [
 + "bitflags",
 + "libc",
 + "mach",
 + "winapi",
 +]
 +
 +[[package]]
 +name = "rustc-hash"
 +version = "1.1.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
 +
 +[[package]]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +dependencies = [
 + "ar",
 + "cranelift-codegen",
 + "cranelift-frontend",
 + "cranelift-jit",
 + "cranelift-module",
 + "cranelift-native",
 + "cranelift-object",
 + "gimli",
 + "indexmap",
 + "libloading",
 + "object",
 + "smallvec",
 + "target-lexicon",
 +]
 +
 +[[package]]
 +name = "smallvec"
 +version = "1.6.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
 +
- [[package]]
- name = "thiserror"
- version = "1.0.24"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
- dependencies = [
-  "thiserror-impl",
- ]
- [[package]]
- name = "thiserror-impl"
- version = "1.0.24"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
- dependencies = [
-  "proc-macro2",
-  "quote",
-  "syn",
- ]
- [[package]]
- name = "unicode-xid"
- version = "0.2.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
 +[[package]]
 +name = "target-lexicon"
 +version = "0.12.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834"
 +
 +[[package]]
 +name = "winapi"
 +version = "0.3.9"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 +dependencies = [
 + "winapi-i686-pc-windows-gnu",
 + "winapi-x86_64-pc-windows-gnu",
 +]
 +
 +[[package]]
 +name = "winapi-i686-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 +
 +[[package]]
 +name = "winapi-x86_64-pc-windows-gnu"
 +version = "0.4.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
index 2789207c65581ab380463f0ff114ddeb7a601159,0000000000000000000000000000000000000000..fd149af454735e528277b2290fd4c4258e64374c
mode 100644,000000..100644
--- /dev/null
@@@ -1,82 -1,0 +1,74 @@@
- cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", features = ["unwind"] }
- cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
- cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
- cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
- cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main", optional = true }
- cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime/", branch = "main" }
 +[package]
 +name = "rustc_codegen_cranelift"
 +version = "0.1.0"
 +authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
 +edition = "2018"
 +
 +[lib]
 +crate-type = ["dylib"]
 +
 +[dependencies]
 +# These have to be in sync with each other
- gimli = { version = "0.23.0", default-features = false, features = ["write"]}
- object = { version = "0.23.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
++cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", features = ["unwind"] }
++cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
++cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
++cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
++cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", optional = true }
++cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
 +target-lexicon = "0.12.0"
- #[patch."https://github.com/bytecodealliance/wasmtime/"]
++gimli = { version = "0.24.0", default-features = false, features = ["write"]}
++object = { version = "0.24.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
 +
 +ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
 +indexmap = "1.0.2"
 +libloading = { version = "0.6.0", optional = true }
 +smallvec = "1.6.1"
 +
 +# Uncomment to use local checkout of cranelift
- #cranelift-native = { path = ../wasmtime/cranelift/native" }
++#[patch."https://github.com/bytecodealliance/wasmtime.git"]
 +#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
 +#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
 +#cranelift-module = { path = "../wasmtime/cranelift/module" }
- [profile.dev.package.syn]
- opt-level = 0
- debug = false
- [profile.release.package.syn]
- opt-level = 0
- debug = false
++#cranelift-native = { path = "../wasmtime/cranelift/native" }
 +#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
 +#cranelift-object = { path = "../wasmtime/cranelift/object" }
 +
 +#[patch.crates-io]
 +#gimli = { path = "../" }
 +
 +[features]
 +default = ["jit", "inline_asm"]
 +jit = ["cranelift-jit", "libloading"]
 +inline_asm = []
 +
 +[profile.dev]
 +# By compiling dependencies with optimizations, performing tests gets much faster.
 +opt-level = 3
 +
 +[profile.dev.package.rustc_codegen_cranelift]
 +# Disabling optimizations for cg_clif itself makes compilation after a change faster.
 +opt-level = 0
 +
 +[profile.release.package.rustc_codegen_cranelift]
 +incremental = true
 +
 +# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
 +# execution time of build scripts is so fast that optimizing them slows down the total build time.
 +[profile.dev.build-override]
 +opt-level = 0
 +debug = false
 +
 +[profile.release.build-override]
 +opt-level = 0
 +debug = false
 +
 +[profile.dev.package.cranelift-codegen-meta]
 +opt-level = 0
 +debug = false
 +
 +[profile.release.package.cranelift-codegen-meta]
 +opt-level = 0
 +debug = false
 +
 +[package.metadata.rust-analyzer]
 +rustc_private = true
index e058a972ead3c184c382937d3f5afd19afcfa95b,0000000000000000000000000000000000000000..923deb9aec4c082e357f9c2bc3425a14fbe8c8e8
mode 100644,000000..100644
--- /dev/null
@@@ -1,327 -1,0 +1,327 @@@
- version = "1.0.67"
 +# This file is automatically @generated by Cargo.
 +# It is not intended for manual editing.
 +version = 3
 +
 +[[package]]
 +name = "addr2line"
 +version = "0.14.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
 +dependencies = [
 + "compiler_builtins",
 + "gimli",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "adler"
 +version = "1.0.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "alloc"
 +version = "0.0.0"
 +dependencies = [
 + "compiler_builtins",
 + "core",
 +]
 +
 +[[package]]
 +name = "autocfg"
 +version = "1.0.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 +
 +[[package]]
 +name = "cc"
- checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
++version = "1.0.68"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.1.40"
++checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
 +
 +[[package]]
 +name = "cfg-if"
 +version = "0.1.10"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "compiler_builtins"
- version = "0.2.94"
++version = "0.1.43"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "core"
 +version = "0.0.0"
 +
 +[[package]]
 +name = "dlmalloc"
 +version = "0.2.1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "fortanix-sgx-abi"
 +version = "0.3.3"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "getopts"
 +version = "0.2.21"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
 +dependencies = [
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 + "unicode-width",
 +]
 +
 +[[package]]
 +name = "gimli"
 +version = "0.23.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hashbrown"
 +version = "0.11.2"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "hermit-abi"
 +version = "0.1.18"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
 +dependencies = [
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "libc"
- checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e"
++version = "0.2.95"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
- version = "0.1.18"
++checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36"
 +dependencies = [
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "miniz_oxide"
 +version = "0.4.4"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
 +dependencies = [
 + "adler",
 + "autocfg",
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "object"
 +version = "0.22.0"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "panic_abort"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "panic_unwind"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 + "unwind",
 +]
 +
 +[[package]]
 +name = "proc_macro"
 +version = "0.0.0"
 +dependencies = [
 + "std",
 +]
 +
 +[[package]]
 +name = "rustc-demangle"
- checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
++version = "0.1.19"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-alloc"
 +version = "1.99.0"
 +dependencies = [
 + "alloc",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-core"
 +version = "1.99.0"
 +dependencies = [
 + "core",
 +]
 +
 +[[package]]
 +name = "rustc-std-workspace-std"
 +version = "1.99.0"
 +dependencies = [
 + "std",
 +]
 +
 +[[package]]
 +name = "std"
 +version = "0.0.0"
 +dependencies = [
 + "addr2line",
 + "alloc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "dlmalloc",
 + "fortanix-sgx-abi",
 + "hashbrown",
 + "hermit-abi",
 + "libc",
 + "miniz_oxide",
 + "object",
 + "panic_abort",
 + "panic_unwind",
 + "rustc-demangle",
 + "std_detect",
 + "unwind",
 + "wasi",
 +]
 +
 +[[package]]
 +name = "std_detect"
 +version = "0.1.5"
 +dependencies = [
 + "cfg-if",
 + "compiler_builtins",
 + "libc",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
 +
 +[[package]]
 +name = "sysroot"
 +version = "0.0.0"
 +dependencies = [
 + "alloc",
 + "compiler_builtins",
 + "core",
 + "std",
 + "test",
 +]
 +
 +[[package]]
 +name = "term"
 +version = "0.0.0"
 +dependencies = [
 + "core",
 + "std",
 +]
 +
 +[[package]]
 +name = "test"
 +version = "0.0.0"
 +dependencies = [
 + "cfg-if",
 + "core",
 + "getopts",
 + "libc",
 + "panic_abort",
 + "panic_unwind",
 + "proc_macro",
 + "std",
 + "term",
 +]
 +
 +[[package]]
 +name = "unicode-width"
 +version = "0.1.8"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-core",
 + "rustc-std-workspace-std",
 +]
 +
 +[[package]]
 +name = "unwind"
 +version = "0.0.0"
 +dependencies = [
 + "cc",
 + "cfg-if",
 + "compiler_builtins",
 + "core",
 + "libc",
 +]
 +
 +[[package]]
 +name = "wasi"
 +version = "0.9.0+wasi-snapshot-preview1"
 +source = "registry+https://github.com/rust-lang/crates.io-index"
 +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 +dependencies = [
 + "compiler_builtins",
 + "rustc-std-workspace-alloc",
 + "rustc-std-workspace-core",
 +]
index f7fcef10774102706c7a5169a3882eec5930bee1,0000000000000000000000000000000000000000..54b7a94750c5249ee465380802c313d72e68272c
mode 100755,000000..100755
--- /dev/null
@@@ -1,39 -1,0 +1,39 @@@
- git checkout 0.1.40
 +#!/usr/bin/env bash
 +set -e
 +cd "$(dirname "$0")"
 +
 +SRC_DIR="$(dirname "$(rustup which rustc)")/../lib/rustlib/src/rust/"
 +DST_DIR="sysroot_src"
 +
 +if [ ! -e "$SRC_DIR" ]; then
 +    echo "Please install rust-src component"
 +    exit 1
 +fi
 +
 +rm -rf $DST_DIR
 +mkdir -p $DST_DIR/library
 +cp -a "$SRC_DIR/library" $DST_DIR/
 +
 +pushd $DST_DIR
 +echo "[GIT] init"
 +git init
 +echo "[GIT] add"
 +git add .
 +echo "[GIT] commit"
 +git commit -m "Initial commit" -q
 +for file in $(ls ../../patches/ | grep -v patcha); do
 +echo "[GIT] apply" "$file"
 +git apply ../../patches/"$file"
 +git add -A
 +git commit --no-gpg-sign -m "Patch $file"
 +done
 +popd
 +
 +git clone https://github.com/rust-lang/compiler-builtins.git || echo "rust-lang/compiler-builtins has already been cloned"
 +pushd compiler-builtins
 +git checkout -- .
++git checkout 0.1.43
 +git apply ../../crate_patches/000*-compiler-builtins-*.patch
 +popd
 +
 +echo "Successfully prepared sysroot source for building"
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..7daea99f5794d2103588c152a0ac02ca1b5b14b1
new file mode 100644
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,48 @@@
++From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Thu, 18 Feb 2021 18:30:55 +0100
++Subject: [PATCH] Disable 128bit atomic operations
++
++Cranelift doesn't support them yet
++---
++ src/mem/mod.rs | 12 ------------
++ 1 file changed, 12 deletions(-)
++
++diff --git a/src/mem/mod.rs b/src/mem/mod.rs
++index 107762c..2d1ae10 100644
++--- a/src/mem/mod.rs
+++++ b/src/mem/mod.rs
++@@ -137,10 +137,6 @@ intrinsics! {
++     pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
++         memcpy_element_unordered_atomic(dest, src, bytes);
++     }
++-    #[cfg(target_has_atomic_load_store = "128")]
++-    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++-        memcpy_element_unordered_atomic(dest, src, bytes);
++-    }
++ 
++     #[cfg(target_has_atomic_load_store = "8")]
++     pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
++@@ -158,10 +154,6 @@ intrinsics! {
++     pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
++         memmove_element_unordered_atomic(dest, src, bytes);
++     }
++-    #[cfg(target_has_atomic_load_store = "128")]
++-    pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
++-        memmove_element_unordered_atomic(dest, src, bytes);
++-    }
++ 
++     #[cfg(target_has_atomic_load_store = "8")]
++     pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
++@@ -179,8 +171,4 @@ intrinsics! {
++     pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
++         memset_element_unordered_atomic(s, c, bytes);
++     }
++-    #[cfg(target_has_atomic_load_store = "128")]
++-    pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
++-        memset_element_unordered_atomic(s, c, bytes);
++-    }
++ }
++-- 
++2.26.2.7.g19db9cfb68
++
index 77ba72df8ef371ddbc1163c7b0f6f0aca38b0d7e,0000000000000000000000000000000000000000..7d608df9253df85fe72e5c38bfb438b29bc267ab
mode 100644,000000..100644
--- /dev/null
@@@ -1,349 -1,0 +1,335 @@@
-     #[rustfmt::skip]
-     let a = _mm_setr_epi8(
-         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-     );
-     let r = _mm_slli_si128(a, -1);
-     assert_eq_m128i(_mm_set1_epi8(0), r);
-     #[rustfmt::skip]
-     let a = _mm_setr_epi8(
-         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
-     );
-     let r = _mm_slli_si128(a, -0x80000000);
-     assert_eq_m128i(r, _mm_set1_epi8(0));
 +#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
 +
 +#[cfg(target_arch = "x86_64")]
 +use std::arch::x86_64::*;
 +use std::io::Write;
 +use std::ops::Generator;
 +
 +fn main() {
 +    println!("{:?}", std::env::args().collect::<Vec<_>>());
 +
 +    let mutex = std::sync::Mutex::new(());
 +    let _guard = mutex.lock().unwrap();
 +
 +    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
 +    let stderr = ::std::io::stderr();
 +    let mut stderr = stderr.lock();
 +
 +    // FIXME support lazy jit when multi threading
 +    #[cfg(not(lazy_jit))]
 +    std::thread::spawn(move || {
 +        println!("Hello from another thread!");
 +    });
 +
 +    writeln!(stderr, "some {} text", "<unknown>").unwrap();
 +
 +    let _ = std::process::Command::new("true").env("c", "d").spawn();
 +
 +    println!("cargo:rustc-link-lib=z");
 +
 +    static ONCE: std::sync::Once = std::sync::Once::new();
 +    ONCE.call_once(|| {});
 +
 +    let _eq = LoopState::Continue(()) == LoopState::Break(());
 +
 +    // Make sure ByValPair values with differently sized components are correctly passed
 +    map(None::<(u8, Box<Instruction>)>);
 +
 +    println!("{}", 2.3f32.exp());
 +    println!("{}", 2.3f32.exp2());
 +    println!("{}", 2.3f32.abs());
 +    println!("{}", 2.3f32.sqrt());
 +    println!("{}", 2.3f32.floor());
 +    println!("{}", 2.3f32.ceil());
 +    println!("{}", 2.3f32.min(1.0));
 +    println!("{}", 2.3f32.max(1.0));
 +    println!("{}", 2.3f32.powi(2));
 +    println!("{}", 2.3f32.log2());
 +    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
 +    println!("{}", 2.3f32.powf(2.0));
 +
 +    assert_eq!(i64::MAX.checked_mul(2), None);
 +
 +    assert_eq!(-128i8, (-128i8).saturating_sub(1));
 +    assert_eq!(127i8, 127i8.saturating_sub(-128));
 +    assert_eq!(-128i8, (-128i8).saturating_add(-128));
 +    assert_eq!(127i8, 127i8.saturating_add(1));
 +
 +    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
 +    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
 +    assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
 +
 +    let _d = 0i128.checked_div(2i128);
 +    let _d = 0u128.checked_div(2u128);
 +    assert_eq!(1u128 + 2, 3);
 +
 +    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
 +    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
 +
 +    let tmp = 353985398u128;
 +    assert_eq!(tmp * 932490u128, 330087843781020u128);
 +
 +    let tmp = -0x1234_5678_9ABC_DEF0i64;
 +    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
 +
 +    // Check that all u/i128 <-> float casts work correctly.
 +    let houndred_u128 = 100u128;
 +    let houndred_i128 = 100i128;
 +    let houndred_f32 = 100.0f32;
 +    let houndred_f64 = 100.0f64;
 +    assert_eq!(houndred_u128 as f32, 100.0);
 +    assert_eq!(houndred_u128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as u128, 100);
 +    assert_eq!(houndred_f64 as u128, 100);
 +    assert_eq!(houndred_i128 as f32, 100.0);
 +    assert_eq!(houndred_i128 as f64, 100.0);
 +    assert_eq!(houndred_f32 as i128, 100);
 +    assert_eq!(houndred_f64 as i128, 100);
 +    assert_eq!(1u128.rotate_left(2), 4);
 +
 +    // Test signed 128bit comparing
 +    let max = usize::MAX as i128;
 +    if 100i128 < 0i128 || 100i128 > max {
 +        panic!();
 +    }
 +
 +    test_checked_mul();
 +
 +    let _a = 1u32 << 2u8;
 +
 +    let empty: [i32; 0] = [];
 +    assert!(empty.is_sorted());
 +
 +    println!("{:?}", std::intrinsics::caller_location());
 +
 +    #[cfg(target_arch = "x86_64")]
 +    unsafe {
 +        test_simd();
 +    }
 +
 +    Box::pin(move |mut _task_context| {
 +        yield ();
 +    }).as_mut().resume(0);
 +
 +    #[derive(Copy, Clone)]
 +    enum Nums {
 +        NegOne = -1,
 +    }
 +
 +    let kind = Nums::NegOne;
 +    assert_eq!(-1i128, kind as i128);
 +
 +    let options = [1u128];
 +    match options[0] {
 +        1 => (),
 +        0 => loop {},
 +        v => panic(v),
 +    };
 +}
 +
 +fn panic(_: u128) {
 +    panic!();
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_simd() {
 +    assert!(is_x86_feature_detected!("sse2"));
 +
 +    let x = _mm_setzero_si128();
 +    let y = _mm_set1_epi16(7);
 +    let or = _mm_or_si128(x, y);
 +    let cmp_eq = _mm_cmpeq_epi8(y, y);
 +    let cmp_lt = _mm_cmplt_epi8(y, y);
 +
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
 +    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
 +
 +    test_mm_slli_si128();
 +    test_mm_movemask_epi8();
 +    test_mm256_movemask_epi8();
 +    test_mm_add_epi8();
 +    test_mm_add_pd();
 +    test_mm_cvtepi8_epi16();
 +    test_mm_cvtsi128_si64();
 +
 +    test_mm_extract_epi8();
 +    test_mm_insert_epi16();
 +
 +    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
 +    assert_eq!(mask1, 1);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_slli_si128() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 1);
 +    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 15);
 +    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
 +    assert_eq_m128i(r, e);
 +
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
 +    );
 +    let r = _mm_slli_si128(a, 16);
 +    assert_eq_m128i(r, _mm_set1_epi8(0));
-     let r2 = _mm_extract_epi8(a, 19);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_movemask_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
 +        0b0101, 0b1111_0000u8 as i8, 0, 0,
 +        0, 0, 0b1111_0000u8 as i8, 0b0101,
 +        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
 +    );
 +    let r = _mm_movemask_epi8(a);
 +    assert_eq!(r, 0b10100100_00100101);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "avx2")]
 +unsafe fn test_mm256_movemask_epi8() {
 +    let a = _mm256_set1_epi8(-1);
 +    let r = _mm256_movemask_epi8(a);
 +    let e = -1;
 +    assert_eq!(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_epi8() {
 +    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
 +    #[rustfmt::skip]
 +    let b = _mm_setr_epi8(
 +        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
 +    );
 +    let r = _mm_add_epi8(a, b);
 +    #[rustfmt::skip]
 +    let e = _mm_setr_epi8(
 +        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
 +    );
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_add_pd() {
 +    let a = _mm_setr_pd(1.0, 2.0);
 +    let b = _mm_setr_pd(5.0, 10.0);
 +    let r = _mm_add_pd(a, b);
 +    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
 +    unsafe {
 +        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
 +    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
 +        panic!("{:?} != {:?}", a, b);
 +    }
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_cvtsi128_si64() {
 +    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
 +    assert_eq!(r, 5);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_cvtepi8_epi16() {
 +    let a = _mm_set1_epi8(10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(10);
 +    assert_eq_m128i(r, e);
 +    let a = _mm_set1_epi8(-10);
 +    let r = _mm_cvtepi8_epi16(a);
 +    let e = _mm_set1_epi16(-10);
 +    assert_eq_m128i(r, e);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse4.1")]
 +unsafe fn test_mm_extract_epi8() {
 +    #[rustfmt::skip]
 +    let a = _mm_setr_epi8(
 +        -1, 1, 2, 3, 4, 5, 6, 7,
 +        8, 9, 10, 11, 12, 13, 14, 15
 +    );
 +    let r1 = _mm_extract_epi8(a, 0);
++    let r2 = _mm_extract_epi8(a, 3);
 +    assert_eq!(r1, 0xFF);
 +    assert_eq!(r2, 3);
 +}
 +
 +#[cfg(target_arch = "x86_64")]
 +#[target_feature(enable = "sse2")]
 +unsafe fn test_mm_insert_epi16() {
 +    let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
 +    let r = _mm_insert_epi16(a, 9, 0);
 +    let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
 +    assert_eq_m128i(r, e);
 +}
 +
 +fn test_checked_mul() {
 +    let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
 +    assert_eq!(u, None);
 +
 +    assert_eq!(1u8.checked_mul(255u8), Some(255u8));
 +    assert_eq!(255u8.checked_mul(255u8), None);
 +    assert_eq!(1i8.checked_mul(127i8), Some(127i8));
 +    assert_eq!(127i8.checked_mul(127i8), None);
 +    assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
 +    assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
 +    assert_eq!((-128i8).checked_mul(-128i8), None);
 +
 +    assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
 +    assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
 +    assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
 +    assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
 +    assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
 +    assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
 +    assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
 +}
 +
 +#[derive(PartialEq)]
 +enum LoopState {
 +    Continue(()),
 +    Break(())
 +}
 +
 +pub enum Instruction {
 +    Increment,
 +    Loop,
 +}
 +
 +fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
 +    match a {
 +        None => None,
 +        Some((_, instr)) => Some(instr),
 +    }
 +}
index 8cfffe580a1f0ef9be7dc3f207d4d399465de3de,0000000000000000000000000000000000000000..ba0eaacd82870fd0a12952989c55b70317ef3b3a
mode 100644,000000..100644
--- /dev/null
@@@ -1,123 -1,0 +1,83 @@@
- diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs
- index 0475aeb..9558198 100644
- --- a/library/core/tests/num/int_macros.rs
- +++ b/library/core/tests/num/int_macros.rs
- @@ -88,6 +88,7 @@ mod tests {
-                  assert_eq!(x.trailing_ones(), 0);
-              }
-  
- +            /*
-              #[test]
-              fn test_rotate() {
-                  assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
- @@ -112,6 +113,7 @@ mod tests {
-                  assert_eq!(B.rotate_left(128), B);
-                  assert_eq!(C.rotate_left(128), C);
-              }
- +            */
-  
-              #[test]
-              fn test_swap_bytes() {
- diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs
- index 04ed14f..a6e372e 100644
- --- a/library/core/tests/num/uint_macros.rs
- +++ b/library/core/tests/num/uint_macros.rs
- @@ -52,6 +52,7 @@ mod tests {
-                  assert_eq!(x.trailing_ones(), 0);
-              }
-  
- +            /*
-              #[test]
-              fn test_rotate() {
-                  assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
- @@ -76,6 +77,7 @@ mod tests {
-                  assert_eq!(B.rotate_left(128), B);
-                  assert_eq!(C.rotate_left(128), C);
-              }
- +            */
-  
-              #[test]
-              fn test_swap_bytes() {
 +From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
 +From: bjorn3 <bjorn3@users.noreply.github.com>
 +Date: Sun, 24 Nov 2019 15:10:23 +0100
 +Subject: [PATCH] [core] Disable not compiling tests
 +
 +---
 + library/core/tests/Cargo.toml         | 8 ++++++++
 + library/core/tests/num/flt2dec/mod.rs | 1 -
 + library/core/tests/num/int_macros.rs  | 2 ++
 + library/core/tests/num/uint_macros.rs | 2 ++
 + library/core/tests/ptr.rs             | 2 ++
 + library/core/tests/slice.rs           | 2 ++
 + 6 files changed, 16 insertions(+), 1 deletion(-)
 + create mode 100644 library/core/tests/Cargo.toml
 +
 +diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
 +new file mode 100644
 +index 0000000..46fd999
 +--- /dev/null
 ++++ b/library/core/tests/Cargo.toml
 +@@ -0,0 +1,8 @@
 ++[package]
 ++name = "core"
 ++version = "0.0.0"
 ++edition = "2018"
 ++
 ++[lib]
 ++name = "coretests"
 ++path = "lib.rs"
 +diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
 +index a35897e..f0bf645 100644
 +--- a/library/core/tests/num/flt2dec/mod.rs
 ++++ b/library/core/tests/num/flt2dec/mod.rs
 +@@ -13,7 +13,6 @@ mod strategy {
 +     mod dragon;
 +     mod grisu;
 + }
 +-mod random;
 + 
 + pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
 +     match decode(v).1 {
 +diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
 +index 1a6be3a..42dbd59 100644
 +--- a/library/core/tests/ptr.rs
 ++++ b/library/core/tests/ptr.rs
 +@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
 +     assert!(ys == zs);
 + }
 + 
 ++/*
 + #[test]
 + #[allow(warnings)]
 + // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
 +@@ -289,6 +290,7 @@ fn write_unaligned_drop() {
 +     }
 +     DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
 + }
 ++*/
 + 
 + #[test]
 + fn align_offset_zst() {
 +diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
 +index 6609bc3..241b497 100644
 +--- a/library/core/tests/slice.rs
 ++++ b/library/core/tests/slice.rs
 +@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
 +     }
 + }
 + 
 ++/*
 + #[test]
 + #[cfg(not(target_arch = "wasm32"))]
 + fn sort_unstable() {
 +@@ -1394,6 +1395,7 @@ fn partition_at_index() {
 +     v.select_nth_unstable(0);
 +     assert!(v == [0xDEADBEEF]);
 + }
 ++*/
 + 
 + #[test]
 + #[should_panic(expected = "index 0 greater than length of slice")]
 +--
 +2.21.0 (Apple Git-122)
index 5442e3345aa913899488cf757816c1ebe01df325,0000000000000000000000000000000000000000..9fe6e093a7b81f6577d1fb822692c77508e0ac88
mode 100644,000000..100644
--- /dev/null
@@@ -1,3 -1,0 +1,3 @@@
- channel = "nightly-2021-04-28"
 +[toolchain]
++channel = "nightly-2021-05-26"
 +components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
index 4821a07ac5d5de473b0efcc6c6a23811a908ad28,0000000000000000000000000000000000000000..43c4887669cf6a0512a87fbe8e045b359c66a14d
mode 100644,000000..100644
--- /dev/null
@@@ -1,68 -1,0 +1,57 @@@
- diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
- index 23e689fcae7..5f077b765b6 100644
- --- a/compiler/rustc_data_structures/Cargo.toml
- +++ b/compiler/rustc_data_structures/Cargo.toml
- @@ -32,7 +32,6 @@ tempfile = "3.0.5"
-  [dependencies.parking_lot]
-  version = "0.11"
- -features = ["nightly"]
-  [target.'cfg(windows)'.dependencies]
-  winapi = { version = "0.3", features = ["fileapi", "psapi"] }
 +#!/bin/bash
 +set -e
 +
 +./build.sh
 +source build/config.sh
 +
 +echo "[SETUP] Rust fork"
 +git clone https://github.com/rust-lang/rust.git || true
 +pushd rust
 +git fetch
 +git checkout -- .
 +git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
 +
 +git apply - <<EOF
 +diff --git a/Cargo.toml b/Cargo.toml
 +index 5bd1147cad5..10d68a2ff14 100644
 +--- a/Cargo.toml
 ++++ b/Cargo.toml
 +@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
 + rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
 + rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
 +
 ++compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
 ++
 + [patch."https://github.com/rust-lang/rust-clippy"]
 + clippy_lints = { path = "src/tools/clippy/clippy_lints" }
- -compiler_builtins = { version = "0.1.39", features = ['rustc-dep-of-std'] }
- +compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std', 'no-asm'] }
 +diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
 +index d95b5b7f17f..00b6f0e3635 100644
 +--- a/library/alloc/Cargo.toml
 ++++ b/library/alloc/Cargo.toml
 +@@ -8,7 +8,7 @@ edition = "2018"
 +
 + [dependencies]
 + core = { path = "../core" }
++-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
+++compiler_builtins = { version = "0.1.43", features = ['rustc-dep-of-std', 'no-asm'] }
 +
 + [dev-dependencies]
 + rand = "0.7"
++ rand_xorshift = "0.2"
 +EOF
 +
 +cat > config.toml <<EOF
 +[llvm]
 +ninja = false
 +
 +[build]
 +rustc = "$(pwd)/../build/bin/cg_clif"
 +cargo = "$(rustup which cargo)"
 +full-bootstrap = true
 +local-rebuild = true
 +
 +[rust]
 +codegen-backends = ["cranelift"]
 +deny-warnings = false
 +EOF
 +popd
index 3afcea8f06bd6c257b6ec97077d17ee7496ed565,0000000000000000000000000000000000000000..0d99d2c507c95af6819f7b8decfeb0dfbcc7bf4e
mode 100755,000000..100755
--- /dev/null
@@@ -1,151 -1,0 +1,152 @@@
 +#!/usr/bin/env bash
 +
 +set -e
 +
 +source build/config.sh
 +source scripts/ext_config.sh
 +MY_RUSTC="$RUSTC $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
 +
 +function no_sysroot_tests() {
 +    echo "[BUILD] mini_core"
 +    $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
 +
 +    echo "[BUILD] example"
 +    $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
 +
 +    if [[ "$JIT_SUPPORTED" = "1" ]]; then
 +        echo "[JIT] mini_core_hello_world"
 +        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
 +
 +        echo "[JIT-lazy] mini_core_hello_world"
 +        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
 +    else
 +        echo "[JIT] mini_core_hello_world (skipped)"
 +    fi
 +
 +    echo "[AOT] mini_core_hello_world"
 +    $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
 +    # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
 +}
 +
 +function base_sysroot_tests() {
 +    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
 +    $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
 +
 +    echo "[AOT] alloc_system"
 +    $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
 +
 +    echo "[AOT] alloc_example"
 +    $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/alloc_example
 +
 +    if [[ "$JIT_SUPPORTED" = "1" ]]; then
 +        echo "[JIT] std_example"
 +        $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
 +
 +        echo "[JIT-lazy] std_example"
 +        $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --cfg lazy_jit --target "$HOST_TRIPLE"
 +    else
 +        echo "[JIT] std_example (skipped)"
 +    fi
 +
 +    echo "[AOT] dst_field_align"
 +    # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
 +    $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
 +
 +    echo "[AOT] std_example"
 +    $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/std_example arg
 +
 +    echo "[AOT] subslice-patterns-const-eval"
 +    $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
 +
 +    echo "[AOT] track-caller-attribute"
 +    $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/track-caller-attribute
 +
 +    echo "[AOT] mod_bench"
 +    $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
 +    $RUN_WRAPPER ./target/out/mod_bench
 +}
 +
 +function extended_sysroot_tests() {
 +    pushd rand
 +    cargo clean
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[TEST] rust-random/rand"
 +        ../build/cargo.sh test --workspace
 +    else
 +        echo "[AOT] rust-random/rand"
 +        ../build/cargo.sh build --workspace --target $TARGET_TRIPLE --tests
 +    fi
 +    popd
 +
 +    pushd simple-raytracer
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[BENCH COMPILE] ebobby/simple-raytracer"
 +        hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
 +        "RUSTC=rustc RUSTFLAGS='' cargo build" \
 +        "../build/cargo.sh build"
 +
 +        echo "[BENCH RUN] ebobby/simple-raytracer"
 +        cp ./target/debug/main ./raytracer_cg_clif
 +        hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
 +    else
 +        echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
 +        echo "[COMPILE] ebobby/simple-raytracer"
 +        ../build/cargo.sh build --target $TARGET_TRIPLE
 +        echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
 +    fi
 +    popd
 +
 +    pushd build_sysroot/sysroot_src/library/core/tests
 +    echo "[TEST] libcore"
 +    cargo clean
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        ../../../../../build/cargo.sh test
 +    else
 +        ../../../../../build/cargo.sh build --target $TARGET_TRIPLE --tests
 +    fi
 +    popd
 +
 +    pushd regex
 +    echo "[TEST] rust-lang/regex example shootout-regex-dna"
 +    cargo clean
++    export RUSTFLAGS="$RUSTFLAGS --cap-lints warn" # newer aho_corasick versions throw a deprecation warning
 +    # Make sure `[codegen mono items] start` doesn't poison the diff
 +    ../build/cargo.sh build --example shootout-regex-dna --target $TARGET_TRIPLE
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        cat examples/regexdna-input.txt \
 +            | ../build/cargo.sh run --example shootout-regex-dna --target $TARGET_TRIPLE \
 +            | grep -v "Spawned thread" > res.txt
 +        diff -u res.txt examples/regexdna-output.txt
 +    fi
 +
 +    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
 +        echo "[TEST] rust-lang/regex tests"
 +        ../build/cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
 +    else
 +        echo "[AOT] rust-lang/regex tests"
 +        ../build/cargo.sh build --tests --target $TARGET_TRIPLE
 +    fi
 +    popd
 +}
 +
 +case "$1" in
 +    "no_sysroot")
 +        no_sysroot_tests
 +        ;;
 +    "base_sysroot")
 +        base_sysroot_tests
 +        ;;
 +    "extended_sysroot")
 +        extended_sysroot_tests
 +        ;;
 +    *)
 +        echo "unknown test suite"
 +        ;;
 +esac
index fc0823302e0189971c492fcf6c96cd77292c2cfb,0000000000000000000000000000000000000000..bd54adc53ee2a58eba807aa045dd51bcf098b9d4
mode 100644,000000..100644
--- /dev/null
@@@ -1,282 -1,0 +1,282 @@@
-                 match object::File::parse(&data) {
 +//! Creation of ar archives like for the lib and staticlib crate type
 +
 +use std::collections::BTreeMap;
 +use std::fs::File;
 +use std::path::{Path, PathBuf};
 +
 +use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
 +use rustc_codegen_ssa::METADATA_FILENAME;
 +use rustc_session::Session;
 +
 +use object::{Object, ObjectSymbol, SymbolKind};
 +
 +#[derive(Debug)]
 +enum ArchiveEntry {
 +    FromArchive { archive_index: usize, entry_index: usize },
 +    File(PathBuf),
 +}
 +
 +pub(crate) struct ArArchiveBuilder<'a> {
 +    sess: &'a Session,
 +    dst: PathBuf,
 +    lib_search_paths: Vec<PathBuf>,
 +    use_gnu_style_archive: bool,
 +    no_builtin_ranlib: bool,
 +
 +    src_archives: Vec<(PathBuf, ar::Archive<File>)>,
 +    // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
 +    // the end of an archive for linkers to not get confused.
 +    entries: Vec<(String, ArchiveEntry)>,
 +}
 +
 +impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
 +    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
 +        use rustc_codegen_ssa::back::link::archive_search_paths;
 +
 +        let (src_archives, entries) = if let Some(input) = input {
 +            let mut archive = ar::Archive::new(File::open(input).unwrap());
 +            let mut entries = Vec::new();
 +
 +            let mut i = 0;
 +            while let Some(entry) = archive.next_entry() {
 +                let entry = entry.unwrap();
 +                entries.push((
 +                    String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
 +                    ArchiveEntry::FromArchive { archive_index: 0, entry_index: i },
 +                ));
 +                i += 1;
 +            }
 +
 +            (vec![(input.to_owned(), archive)], entries)
 +        } else {
 +            (vec![], Vec::new())
 +        };
 +
 +        ArArchiveBuilder {
 +            sess,
 +            dst: output.to_path_buf(),
 +            lib_search_paths: archive_search_paths(sess),
 +            use_gnu_style_archive: sess.target.archive_format == "gnu",
 +            // FIXME fix builtin ranlib on macOS
 +            no_builtin_ranlib: sess.target.is_like_osx,
 +
 +            src_archives,
 +            entries,
 +        }
 +    }
 +
 +    fn src_files(&mut self) -> Vec<String> {
 +        self.entries.iter().map(|(name, _)| name.clone()).collect()
 +    }
 +
 +    fn remove_file(&mut self, name: &str) {
 +        let index = self
 +            .entries
 +            .iter()
 +            .position(|(entry_name, _)| entry_name == name)
 +            .expect("Tried to remove file not existing in src archive");
 +        self.entries.remove(index);
 +    }
 +
 +    fn add_file(&mut self, file: &Path) {
 +        self.entries.push((
 +            file.file_name().unwrap().to_str().unwrap().to_string(),
 +            ArchiveEntry::File(file.to_owned()),
 +        ));
 +    }
 +
 +    fn add_native_library(&mut self, name: rustc_span::symbol::Symbol, verbatim: bool) {
 +        let location = find_library(name, verbatim, &self.lib_search_paths, self.sess);
 +        self.add_archive(location.clone(), |_| false).unwrap_or_else(|e| {
 +            panic!("failed to add native library {}: {}", location.to_string_lossy(), e);
 +        });
 +    }
 +
 +    fn add_rlib(
 +        &mut self,
 +        rlib: &Path,
 +        name: &str,
 +        lto: bool,
 +        skip_objects: bool,
 +    ) -> std::io::Result<()> {
 +        let obj_start = name.to_owned();
 +
 +        self.add_archive(rlib.to_owned(), move |fname: &str| {
 +            // Ignore metadata files, no matter the name.
 +            if fname == METADATA_FILENAME {
 +                return true;
 +            }
 +
 +            // Don't include Rust objects if LTO is enabled
 +            if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
 +                return true;
 +            }
 +
 +            // Otherwise, if this is *not* a Rust object and we're skipping
 +            // objects, then skip this file.
 +            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
 +                return true;
 +            }
 +
 +            // ok, don't skip this
 +            false
 +        })
 +    }
 +
 +    fn update_symbols(&mut self) {}
 +
 +    fn build(mut self) {
 +        enum BuilderKind {
 +            Bsd(ar::Builder<File>),
 +            Gnu(ar::GnuBuilder<File>),
 +        }
 +
 +        let sess = self.sess;
 +
 +        let mut symbol_table = BTreeMap::new();
 +
 +        let mut entries = Vec::new();
 +
 +        for (entry_name, entry) in self.entries {
 +            // FIXME only read the symbol table of the object files to avoid having to keep all
 +            // object files in memory at once, or read them twice.
 +            let data = match entry {
 +                ArchiveEntry::FromArchive { archive_index, entry_index } => {
 +                    // FIXME read symbols from symtab
 +                    use std::io::Read;
 +                    let (ref _src_archive_path, ref mut src_archive) =
 +                        self.src_archives[archive_index];
 +                    let mut entry = src_archive.jump_to_entry(entry_index).unwrap();
 +                    let mut data = Vec::new();
 +                    entry.read_to_end(&mut data).unwrap();
 +                    data
 +                }
 +                ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
 +                    sess.fatal(&format!(
 +                        "error while reading object file during archive building: {}",
 +                        err
 +                    ));
 +                }),
 +            };
 +
 +            if !self.no_builtin_ranlib {
++                match object::File::parse(&*data) {
 +                    Ok(object) => {
 +                        symbol_table.insert(
 +                            entry_name.as_bytes().to_vec(),
 +                            object
 +                                .symbols()
 +                                .filter_map(|symbol| {
 +                                    if symbol.is_undefined()
 +                                        || symbol.is_local()
 +                                        || symbol.kind() != SymbolKind::Data
 +                                            && symbol.kind() != SymbolKind::Text
 +                                            && symbol.kind() != SymbolKind::Tls
 +                                    {
 +                                        None
 +                                    } else {
 +                                        symbol.name().map(|name| name.as_bytes().to_vec()).ok()
 +                                    }
 +                                })
 +                                .collect::<Vec<_>>(),
 +                        );
 +                    }
 +                    Err(err) => {
 +                        let err = err.to_string();
 +                        if err == "Unknown file magic" {
 +                            // Not an object file; skip it.
 +                        } else {
 +                            sess.fatal(&format!(
 +                                "error parsing `{}` during archive creation: {}",
 +                                entry_name, err
 +                            ));
 +                        }
 +                    }
 +                }
 +            }
 +
 +            entries.push((entry_name, data));
 +        }
 +
 +        let mut builder = if self.use_gnu_style_archive {
 +            BuilderKind::Gnu(
 +                ar::GnuBuilder::new(
 +                    File::create(&self.dst).unwrap_or_else(|err| {
 +                        sess.fatal(&format!(
 +                            "error opening destination during archive building: {}",
 +                            err
 +                        ));
 +                    }),
 +                    entries.iter().map(|(name, _)| name.as_bytes().to_vec()).collect(),
 +                    ar::GnuSymbolTableFormat::Size32,
 +                    symbol_table,
 +                )
 +                .unwrap(),
 +            )
 +        } else {
 +            BuilderKind::Bsd(
 +                ar::Builder::new(
 +                    File::create(&self.dst).unwrap_or_else(|err| {
 +                        sess.fatal(&format!(
 +                            "error opening destination during archive building: {}",
 +                            err
 +                        ));
 +                    }),
 +                    symbol_table,
 +                )
 +                .unwrap(),
 +            )
 +        };
 +
 +        // Add all files
 +        for (entry_name, data) in entries.into_iter() {
 +            let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
 +            match builder {
 +                BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
 +                BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
 +            }
 +        }
 +
 +        // Finalize archive
 +        std::mem::drop(builder);
 +
 +        if self.no_builtin_ranlib {
 +            let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
 +
 +            // Run ranlib to be able to link the archive
 +            let status = std::process::Command::new(ranlib)
 +                .arg(self.dst)
 +                .status()
 +                .expect("Couldn't run ranlib");
 +
 +            if !status.success() {
 +                self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
 +            }
 +        }
 +    }
 +}
 +
 +impl<'a> ArArchiveBuilder<'a> {
 +    fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
 +    where
 +        F: FnMut(&str) -> bool + 'static,
 +    {
 +        let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
 +        let archive_index = self.src_archives.len();
 +
 +        let mut i = 0;
 +        while let Some(entry) = archive.next_entry() {
 +            let entry = entry?;
 +            let file_name = String::from_utf8(entry.header().identifier().to_vec())
 +                .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
 +            if !skip(&file_name) {
 +                self.entries
 +                    .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
 +            }
 +            i += 1;
 +        }
 +
 +        self.src_archives.push((archive_path, archive));
 +        Ok(())
 +    }
 +}
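
A minimal standalone sketch of the member-filtering rule implemented by the `add_rlib` skip closure above; the `rust.metadata.bin` name mirrors the comment on `ArArchiveBuilder`, and the helper itself is illustrative only, not part of the patch:

    // Sketch of the `add_rlib` filter: returns true for rlib members that
    // should be skipped when copying them into the output archive.
    const METADATA_FILENAME: &str = "rust.metadata.bin";

    fn skip_member(fname: &str, obj_start: &str, lto: bool, skip_objects: bool) -> bool {
        // The metadata member is never copied into the final archive.
        if fname == METADATA_FILENAME {
            return true;
        }
        // With LTO enabled the Rust object files are dropped.
        if lto && fname.starts_with(obj_start) && fname.ends_with(".o") {
            return true;
        }
        // `skip_objects` drops everything that is not a Rust object file.
        if skip_objects && (!fname.starts_with(obj_start) || !fname.ends_with(".o")) {
            return true;
        }
        false
    }

    fn main() {
        assert!(skip_member("rust.metadata.bin", "regex", false, false));
        assert!(skip_member("regex.regex.abc123.o", "regex", true, false));
        assert!(!skip_member("regex.regex.abc123.o", "regex", false, true));
        assert!(skip_member("libm.o", "regex", false, true));
    }
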
index 3ec5c14ff17a2d8308657936aa5c21f15cfc6160,0000000000000000000000000000000000000000..ec3e17e5b758d6929cae190f6c51b92645cab5d1
mode 100644,000000..100644
--- /dev/null
@@@ -1,904 -1,0 +1,903 @@@
-     // Perform rust specific optimizations
-     tcx.sess.time("optimize clif ir", || {
-         crate::optimize::optimize_function(tcx, instance, context, &mut clif_comments);
-     });
 +//! Codegen of a single function
 +
 +use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::adjustment::PointerCast;
 +use rustc_middle::ty::layout::FnAbiExt;
 +use rustc_target::abi::call::FnAbi;
 +
 +use crate::constant::ConstantCx;
 +use crate::prelude::*;
 +
 +pub(crate) fn codegen_fn<'tcx>(
 +    cx: &mut crate::CodegenCx<'tcx>,
 +    module: &mut dyn Module,
 +    instance: Instance<'tcx>,
 +) {
 +    let tcx = cx.tcx;
 +
 +    let _inst_guard =
 +        crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
 +    debug_assert!(!instance.substs.needs_infer());
 +
 +    let mir = tcx.instance_mir(instance.def);
 +
 +    // Declare function
 +    let symbol_name = tcx.symbol_name(instance);
 +    let sig = get_function_sig(tcx, module.isa().triple(), instance);
 +    let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
 +
 +    cx.cached_context.clear();
 +
 +    // Make the FunctionBuilder
 +    let mut func_ctx = FunctionBuilderContext::new();
 +    let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
 +    func.name = ExternalName::user(0, func_id.as_u32());
 +    func.signature = sig;
 +    func.collect_debug_info();
 +
 +    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
 +
 +    // Predefine blocks
 +    let start_block = bcx.create_block();
 +    let block_map: IndexVec<BasicBlock, Block> =
 +        (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
 +
 +    // Make FunctionCx
 +    let pointer_type = module.target_config().pointer_type();
 +    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
 +
 +    let mut fx = FunctionCx {
 +        cx,
 +        module,
 +        tcx,
 +        pointer_type,
 +        vtables: FxHashMap::default(),
 +        constants_cx: ConstantCx::new(),
 +
 +        instance,
 +        symbol_name,
 +        mir,
 +        fn_abi: Some(FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
 +
 +        bcx,
 +        block_map,
 +        local_map: IndexVec::with_capacity(mir.local_decls.len()),
 +        caller_location: None, // set by `codegen_fn_prelude`
 +
 +        clif_comments,
 +        source_info_set: indexmap::IndexSet::new(),
 +        next_ssa_var: 0,
 +
 +        inline_asm_index: 0,
 +    };
 +
 +    let arg_uninhabited = fx
 +        .mir
 +        .args_iter()
 +        .any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
 +
 +    if !crate::constant::check_constants(&mut fx) {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
 +    } else if arg_uninhabited {
 +        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
 +        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
 +        crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
 +    } else {
 +        tcx.sess.time("codegen clif ir", || {
 +            tcx.sess
 +                .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
 +            codegen_fn_content(&mut fx);
 +        });
 +    }
 +
 +    // Recover all necessary data from fx before accessing `func` again, as fx borrows `func`
 +    // (via `bcx`) and therefore cannot be used afterwards.
 +    let instance = fx.instance;
 +    let mut clif_comments = fx.clif_comments;
 +    let source_info_set = fx.source_info_set;
 +    let local_map = fx.local_map;
 +
 +    fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
 +
 +    // Store function in context
 +    let context = &mut cx.cached_context;
 +    context.func = func;
 +
 +    crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
 +
 +    // Verify function
 +    verify_func(tcx, &clif_comments, &context.func);
 +
-     context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
 +    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
 +    // instruction, which doesn't have an encoding.
 +    context.compute_cfg();
 +    context.compute_domtree();
 +    context.eliminate_unreachable_code(module.isa()).unwrap();
 +    context.dce(module.isa()).unwrap();
 +    // Some Cranelift optimizations expect the domtree to not have been computed yet and as such
 +    // don't invalidate it when it would change, so clear it here.
 +    context.domtree.clear();
 +
-     let msg_ptr = fx.anonymous_str("assert", msg_str);
++    // Perform rust specific optimizations
++    tcx.sess.time("optimize clif ir", || {
++        crate::optimize::optimize_function(tcx, instance, context, &mut clif_comments);
++    });
 +
 +    // Define function
 +    tcx.sess.time("define function", || {
++        context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
 +        module
 +            .define_function(func_id, context, &mut NullTrapSink {}, &mut NullStackMapSink {})
 +            .unwrap()
 +    });
 +
 +    // Write optimized function to file for debugging
 +    crate::pretty_clif::write_clif_file(
 +        tcx,
 +        "opt",
 +        Some(module.isa()),
 +        instance,
 +        &context,
 +        &clif_comments,
 +    );
 +
 +    if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
 +        crate::pretty_clif::write_ir_file(
 +            tcx,
 +            || format!("{}.vcode", tcx.symbol_name(instance).name),
 +            |file| file.write_all(disasm.as_bytes()),
 +        )
 +    }
 +
 +    // Define debuginfo for function
 +    let isa = module.isa();
 +    let debug_context = &mut cx.debug_context;
 +    let unwind_context = &mut cx.unwind_context;
 +    tcx.sess.time("generate debug info", || {
 +        if let Some(debug_context) = debug_context {
 +            debug_context.define_function(
 +                instance,
 +                func_id,
 +                symbol_name.name,
 +                isa,
 +                context,
 +                &source_info_set,
 +                local_map,
 +            );
 +        }
 +        unwind_context.add_function(func_id, &context, isa);
 +    });
 +
 +    // Clear context to make it usable for the next function
 +    context.clear();
 +}
 +
 +pub(crate) fn verify_func(
 +    tcx: TyCtxt<'_>,
 +    writer: &crate::pretty_clif::CommentWriter,
 +    func: &Function,
 +) {
 +    tcx.sess.time("verify clif ir", || {
 +        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
 +        match cranelift_codegen::verify_function(&func, &flags) {
 +            Ok(_) => {}
 +            Err(err) => {
 +                tcx.sess.err(&format!("{:?}", err));
 +                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
 +                    &func,
 +                    None,
 +                    Some(Box::new(writer)),
 +                    err,
 +                );
 +                tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
 +            }
 +        }
 +    });
 +}
 +
 +fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
 +    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
 +        let block = fx.get_block(bb);
 +        fx.bcx.switch_to_block(block);
 +
 +        if bb_data.is_cleanup {
 +            // Unwinding after panicking is not supported
 +            continue;
 +
 +            // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
 +            // so for cleanup blocks.
 +        }
 +
 +        fx.bcx.ins().nop();
 +        for stmt in &bb_data.statements {
 +            fx.set_debug_loc(stmt.source_info);
 +            codegen_stmt(fx, block, stmt);
 +        }
 +
 +        if fx.clif_comments.enabled() {
 +            let mut terminator_head = "\n".to_string();
 +            bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
 +            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
 +            fx.add_comment(inst, terminator_head);
 +        }
 +
 +        fx.set_debug_loc(bb_data.terminator().source_info);
 +
 +        match &bb_data.terminator().kind {
 +            TerminatorKind::Goto { target } => {
 +                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
 +                    let mut can_immediately_return = true;
 +                    for stmt in &fx.mir[*target].statements {
 +                        if let StatementKind::StorageDead(_) = stmt.kind {
 +                        } else {
 +                            // FIXME Can sometimes happen, see rust-lang/rust#70531
 +                            can_immediately_return = false;
 +                            break;
 +                        }
 +                    }
 +
 +                    if can_immediately_return {
 +                        crate::abi::codegen_return(fx);
 +                        continue;
 +                    }
 +                }
 +
 +                let block = fx.get_block(*target);
 +                fx.bcx.ins().jump(block, &[]);
 +            }
 +            TerminatorKind::Return => {
 +                crate::abi::codegen_return(fx);
 +            }
 +            TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
 +                if !fx.tcx.sess.overflow_checks() {
 +                    if let mir::AssertKind::OverflowNeg(_) = *msg {
 +                        let target = fx.get_block(*target);
 +                        fx.bcx.ins().jump(target, &[]);
 +                        continue;
 +                    }
 +                }
 +                let cond = codegen_operand(fx, cond).load_scalar(fx);
 +
 +                let target = fx.get_block(*target);
 +                let failure = fx.bcx.create_block();
 +                // FIXME Mark failure block as cold once Cranelift supports it
 +
 +                if *expected {
 +                    fx.bcx.ins().brz(cond, failure, &[]);
 +                } else {
 +                    fx.bcx.ins().brnz(cond, failure, &[]);
 +                };
 +                fx.bcx.ins().jump(target, &[]);
 +
 +                fx.bcx.switch_to_block(failure);
 +                fx.bcx.ins().nop();
 +
 +                match msg {
 +                    AssertKind::BoundsCheck { ref len, ref index } => {
 +                        let len = codegen_operand(fx, len).load_scalar(fx);
 +                        let index = codegen_operand(fx, index).load_scalar(fx);
 +                        let location = fx
 +                            .get_caller_location(bb_data.terminator().source_info.span)
 +                            .load_scalar(fx);
 +
 +                        codegen_panic_inner(
 +                            fx,
 +                            rustc_hir::LangItem::PanicBoundsCheck,
 +                            &[index, len, location],
 +                            bb_data.terminator().source_info.span,
 +                        );
 +                    }
 +                    _ => {
 +                        let msg_str = msg.description();
 +                        codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
 +                    }
 +                }
 +            }
 +
 +            TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
 +                let discr = codegen_operand(fx, discr).load_scalar(fx);
 +
 +                let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
 +                    || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
 +                if use_bool_opt {
 +                    assert_eq!(targets.iter().count(), 1);
 +                    let (then_value, then_block) = targets.iter().next().unwrap();
 +                    let then_block = fx.get_block(then_block);
 +                    let else_block = fx.get_block(targets.otherwise());
 +                    let test_zero = match then_value {
 +                        0 => true,
 +                        1 => false,
 +                        _ => unreachable!("{:?}", targets),
 +                    };
 +
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    let (discr, is_inverted) =
 +                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
 +                    let test_zero = if is_inverted { !test_zero } else { test_zero };
 +                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
 +                    let discr =
 +                        crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
 +                    if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
 +                        &fx.bcx, discr, test_zero,
 +                    ) {
 +                        if taken {
 +                            fx.bcx.ins().jump(then_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    } else {
 +                        if test_zero {
 +                            fx.bcx.ins().brz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        } else {
 +                            fx.bcx.ins().brnz(discr, then_block, &[]);
 +                            fx.bcx.ins().jump(else_block, &[]);
 +                        }
 +                    }
 +                } else {
 +                    let mut switch = ::cranelift_frontend::Switch::new();
 +                    for (value, block) in targets.iter() {
 +                        let block = fx.get_block(block);
 +                        switch.set_entry(value, block);
 +                    }
 +                    let otherwise_block = fx.get_block(targets.otherwise());
 +                    switch.emit(&mut fx.bcx, discr, otherwise_block);
 +                }
 +            }
 +            TerminatorKind::Call {
 +                func,
 +                args,
 +                destination,
 +                fn_span,
 +                cleanup: _,
 +                from_hir_call: _,
 +            } => {
 +                fx.tcx.sess.time("codegen call", || {
 +                    crate::abi::codegen_terminator_call(fx, *fn_span, func, args, *destination)
 +                });
 +            }
 +            TerminatorKind::InlineAsm {
 +                template,
 +                operands,
 +                options,
 +                destination,
 +                line_spans: _,
 +            } => {
 +                crate::inline_asm::codegen_inline_asm(
 +                    fx,
 +                    bb_data.terminator().source_info.span,
 +                    template,
 +                    operands,
 +                    *options,
 +                );
 +
 +                match *destination {
 +                    Some(destination) => {
 +                        let destination_block = fx.get_block(destination);
 +                        fx.bcx.ins().jump(destination_block, &[]);
 +                    }
 +                    None => {
 +                        crate::trap::trap_unreachable(
 +                            fx,
 +                            "[corruption] Returned from noreturn inline asm",
 +                        );
 +                    }
 +                }
 +            }
 +            TerminatorKind::Resume | TerminatorKind::Abort => {
 +                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
 +            }
 +            TerminatorKind::Unreachable => {
 +                trap_unreachable(fx, "[corruption] Hit unreachable code.");
 +            }
 +            TerminatorKind::Yield { .. }
 +            | TerminatorKind::FalseEdge { .. }
 +            | TerminatorKind::FalseUnwind { .. }
 +            | TerminatorKind::DropAndReplace { .. }
 +            | TerminatorKind::GeneratorDrop => {
 +                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
 +            }
 +            TerminatorKind::Drop { place, target, unwind: _ } => {
 +                let drop_place = codegen_place(fx, *place);
 +                crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
 +
 +                let target_block = fx.get_block(*target);
 +                fx.bcx.ins().jump(target_block, &[]);
 +            }
 +        };
 +    }
 +
 +    fx.bcx.seal_all_blocks();
 +    fx.bcx.finalize();
 +}
 +
 +fn codegen_stmt<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    #[allow(unused_variables)] cur_block: Block,
 +    stmt: &Statement<'tcx>,
 +) {
 +    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
 +
 +    fx.set_debug_loc(stmt.source_info);
 +
 +    #[cfg(disabled)]
 +    match &stmt.kind {
 +        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
 +        _ => {
 +            if fx.clif_comments.enabled() {
 +                let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
 +                fx.add_comment(inst, format!("{:?}", stmt));
 +            }
 +        }
 +    }
 +
 +    match &stmt.kind {
 +        StatementKind::SetDiscriminant { place, variant_index } => {
 +            let place = codegen_place(fx, **place);
 +            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
 +        }
 +        StatementKind::Assign(to_place_and_rval) => {
 +            let lval = codegen_place(fx, to_place_and_rval.0);
 +            let dest_layout = lval.layout();
 +            match to_place_and_rval.1 {
 +                Rvalue::Use(ref operand) => {
 +                    let val = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
 +                    let place = codegen_place(fx, place);
 +                    let ref_ = place.place_ref(fx, lval.layout());
 +                    lval.write_cvalue(fx, ref_);
 +                }
 +                Rvalue::ThreadLocalRef(def_id) => {
 +                    let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
 +                    let lhs = codegen_operand(fx, &lhs_rhs.0);
 +                    let rhs = codegen_operand(fx, &lhs_rhs.1);
 +
 +                    let res = if !fx.tcx.sess.overflow_checks() {
 +                        let val =
 +                            crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
 +                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
 +                        CValue::by_val_pair(val, is_overflow, lval.layout())
 +                    } else {
 +                        crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
 +                    };
 +
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::UnaryOp(un_op, ref operand) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let layout = operand.layout();
 +                    let val = operand.load_scalar(fx);
 +                    let res = match un_op {
 +                        UnOp::Not => match layout.ty.kind() {
 +                            ty::Bool => {
 +                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
 +                                CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
 +                            }
 +                            ty::Uint(_) | ty::Int(_) => {
 +                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
 +                            }
 +                            _ => unreachable!("un op Not for {:?}", layout.ty),
 +                        },
 +                        UnOp::Neg => match layout.ty.kind() {
 +                            ty::Int(IntTy::I128) => {
 +                                // FIXME remove this case once ineg.i128 works
 +                                let zero =
 +                                    CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
 +                                crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
 +                            }
 +                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
 +                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
 +                            _ => unreachable!("un op Neg for {:?}", layout.ty),
 +                        },
 +                    };
 +                    lval.write_cvalue(fx, res);
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ReifyFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    match *from_ty.kind() {
 +                        ty::FnDef(def_id, substs) => {
 +                            let func_ref = fx.get_function_ref(
 +                                Instance::resolve_for_fn_ptr(
 +                                    fx.tcx,
 +                                    ParamEnv::reveal_all(),
 +                                    def_id,
 +                                    substs,
 +                                )
 +                                .unwrap()
 +                                .polymorphize(fx.tcx),
 +                            );
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
 +                        }
 +                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::UnsafeFnPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::MutToConstPointer),
 +                    ref operand,
 +                    to_ty,
 +                )
 +                | Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ArrayToPointer),
 +                    ref operand,
 +                    to_ty,
 +                ) => {
 +                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
 +                    let operand = codegen_operand(fx, operand);
 +                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
 +                }
 +                Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let from_ty = operand.layout().ty;
 +                    let to_ty = fx.monomorphize(to_ty);
 +
 +                    fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
 +                        ty.builtin_deref(true)
 +                            .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
 +                                has_ptr_meta(fx.tcx, pointee_ty)
 +                            })
 +                            .unwrap_or(false)
 +                    }
 +
 +                    if is_fat_ptr(fx, from_ty) {
 +                        if is_fat_ptr(fx, to_ty) {
 +                            // fat-ptr -> fat-ptr
 +                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
 +                        } else {
 +                            // fat-ptr -> thin-ptr
 +                            let (ptr, _extra) = operand.load_scalar_pair(fx);
 +                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
 +                        }
 +                    } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
 +                        // enum -> discriminant value
 +                        assert!(adt_def.is_enum());
 +                        match to_ty.kind() {
 +                            ty::Uint(_) | ty::Int(_) => {}
 +                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
 +                        }
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +
 +                        let discriminant = crate::discriminant::codegen_get_discriminant(
 +                            fx,
 +                            operand,
 +                            fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
 +                        )
 +                        .load_scalar(fx);
 +
 +                        let res = crate::cast::clif_intcast(
 +                            fx,
 +                            discriminant,
 +                            to_clif_ty,
 +                            to_ty.is_signed(),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    } else {
 +                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
 +                        let from = operand.load_scalar(fx);
 +
 +                        let res = clif_int_or_float_cast(
 +                            fx,
 +                            from,
 +                            type_sign(from_ty),
 +                            to_clif_ty,
 +                            type_sign(to_ty),
 +                        );
 +                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
 +                    }
 +                }
 +                Rvalue::Cast(
 +                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
 +                    ref operand,
 +                    _to_ty,
 +                ) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    match *operand.layout().ty.kind() {
 +                        ty::Closure(def_id, substs) => {
 +                            let instance = Instance::resolve_closure(
 +                                fx.tcx,
 +                                def_id,
 +                                substs,
 +                                ty::ClosureKind::FnOnce,
 +                            )
 +                            .polymorphize(fx.tcx);
 +                            let func_ref = fx.get_function_ref(instance);
 +                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
 +                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
 +                        }
 +                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
 +                    }
 +                }
 +                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    operand.unsize_value(fx, lval);
 +                }
 +                Rvalue::Discriminant(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let value = place.to_cvalue(fx);
 +                    let discr =
 +                        crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
 +                    lval.write_cvalue(fx, discr);
 +                }
 +                Rvalue::Repeat(ref operand, times) => {
 +                    let operand = codegen_operand(fx, operand);
 +                    let times = fx
 +                        .monomorphize(times)
 +                        .eval(fx.tcx, ParamEnv::reveal_all())
 +                        .val
 +                        .try_to_bits(fx.tcx.data_layout.pointer_size)
 +                        .unwrap();
 +                    if operand.layout().size.bytes() == 0 {
 +                        // Do nothing for ZSTs
 +                    } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
 +                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
 +                        // FIXME use emit_small_memset where possible
 +                        let addr = lval.to_ptr().get_addr(fx);
 +                        let val = operand.load_scalar(fx);
 +                        fx.bcx.call_memset(fx.module.target_config(), addr, val, times);
 +                    } else {
 +                        let loop_block = fx.bcx.create_block();
 +                        let loop_block2 = fx.bcx.create_block();
 +                        let done_block = fx.bcx.create_block();
 +                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
 +                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
 +                        fx.bcx.ins().jump(loop_block, &[zero]);
 +
 +                        fx.bcx.switch_to_block(loop_block);
 +                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
 +                        fx.bcx.ins().brnz(done, done_block, &[]);
 +                        fx.bcx.ins().jump(loop_block2, &[]);
 +
 +                        fx.bcx.switch_to_block(loop_block2);
 +                        let to = lval.place_index(fx, index);
 +                        to.write_cvalue(fx, operand);
 +                        let index = fx.bcx.ins().iadd_imm(index, 1);
 +                        fx.bcx.ins().jump(loop_block, &[index]);
 +
 +                        fx.bcx.switch_to_block(done_block);
 +                        fx.bcx.ins().nop();
 +                    }
 +                }
 +                Rvalue::Len(place) => {
 +                    let place = codegen_place(fx, place);
 +                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +                    let len = codegen_array_len(fx, place);
 +                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
 +                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
 +                    let content_ty = fx.monomorphize(content_ty);
 +                    let layout = fx.layout_of(content_ty);
 +                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
 +                    let llalign = fx.bcx.ins().iconst(usize_type, layout.align.abi.bytes() as i64);
 +                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
 +
 +                    // Allocate space:
 +                    let def_id =
 +                        match fx.tcx.lang_items().require(rustc_hir::LangItem::ExchangeMalloc) {
 +                            Ok(id) => id,
 +                            Err(s) => {
 +                                fx.tcx
 +                                    .sess
 +                                    .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
 +                            }
 +                        };
 +                    let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +                    let func_ref = fx.get_function_ref(instance);
 +                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
 +                    let ptr = fx.bcx.inst_results(call)[0];
 +                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
 +                }
 +                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
 +                    assert!(
 +                        lval.layout()
 +                            .ty
 +                            .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
 +                    );
 +                    let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
 +                    let val =
 +                        CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
 +                    lval.write_cvalue(fx, val);
 +                }
 +                Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
 +                    AggregateKind::Array(_ty) => {
 +                        for (i, operand) in operands.iter().enumerate() {
 +                            let operand = codegen_operand(fx, operand);
 +                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
 +                            let to = lval.place_index(fx, index);
 +                            to.write_cvalue(fx, operand);
 +                        }
 +                    }
 +                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
 +                },
 +            }
 +        }
 +        StatementKind::StorageLive(_)
 +        | StatementKind::StorageDead(_)
 +        | StatementKind::Nop
 +        | StatementKind::FakeRead(..)
 +        | StatementKind::Retag { .. }
 +        | StatementKind::AscribeUserType(..) => {}
 +
 +        StatementKind::LlvmInlineAsm(asm) => {
 +            match asm.asm.asm.as_str().trim() {
 +                "" => {
 +                    // Black box
 +                }
 +                _ => fx.tcx.sess.span_fatal(
 +                    stmt.source_info.span,
 +                    "Legacy `llvm_asm!` inline assembly is not supported. \
 +                    Try using the new `asm!` instead.",
 +                ),
 +            }
 +        }
 +        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
 +        StatementKind::CopyNonOverlapping(inner) => {
 +            let dst = codegen_operand(fx, &inner.dst);
 +            let pointee = dst
 +                .layout()
 +                .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
 +                .expect("Expected pointer");
 +            let dst = dst.load_scalar(fx);
 +            let src = codegen_operand(fx, &inner.src).load_scalar(fx);
 +            let count = codegen_operand(fx, &inner.count).load_scalar(fx);
 +            let elem_size: u64 = pointee.size.bytes();
 +            let bytes =
 +                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
 +            fx.bcx.call_memcpy(fx.module.target_config(), dst, src, bytes);
 +        }
 +    }
 +}
 +
 +fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
 +    match *place.layout().ty.kind() {
 +        ty::Array(_elem_ty, len) => {
 +            let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
 +            fx.bcx.ins().iconst(fx.pointer_type, len)
 +        }
 +        ty::Slice(_elem_ty) => {
 +            place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
 +        }
 +        _ => bug!("Rvalue::Len({:?})", place),
 +    }
 +}
 +
 +pub(crate) fn codegen_place<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    place: Place<'tcx>,
 +) -> CPlace<'tcx> {
 +    let mut cplace = fx.get_local_place(place.local);
 +
 +    for elem in place.projection {
 +        match elem {
 +            PlaceElem::Deref => {
 +                cplace = cplace.place_deref(fx);
 +            }
 +            PlaceElem::Field(field, _ty) => {
 +                cplace = cplace.place_field(fx, field);
 +            }
 +            PlaceElem::Index(local) => {
 +                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
 +                let offset: u64 = offset;
 +                let index = if !from_end {
 +                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
 +                } else {
 +                    let len = codegen_array_len(fx, cplace);
 +                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
 +                };
 +                cplace = cplace.place_index(fx, index);
 +            }
 +            PlaceElem::Subslice { from, to, from_end } => {
 +                // These indices are generated by slice patterns.
 +                // slice[from:-to] in Python terms.
 +
 +                let from: u64 = from;
 +                let to: u64 = to;
 +
 +                match cplace.layout().ty.kind() {
 +                    ty::Array(elem_ty, _len) => {
 +                        assert!(!from_end, "array subslices are never `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let ptr = cplace.to_ptr();
 +                        cplace = CPlace::for_ptr(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
 +                        );
 +                    }
 +                    ty::Slice(elem_ty) => {
 +                        assert!(from_end, "slice subslices should be `from_end`");
 +                        let elem_layout = fx.layout_of(elem_ty);
 +                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
 +                        let len = len.unwrap();
 +                        cplace = CPlace::for_ptr_with_extra(
 +                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
 +                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
 +                            cplace.layout(),
 +                        );
 +                    }
 +                    _ => unreachable!(),
 +                }
 +            }
 +            PlaceElem::Downcast(_adt_def, variant) => {
 +                cplace = cplace.downcast_variant(fx, variant);
 +            }
 +        }
 +    }
 +
 +    cplace
 +}
 +
 +pub(crate) fn codegen_operand<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> CValue<'tcx> {
 +    match operand {
 +        Operand::Move(place) | Operand::Copy(place) => {
 +            let cplace = codegen_place(fx, *place);
 +            cplace.to_cvalue(fx)
 +        }
 +        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
 +    }
 +}
 +
 +pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
 +    let location = fx.get_caller_location(span).load_scalar(fx);
 +
++    let msg_ptr = fx.anonymous_str(msg_str);
 +    let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
 +    let args = [msg_ptr, msg_len, location];
 +
 +    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
 +}
 +
 +pub(crate) fn codegen_panic_inner<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    lang_item: rustc_hir::LangItem,
 +    args: &[Value],
 +    span: Span,
 +) {
 +    let def_id =
 +        fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
 +
 +    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
 +    let symbol_name = fx.tcx.symbol_name(instance).name;
 +
 +    fx.lib_call(
 +        &*symbol_name,
 +        vec![
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +            AbiParam::new(fx.pointer_type),
 +        ],
 +        vec![],
 +        args,
 +    );
 +
 +    crate::trap::trap_unreachable(fx, "panic lang item returned");
 +}
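
The `PlaceElem::Subslice` arm of `codegen_place` above computes the new base pointer and length by hand: an array subslice covers the half-open range `[from, to)`, while a slice subslice counts `to` from the end, so the new length is `len - from - to` ("slice[from:-to] in Python terms", as the comment puts it). A small sketch of the same index arithmetic on plain Rust slices, for illustration only:

    // Array case: the subslice is simply the half-open range [from, to).
    fn subslice_array<T>(xs: &[T], from: usize, to: usize) -> &[T] {
        &xs[from..to]
    }

    // Slice case: drop `from` elements from the front and `to` from the back,
    // leaving len - from - to elements.
    fn subslice_from_end<T>(xs: &[T], from: usize, to: usize) -> &[T] {
        let new_len = xs.len() - from - to;
        &xs[from..from + new_len]
    }

    fn main() {
        let a = [1, 2, 3, 4, 5];
        assert_eq!(subslice_array(&a, 1, 4), &[2, 3, 4]);
        assert_eq!(subslice_from_end(&a, 1, 1), &[2, 3, 4]);
    }
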
index c12d6d0f1414306a13c698059eb544493129766a,0000000000000000000000000000000000000000..488ff6e134956e2d870fabf7e2ebce2327c9b5aa
mode 100644,000000..100644
--- /dev/null
@@@ -1,415 -1,0 +1,406 @@@
-     pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
-         use std::collections::hash_map::DefaultHasher;
-         use std::hash::{Hash, Hasher};
-         let mut hasher = DefaultHasher::new();
-         msg.hash(&mut hasher);
-         let msg_hash = hasher.finish();
 +use rustc_index::vec::IndexVec;
 +use rustc_middle::ty::SymbolName;
 +use rustc_target::abi::call::FnAbi;
 +use rustc_target::abi::{Integer, Primitive};
 +use rustc_target::spec::{HasTargetSpec, Target};
 +
 +use crate::constant::ConstantCx;
 +use crate::prelude::*;
 +
 +pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
 +    match tcx.data_layout.pointer_size.bits() {
 +        16 => types::I16,
 +        32 => types::I32,
 +        64 => types::I64,
 +        bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
 +    }
 +}
 +
 +pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
 +    match scalar.value {
 +        Primitive::Int(int, _sign) => match int {
 +            Integer::I8 => types::I8,
 +            Integer::I16 => types::I16,
 +            Integer::I32 => types::I32,
 +            Integer::I64 => types::I64,
 +            Integer::I128 => types::I128,
 +        },
 +        Primitive::F32 => types::F32,
 +        Primitive::F64 => types::F64,
 +        Primitive::Pointer => pointer_ty(tcx),
 +    }
 +}
 +
 +fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
 +    Some(match ty.kind() {
 +        ty::Bool => types::I8,
 +        ty::Uint(size) => match size {
 +            UintTy::U8 => types::I8,
 +            UintTy::U16 => types::I16,
 +            UintTy::U32 => types::I32,
 +            UintTy::U64 => types::I64,
 +            UintTy::U128 => types::I128,
 +            UintTy::Usize => pointer_ty(tcx),
 +        },
 +        ty::Int(size) => match size {
 +            IntTy::I8 => types::I8,
 +            IntTy::I16 => types::I16,
 +            IntTy::I32 => types::I32,
 +            IntTy::I64 => types::I64,
 +            IntTy::I128 => types::I128,
 +            IntTy::Isize => pointer_ty(tcx),
 +        },
 +        ty::Char => types::I32,
 +        ty::Float(size) => match size {
 +            FloatTy::F32 => types::F32,
 +            FloatTy::F64 => types::F64,
 +        },
 +        ty::FnPtr(_) => pointer_ty(tcx),
 +        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, pointee_ty) {
 +                return None;
 +            } else {
 +                pointer_ty(tcx)
 +            }
 +        }
 +        ty::Adt(adt_def, _) if adt_def.repr.simd() => {
 +            let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
 +            {
 +                Abi::Vector { element, count } => (element.clone(), *count),
 +                _ => unreachable!(),
 +            };
 +
 +            match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
 +                // Cranelift currently only implements icmp for 128bit vectors.
 +                Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
 +                _ => return None,
 +            }
 +        }
 +        ty::Param(_) => bug!("ty param {:?}", ty),
 +        _ => return None,
 +    })
 +}
 +
 +fn clif_pair_type_from_ty<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> Option<(types::Type, types::Type)> {
 +    Some(match ty.kind() {
 +        ty::Tuple(substs) if substs.len() == 2 => {
 +            let mut types = substs.types();
 +            let a = clif_type_from_ty(tcx, types.next().unwrap())?;
 +            let b = clif_type_from_ty(tcx, types.next().unwrap())?;
 +            if a.is_vector() || b.is_vector() {
 +                return None;
 +            }
 +            (a, b)
 +        }
 +        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
 +            if has_ptr_meta(tcx, pointee_ty) {
 +                (pointer_ty(tcx), pointer_ty(tcx))
 +            } else {
 +                return None;
 +            }
 +        }
 +        _ => return None,
 +    })
 +}
 +
 +/// Is a pointer to this type a fat ptr?
 +pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
 +    let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
 +    match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
 +        Abi::Scalar(_) => false,
 +        Abi::ScalarPair(_, _) => true,
 +        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
 +    }
 +}
 +
 +pub(crate) fn codegen_icmp_imm(
 +    fx: &mut FunctionCx<'_, '_, '_>,
 +    intcc: IntCC,
 +    lhs: Value,
 +    rhs: i128,
 +) -> Value {
 +    let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
 +    if lhs_ty == types::I128 {
 +        // FIXME legalize `icmp_imm.i128` in Cranelift
 +
 +        let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
 +        let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
 +
 +        match intcc {
 +            IntCC::Equal => {
 +                let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                fx.bcx.ins().band(lsb_eq, msb_eq)
 +            }
 +            IntCC::NotEqual => {
 +                let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
 +                let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
 +                fx.bcx.ins().bor(lsb_ne, msb_ne)
 +            }
 +            _ => {
 +                // if msb_eq {
 +                //     lsb_cc
 +                // } else {
 +                //     msb_cc
 +                // }
 +
 +                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
 +                let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
 +                let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
 +
 +                fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
 +            }
 +        }
 +    } else {
 +        let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
 +        fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
 +    }
 +}
 +
 +pub(crate) fn type_min_max_value(
 +    bcx: &mut FunctionBuilder<'_>,
 +    ty: Type,
 +    signed: bool,
 +) -> (Value, Value) {
 +    assert!(ty.is_int());
 +
 +    if ty == types::I128 {
 +        if signed {
 +            let min = i128::MIN as u128;
 +            let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
 +            let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
 +            let min = bcx.ins().iconcat(min_lsb, min_msb);
 +
 +            let max = i128::MAX as u128;
 +            let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
 +            let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
 +            let max = bcx.ins().iconcat(max_lsb, max_msb);
 +
 +            return (min, max);
 +        } else {
 +            let min_half = bcx.ins().iconst(types::I64, 0);
 +            let min = bcx.ins().iconcat(min_half, min_half);
 +
 +            let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
 +            let max = bcx.ins().iconcat(max_half, max_half);
 +
 +            return (min, max);
 +        }
 +    }
 +
 +    let min = match (ty, signed) {
 +        (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
 +            0i64
 +        }
 +        (types::I8, true) => i64::from(i8::MIN),
 +        (types::I16, true) => i64::from(i16::MIN),
 +        (types::I32, true) => i64::from(i32::MIN),
 +        (types::I64, true) => i64::MIN,
 +        _ => unreachable!(),
 +    };
 +
 +    let max = match (ty, signed) {
 +        (types::I8, false) => i64::from(u8::MAX),
 +        (types::I16, false) => i64::from(u16::MAX),
 +        (types::I32, false) => i64::from(u32::MAX),
 +        (types::I64, false) => u64::MAX as i64,
 +        (types::I8, true) => i64::from(i8::MAX),
 +        (types::I16, true) => i64::from(i16::MAX),
 +        (types::I32, true) => i64::from(i32::MAX),
 +        (types::I64, true) => i64::MAX,
 +        _ => unreachable!(),
 +    };
 +
 +    let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
 +
 +    (min, max)
 +}
 +
 +pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
 +    match ty.kind() {
 +        ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
 +        ty::Int(..) => true,
 +        ty::Float(..) => false, // `signed` is unused for floats
 +        _ => panic!("{}", ty),
 +    }
 +}
 +
 +pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
 +    pub(crate) cx: &'clif mut crate::CodegenCx<'tcx>,
 +    pub(crate) module: &'m mut dyn Module,
 +    pub(crate) tcx: TyCtxt<'tcx>,
 +    pub(crate) pointer_type: Type, // Cached from module
 +    pub(crate) vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
 +    pub(crate) constants_cx: ConstantCx,
 +
 +    pub(crate) instance: Instance<'tcx>,
 +    pub(crate) symbol_name: SymbolName<'tcx>,
 +    pub(crate) mir: &'tcx Body<'tcx>,
 +    pub(crate) fn_abi: Option<FnAbi<'tcx, Ty<'tcx>>>,
 +
 +    pub(crate) bcx: FunctionBuilder<'clif>,
 +    pub(crate) block_map: IndexVec<BasicBlock, Block>,
 +    pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
 +
 +    /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
 +    pub(crate) caller_location: Option<CValue<'tcx>>,
 +
 +    pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
 +    pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
 +
 +    /// This should only be accessed by `CPlace::new_var`.
 +    pub(crate) next_ssa_var: u32,
 +
 +    pub(crate) inline_asm_index: u32,
 +}
 +
 +impl<'tcx> LayoutOf for FunctionCx<'_, '_, 'tcx> {
 +    type Ty = Ty<'tcx>;
 +    type TyAndLayout = TyAndLayout<'tcx>;
 +
 +    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
 +        RevealAllLayoutCx(self.tcx).layout_of(ty)
 +    }
 +}
 +
 +impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
 +        self.tcx
 +    }
 +}
 +
 +impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
 +    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
 +        &self.tcx.data_layout
 +    }
 +}
 +
 +impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.tcx.sess.target
 +    }
 +}
 +
 +impl<'tcx> FunctionCx<'_, '_, 'tcx> {
 +    pub(crate) fn monomorphize<T>(&self, value: T) -> T
 +    where
 +        T: TypeFoldable<'tcx> + Copy,
 +    {
 +        self.instance.subst_mir_and_normalize_erasing_regions(
 +            self.tcx,
 +            ty::ParamEnv::reveal_all(),
 +            value,
 +        )
 +    }
 +
 +    pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
 +        clif_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
 +        clif_pair_type_from_ty(self.tcx, ty)
 +    }
 +
 +    pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
 +        *self.block_map.get(bb).unwrap()
 +    }
 +
 +    pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
 +        *self.local_map.get(local).unwrap_or_else(|| {
 +            panic!("Local {:?} doesn't exist", local);
 +        })
 +    }
 +
 +    pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
 +        let (index, _) = self.source_info_set.insert_full(source_info);
 +        self.bcx.set_srcloc(SourceLoc::new(index as u32));
 +    }
 +
 +    pub(crate) fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
 +        if let Some(loc) = self.caller_location {
 +            // `#[track_caller]` is used; return caller location instead of current location.
 +            return loc;
 +        }
 +
 +        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
 +        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
 +        let const_loc = self.tcx.const_caller_location((
 +            rustc_span::symbol::Symbol::intern(
 +                &caller.file.name.prefer_remapped().to_string_lossy(),
 +            ),
 +            caller.line as u32,
 +            caller.col_display as u32 + 1,
 +        ));
 +        crate::constant::codegen_const_value(self, const_loc, self.tcx.caller_location_ty())
 +    }
 +
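`get_caller_location` backs the `#[track_caller]` feature: when the attribute is present the caller's location is threaded through implicitly, otherwise the span of the current expression is used. A minimal illustration of the surface feature this implements (standard library API, independent of the backend; the function name is illustrative):

    use std::panic::Location;

    #[track_caller]
    fn where_was_i_called() -> &'static Location<'static> {
        // Reports the call site of this function, not this line,
        // because the caller location is passed in as a hidden argument.
        Location::caller()
    }

    fn main() {
        let loc = where_was_i_called();
        println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
    }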
 +    pub(crate) fn triple(&self) -> &target_lexicon::Triple {
 +        self.module.isa().triple()
 +    }
 +
-         let msg_id = self
-             .module
-             .declare_data(&format!("__{}_{:08x}", prefix, msg_hash), Linkage::Local, false, false)
-             .unwrap();
++    pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
++        let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
 +
 +        // Ignore DuplicateDefinition error, as the data will be the same
 +        let _ = self.module.define_data(msg_id, &data_ctx);
 +
 +        let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
 +        if self.clif_comments.enabled() {
 +            self.add_comment(local_msg_id, msg);
 +        }
 +        self.bcx.ins().global_value(self.pointer_type, local_msg_id)
 +    }
 +}
 +
 +pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
 +
 +impl<'tcx> LayoutOf for RevealAllLayoutCx<'tcx> {
 +    type Ty = Ty<'tcx>;
 +    type TyAndLayout = TyAndLayout<'tcx>;
 +
 +    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
 +        assert!(!ty.still_further_specializable());
 +        self.0.layout_of(ParamEnv::reveal_all().and(&ty)).unwrap_or_else(|e| {
 +            if let layout::LayoutError::SizeOverflow(_) = e {
 +                self.0.sess.fatal(&e.to_string())
 +            } else {
 +                bug!("failed to get layout for `{}`: {}", ty, e)
 +            }
 +        })
 +    }
 +}
 +
 +impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
 +    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
 +        self.0
 +    }
 +}
 +
 +impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
 +    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
 +        &self.0.data_layout
 +    }
 +}
 +
 +impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
 +    fn param_env(&self) -> ParamEnv<'tcx> {
 +        ParamEnv::reveal_all()
 +    }
 +}
 +
 +impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
 +    fn target_spec(&self) -> &Target {
 +        &self.0.sess.target
 +    }
 +}
index e59a0cb0a23237bfe0b4fd0164909db4d8a98e49,0000000000000000000000000000000000000000..eef3c8c8d6e2b38170209854f34c5c441ad59b5b
mode 100644,000000..100644
--- /dev/null
@@@ -1,107 -1,0 +1,116 @@@
 +use std::env;
 +use std::str::FromStr;
 +
 +fn bool_env_var(key: &str) -> bool {
 +    env::var(key).as_ref().map(|val| &**val) == Ok("1")
 +}
 +
 +/// The mode to use for compilation.
 +#[derive(Copy, Clone, Debug)]
 +pub enum CodegenMode {
 +    /// AOT compile the crate. This is the default.
 +    Aot,
 +    /// JIT compile and execute the crate.
 +    Jit,
 +    /// JIT compile and execute the crate, but only compile functions the first time they are used.
 +    JitLazy,
 +}
 +
 +impl FromStr for CodegenMode {
 +    type Err = String;
 +
 +    fn from_str(s: &str) -> Result<Self, Self::Err> {
 +        match s {
 +            "aot" => Ok(CodegenMode::Aot),
 +            "jit" => Ok(CodegenMode::Jit),
 +            "jit-lazy" => Ok(CodegenMode::JitLazy),
 +            _ => Err(format!("Unknown codegen mode `{}`", s)),
 +        }
 +    }
 +}
 +
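The strings accepted here are exactly the values usable with `-Cllvm-args=mode=...`; a quick sketch of the mapping (assumes `CodegenMode` is in scope):

    // "aot", "jit" and "jit-lazy" are the only recognized spellings.
    assert!(matches!("jit-lazy".parse::<CodegenMode>(), Ok(CodegenMode::JitLazy)));
    assert!("jit_lazy".parse::<CodegenMode>().is_err()); // unknown spelling is rejected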
 +/// Configuration of cg_clif as passed in through `-Cllvm-args` and various env vars.
 +#[derive(Clone, Debug)]
 +pub struct BackendConfig {
 +    /// Whether the crate should be AOT compiled or JIT executed.
 +    ///
 +    /// Defaults to AOT compilation. Can be set using `-Cllvm-args=mode=...`.
 +    pub codegen_mode: CodegenMode,
 +
 +    /// When JIT mode is enabled, pass these arguments to the program.
 +    ///
 +    /// Defaults to the value of `CG_CLIF_JIT_ARGS`.
 +    pub jit_args: Vec<String>,
 +
 +    /// Display the time it took to perform codegen for a crate.
 +    ///
 +    /// Defaults to true when the `CG_CLIF_DISPLAY_CG_TIME` env var is set to 1, and to false otherwise.
 +    /// Can be set using `-Cllvm-args=display_cg_time=...`.
 +    pub display_cg_time: bool,
 +
++    /// The register allocator to use.
++    ///
++    /// Defaults to the value of `CG_CLIF_REGALLOC`, or `backtracking` if unset. Can be set using
++    /// `-Cllvm-args=regalloc=...`.
++    pub regalloc: String,
++
 +    /// Enable the Cranelift IR verifier for all compilation passes. If not set, the verifier will
 +    /// only run once before passing the clif IR to Cranelift for compilation.
 +    ///
 +    /// Defaults to true when the `CG_CLIF_ENABLE_VERIFIER` env var is set to 1 or when cg_clif is
 +    /// compiled with debug assertions enabled, and to false otherwise. Can be set using
 +    /// `-Cllvm-args=enable_verifier=...`.
 +    pub enable_verifier: bool,
 +
 +    /// Don't cache object files in the incremental cache. Useful during development of cg_clif
 +    /// to make it possible to use incremental mode for all analyses performed by rustc without
 +    /// caching object files whose contents would have been changed by a change to cg_clif.
 +    ///
 +    /// Defaults to true when the `CG_CLIF_DISABLE_INCR_CACHE` env var is set to 1, and to false
 +    /// otherwise. Can be set using `-Cllvm-args=disable_incr_cache=...`.
 +    pub disable_incr_cache: bool,
 +}
 +
 +impl Default for BackendConfig {
 +    fn default() -> Self {
 +        BackendConfig {
 +            codegen_mode: CodegenMode::Aot,
 +            jit_args: {
 +                let args = std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
 +                args.split(' ').map(|arg| arg.to_string()).collect()
 +            },
 +            display_cg_time: bool_env_var("CG_CLIF_DISPLAY_CG_TIME"),
++            regalloc: std::env::var("CG_CLIF_REGALLOC")
++                .unwrap_or_else(|_| "backtracking".to_string()),
 +            enable_verifier: cfg!(debug_assertions) || bool_env_var("CG_CLIF_ENABLE_VERIFIER"),
 +            disable_incr_cache: bool_env_var("CG_CLIF_DISABLE_INCR_CACHE"),
 +        }
 +    }
 +}
 +
 +impl BackendConfig {
 +    /// Parse the configuration passed in using `-Cllvm-args`.
 +    pub fn from_opts(opts: &[String]) -> Result<Self, String> {
 +        fn parse_bool(name: &str, value: &str) -> Result<bool, String> {
 +            value.parse().map_err(|_| format!("failed to parse value `{}` for {}", value, name))
 +        }
 +
 +        let mut config = BackendConfig::default();
 +        for opt in opts {
 +            if let Some((name, value)) = opt.split_once('=') {
 +                match name {
 +                    "mode" => config.codegen_mode = value.parse()?,
 +                    "display_cg_time" => config.display_cg_time = parse_bool(name, value)?,
++                    "regalloc" => config.regalloc = value.to_string(),
 +                    "enable_verifier" => config.enable_verifier = parse_bool(name, value)?,
 +                    "disable_incr_cache" => config.disable_incr_cache = parse_bool(name, value)?,
 +                    _ => return Err(format!("Unknown option `{}`", name)),
 +                }
 +            } else {
 +                return Err(format!("Invalid option `{}`", opt));
 +            }
 +        }
 +
 +        Ok(config)
 +    }
 +}
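A short usage sketch of the parser above: each `-Cllvm-args=<name>=<value>` passed to rustc arrives here as one `name=value` string. Note that the boolean options parsed here expect `true`/`false`, while the `CG_CLIF_*` env vars read by `Default` expect `1`. Example values below are hypothetical; option names are taken from the match above.

    let opts = vec!["mode=jit-lazy".to_string(), "display_cg_time=true".to_string()];
    let config = BackendConfig::from_opts(&opts).unwrap();
    assert!(matches!(config.codegen_mode, CodegenMode::JitLazy));
    assert!(config.display_cg_time);
    // An option without `=value` is rejected.
    assert!(BackendConfig::from_opts(&["mode".to_string()]).is_err());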
index 6b132e4ff0fb8093712495e2935ff17cb7d2f3a6,0000000000000000000000000000000000000000..3ba12c4e96d6831b1c3238eb48ac158fa334a3ab
mode 100644,000000..100644
--- /dev/null
@@@ -1,449 -1,0 +1,538 @@@
-     alloc_range, read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
 +//! Handling of `static`s, `const`s and promoted allocations
 +
 +use rustc_span::DUMMY_SP;
 +
 +use rustc_ast::Mutability;
 +use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 +use rustc_errors::ErrorReported;
 +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 +use rustc_middle::mir::interpret::{
-             // FIXME set correct segment for Mach-O files
-             data_ctx.set_segment_section("", &*section_name);
++    alloc_range, read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc,
++    Scalar,
 +};
 +use rustc_middle::ty::ConstKind;
 +
 +use cranelift_codegen::ir::GlobalValueData;
 +use cranelift_module::*;
 +
 +use crate::prelude::*;
 +
 +pub(crate) struct ConstantCx {
 +    todo: Vec<TodoItem>,
 +    done: FxHashSet<DataId>,
 +    anon_allocs: FxHashMap<AllocId, DataId>,
 +}
 +
 +#[derive(Copy, Clone, Debug)]
 +enum TodoItem {
 +    Alloc(AllocId),
 +    Static(DefId),
 +}
 +
 +impl ConstantCx {
 +    pub(crate) fn new() -> Self {
 +        ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
 +    }
 +
 +    pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
 +        //println!("todo {:?}", self.todo);
 +        define_all_allocs(tcx, module, &mut self);
 +        //println!("done {:?}", self.done);
 +        self.done.clear();
 +    }
 +}
 +
 +pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
 +    let mut all_constants_ok = true;
 +    for constant in &fx.mir.required_consts {
 +        let const_ = match fx.monomorphize(constant.literal) {
 +            ConstantKind::Ty(ct) => ct,
 +            ConstantKind::Val(..) => continue,
 +        };
 +        match const_.val {
 +            ConstKind::Value(_) => {}
 +            ConstKind::Unevaluated(unevaluated) => {
 +                if let Err(err) =
 +                    fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
 +                {
 +                    all_constants_ok = false;
 +                    match err {
 +                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
 +                            fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
 +                        }
 +                        ErrorHandled::TooGeneric => {
 +                            span_bug!(
 +                                constant.span,
 +                                "codegen encountered polymorphic constant: {:?}",
 +                                err
 +                            );
 +                        }
 +                    }
 +                }
 +            }
 +            ConstKind::Param(_)
 +            | ConstKind::Infer(_)
 +            | ConstKind::Bound(_, _)
 +            | ConstKind::Placeholder(_)
 +            | ConstKind::Error(_) => unreachable!("{:?}", const_),
 +        }
 +    }
 +    all_constants_ok
 +}
 +
 +pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
 +    let mut constants_cx = ConstantCx::new();
 +    constants_cx.todo.push(TodoItem::Static(def_id));
 +    constants_cx.finalize(tcx, module);
 +}
 +
 +pub(crate) fn codegen_tls_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    def_id: DefId,
 +    layout: TyAndLayout<'tcx>,
 +) -> CValue<'tcx> {
 +    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("tls {:?}", def_id));
 +    }
 +    let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
 +    CValue::by_val(tls_ptr, layout)
 +}
 +
 +fn codegen_static_ref<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    def_id: DefId,
 +    layout: TyAndLayout<'tcx>,
 +) -> CPlace<'tcx> {
 +    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("{:?}", def_id));
 +    }
 +    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
 +    assert!(!layout.is_unsized(), "unsized statics aren't supported");
 +    assert!(
 +        matches!(
 +            fx.bcx.func.global_values[local_data_id],
 +            GlobalValueData::Symbol { tls: false, .. }
 +        ),
 +        "tls static referenced without Rvalue::ThreadLocalRef"
 +    );
 +    CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
 +}
 +
 +pub(crate) fn codegen_constant<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    constant: &Constant<'tcx>,
 +) -> CValue<'tcx> {
 +    let const_ = match fx.monomorphize(constant.literal) {
 +        ConstantKind::Ty(ct) => ct,
 +        ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
 +    };
 +    let const_val = match const_.val {
 +        ConstKind::Value(const_val) => const_val,
 +        ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
 +            if fx.tcx.is_static(def.did) =>
 +        {
 +            assert!(substs.is_empty());
 +            assert!(promoted.is_none());
 +
 +            return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty)).to_cvalue(fx);
 +        }
 +        ConstKind::Unevaluated(unevaluated) => {
 +            match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
 +                Ok(const_val) => const_val,
 +                Err(_) => {
 +                    span_bug!(constant.span, "erroneous constant not captured by required_consts");
 +                }
 +            }
 +        }
 +        ConstKind::Param(_)
 +        | ConstKind::Infer(_)
 +        | ConstKind::Bound(_, _)
 +        | ConstKind::Placeholder(_)
 +        | ConstKind::Error(_) => unreachable!("{:?}", const_),
 +    };
 +
 +    codegen_const_value(fx, const_val, const_.ty)
 +}
 +
 +pub(crate) fn codegen_const_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    const_val: ConstValue<'tcx>,
 +    ty: Ty<'tcx>,
 +) -> CValue<'tcx> {
 +    let layout = fx.layout_of(ty);
 +    assert!(!layout.is_unsized(), "sized const value");
 +
 +    if layout.is_zst() {
 +        return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
 +    }
 +
 +    match const_val {
 +        ConstValue::Scalar(x) => {
 +            if fx.clif_type(layout.ty).is_none() {
 +                let (size, align) = (layout.size, layout.align.pref);
 +                let mut alloc = Allocation::from_bytes(
 +                    std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(),
 +                    align,
 +                    Mutability::Not,
 +                );
 +                alloc.write_scalar(fx, alloc_range(Size::ZERO, size), x.into()).unwrap();
 +                let alloc = fx.tcx.intern_const_alloc(alloc);
 +                return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
 +            }
 +
 +            match x {
 +                Scalar::Int(int) => CValue::const_val(fx, layout, int),
 +                Scalar::Ptr(ptr) => {
 +                    let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
 +                    let base_addr = match alloc_kind {
 +                        Some(GlobalAlloc::Memory(alloc)) => {
 +                            fx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
 +                            let data_id = data_id_for_alloc_id(
 +                                &mut fx.constants_cx,
 +                                fx.module,
 +                                ptr.alloc_id,
 +                                alloc.mutability,
 +                            );
 +                            let local_data_id =
 +                                fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                            if fx.clif_comments.enabled() {
 +                                fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
 +                            }
 +                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                        }
 +                        Some(GlobalAlloc::Function(instance)) => {
 +                            let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
 +                            let local_func_id =
 +                                fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
 +                            fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
 +                        }
 +                        Some(GlobalAlloc::Static(def_id)) => {
 +                            assert!(fx.tcx.is_static(def_id));
 +                            let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
 +                            let local_data_id =
 +                                fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +                            if fx.clif_comments.enabled() {
 +                                fx.add_comment(local_data_id, format!("{:?}", def_id));
 +                            }
 +                            fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
 +                        }
 +                        None => bug!("missing allocation {:?}", ptr.alloc_id),
 +                    };
 +                    let val = if ptr.offset.bytes() != 0 {
 +                        fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
 +                    } else {
 +                        base_addr
 +                    };
 +                    CValue::by_val(val, layout)
 +                }
 +            }
 +        }
 +        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
 +            pointer_for_allocation(fx, alloc)
 +                .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
 +            layout,
 +        ),
 +        ConstValue::Slice { data, start, end } => {
 +            let ptr = pointer_for_allocation(fx, data)
 +                .offset_i64(fx, i64::try_from(start).unwrap())
 +                .get_addr(fx);
 +            let len = fx
 +                .bcx
 +                .ins()
 +                .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
 +            CValue::by_val_pair(ptr, len, layout)
 +        }
 +    }
 +}
 +
 +fn pointer_for_allocation<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    alloc: &'tcx Allocation,
 +) -> crate::pointer::Pointer {
 +    let alloc_id = fx.tcx.create_memory_alloc(alloc);
 +    fx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
 +    let data_id =
 +        data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, alloc.mutability);
 +
 +    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(local_data_id, format!("{:?}", alloc_id));
 +    }
 +    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
 +    crate::pointer::Pointer::new(global_ptr)
 +}
 +
 +fn data_id_for_alloc_id(
 +    cx: &mut ConstantCx,
 +    module: &mut dyn Module,
 +    alloc_id: AllocId,
 +    mutability: rustc_hir::Mutability,
 +) -> DataId {
 +    *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
 +        module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
 +    })
 +}
 +
 +fn data_id_for_static(
 +    tcx: TyCtxt<'_>,
 +    module: &mut dyn Module,
 +    def_id: DefId,
 +    definition: bool,
 +) -> DataId {
 +    let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
 +    let linkage = if definition {
 +        crate::linkage::get_static_linkage(tcx, def_id)
 +    } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
 +        || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
 +    {
 +        Linkage::Preemptible
 +    } else {
 +        Linkage::Import
 +    };
 +
 +    let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
 +    let symbol_name = tcx.symbol_name(instance).name;
 +    let ty = instance.ty(tcx, ParamEnv::reveal_all());
 +    let is_mutable = if tcx.is_mutable_static(def_id) {
 +        true
 +    } else {
 +        !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
 +    };
 +    let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
 +
 +    let attrs = tcx.codegen_fn_attrs(def_id);
 +
 +    let data_id = module
 +        .declare_data(
 +            &*symbol_name,
 +            linkage,
 +            is_mutable,
 +            attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
 +        )
 +        .unwrap();
 +
 +    if rlinkage.is_some() {
 +        // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
 +        // Declare an internal global `extern_with_linkage_foo` which
 +        // is initialized with the address of `foo`.  If `foo` is
 +        // discarded during linking (for example, if `foo` has weak
 +        // linkage and there are no definitions), then
 +        // `extern_with_linkage_foo` will instead be initialized to
 +        // zero.
 +
 +        let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
 +        let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.set_align(align);
 +        let data = module.declare_data_in_data(data_id, &mut data_ctx);
 +        data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
 +        data_ctx.write_data_addr(0, data, 0);
 +        match module.define_data(ref_data_id, &data_ctx) {
 +            // Every time the static is referenced there will be another definition of this global,
 +            // so duplicate definitions are expected and allowed.
 +            Err(ModuleError::DuplicateDefinition(_)) => {}
 +            res => res.unwrap(),
 +        }
 +        ref_data_id
 +    } else {
 +        data_id
 +    }
 +}
 +
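The `rlinkage.is_some()` branch above only triggers for statics declared with an explicit linkage attribute; a minimal, hypothetical declaration that would take the `Linkage::Preemptible` path and be read through the `_rust_extern_with_linkage_*` indirection (nightly-only, uses the unstable `linkage` feature; the symbol name is made up):

    #![feature(linkage)]

    extern "C" {
        // ExternalWeak: resolves to a null address if no definition exists at link
        // time, which is why reads go through the zero-initialized indirection above.
        #[linkage = "extern_weak"]
        static OPTIONAL_HOOK: *const u8;
    }

    fn main() {
        let present = unsafe { !OPTIONAL_HOOK.is_null() };
        println!("hook present: {}", present);
    }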
 +fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
 +    while let Some(todo_item) = cx.todo.pop() {
 +        let (data_id, alloc, section_name) = match todo_item {
 +            TodoItem::Alloc(alloc_id) => {
 +                //println!("alloc_id {}", alloc_id);
 +                let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
 +                    GlobalAlloc::Memory(alloc) => alloc,
 +                    GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
 +                };
 +                let data_id = data_id_for_alloc_id(cx, module, alloc_id, alloc.mutability);
 +                (data_id, alloc, None)
 +            }
 +            TodoItem::Static(def_id) => {
 +                //println!("static {:?}", def_id);
 +
 +                let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
 +
 +                let alloc = tcx.eval_static_initializer(def_id).unwrap();
 +
 +                let data_id = data_id_for_static(tcx, module, def_id, true);
 +                (data_id, alloc, section_name)
 +            }
 +        };
 +
 +        //println!("data_id {}", data_id);
 +        if cx.done.contains(&data_id) {
 +            continue;
 +        }
 +
 +        let mut data_ctx = DataContext::new();
 +        data_ctx.set_align(alloc.align.bytes());
 +
 +        if let Some(section_name) = section_name {
-         Operand::Copy(_) | Operand::Move(_) => None,
++            let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
++                if let Some(names) = section_name.split_once(',') {
++                    names
++                } else {
++                    tcx.sess.fatal(&format!(
++                        "#[link_section = \"{}\"] is not valid for macOS targets: the value must be a segment and a section separated by a comma",
++                        section_name
++                    ));
++                }
++            } else {
++                ("", &*section_name)
++            };
++            data_ctx.set_segment_section(segment_name, section_name);
 +        }
 +
 +        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
 +        data_ctx.define(bytes.into_boxed_slice());
 +
 +        for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
 +            let addend = {
 +                let endianness = tcx.data_layout.endian;
 +                let offset = offset.bytes() as usize;
 +                let ptr_size = tcx.data_layout.pointer_size;
 +                let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
 +                    offset..offset + ptr_size.bytes() as usize,
 +                );
 +                read_target_uint(endianness, bytes).unwrap()
 +            };
 +
 +            let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
 +            let data_id = match reloc_target_alloc {
 +                GlobalAlloc::Function(instance) => {
 +                    assert_eq!(addend, 0);
 +                    let func_id = crate::abi::import_function(tcx, module, instance);
 +                    let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
 +                    data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
 +                    continue;
 +                }
 +                GlobalAlloc::Memory(target_alloc) => {
 +                    cx.todo.push(TodoItem::Alloc(reloc));
 +                    data_id_for_alloc_id(cx, module, reloc, target_alloc.mutability)
 +                }
 +                GlobalAlloc::Static(def_id) => {
 +                    if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
 +                    {
 +                        tcx.sess.fatal(&format!(
 +                            "Allocation {:?} contains reference to TLS value {:?}",
 +                            alloc, def_id
 +                        ));
 +                    }
 +
 +                    // Don't push a `TodoItem::Static` here, as it will cause statics used by
 +                    // multiple crates to be duplicated between them. It isn't necessary anyway,
 +                    // as it will get pushed by `codegen_static` when necessary.
 +                    data_id_for_static(tcx, module, def_id, false)
 +                }
 +            };
 +
 +            let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
 +            data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
 +        }
 +
 +        module.define_data(data_id, &data_ctx).unwrap();
 +        cx.done.insert(data_id);
 +    }
 +
 +    assert!(cx.todo.is_empty(), "{:?}", cx.todo);
 +}
 +
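As the Mach-O branch above enforces, on Apple targets a `#[link_section]` value must name both a segment and a section separated by a comma; a minimal example of a static that passes this check (standard attribute syntax, not code from this commit; the section names are illustrative):

    // On Mach-O the value is "segment,section"; on ELF targets a plain name is used.
    #[cfg_attr(target_os = "macos", link_section = "__DATA,__my_section")]
    #[cfg_attr(not(target_os = "macos"), link_section = ".my_custom_section")]
    static TAGGED: u32 = 0xdead_beef;

    fn main() {
        println!("{:#x}", TAGGED);
    }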
 +pub(crate) fn mir_operand_get_const_val<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    operand: &Operand<'tcx>,
 +) -> Option<ConstValue<'tcx>> {
 +    match operand {
 +        Operand::Constant(const_) => match const_.literal {
 +            ConstantKind::Ty(const_) => {
 +                fx.monomorphize(const_).eval(fx.tcx, ParamEnv::reveal_all()).val.try_to_value()
 +            }
 +            ConstantKind::Val(val, _) => Some(val),
 +        },
++        // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
++        // inside a temporary before being passed to the intrinsic requiring the const argument.
++        // This code tries to find the single assignment that defines the referenced local as a constant.
++        Operand::Copy(place) | Operand::Move(place) => {
++            if !place.projection.is_empty() {
++                return None;
++            }
++            let mut computed_const_val = None;
++            for bb_data in fx.mir.basic_blocks() {
++                for stmt in &bb_data.statements {
++                    match &stmt.kind {
++                        StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
++                            match &local_and_rvalue.1 {
++                                Rvalue::Cast(CastKind::Misc, operand, ty) => {
++                                    if computed_const_val.is_some() {
++                                        return None; // local assigned twice
++                                    }
++                                    if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
++                                        return None;
++                                    }
++                                    let const_val = mir_operand_get_const_val(fx, operand)?;
++                                    if fx.layout_of(ty).size
++                                        != const_val.try_to_scalar_int()?.size()
++                                    {
++                                        return None;
++                                    }
++                                    computed_const_val = Some(const_val);
++                                }
++                                Rvalue::Use(operand) => {
++                                    computed_const_val = mir_operand_get_const_val(fx, operand)
++                                }
++                                _ => return None,
++                            }
++                        }
++                        StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
++                            if &**stmt_place == place =>
++                        {
++                            return None;
++                        }
++                        StatementKind::LlvmInlineAsm(_) | StatementKind::CopyNonOverlapping(_) => {
++                            return None;
++                        } // conservative handling
++                        StatementKind::Assign(_)
++                        | StatementKind::FakeRead(_)
++                        | StatementKind::SetDiscriminant { .. }
++                        | StatementKind::StorageLive(_)
++                        | StatementKind::StorageDead(_)
++                        | StatementKind::Retag(_, _)
++                        | StatementKind::AscribeUserType(_, _)
++                        | StatementKind::Coverage(_)
++                        | StatementKind::Nop => {}
++                    }
++                }
++                match &bb_data.terminator().kind {
++                    TerminatorKind::Goto { .. }
++                    | TerminatorKind::SwitchInt { .. }
++                    | TerminatorKind::Resume
++                    | TerminatorKind::Abort
++                    | TerminatorKind::Return
++                    | TerminatorKind::Unreachable
++                    | TerminatorKind::Drop { .. }
++                    | TerminatorKind::Assert { .. } => {}
++                    TerminatorKind::DropAndReplace { .. }
++                    | TerminatorKind::Yield { .. }
++                    | TerminatorKind::GeneratorDrop
++                    | TerminatorKind::FalseEdge { .. }
++                    | TerminatorKind::FalseUnwind { .. } => unreachable!(),
++                    TerminatorKind::InlineAsm { .. } => return None,
++                    TerminatorKind::Call { destination: Some((call_place, _)), .. }
++                        if call_place == place =>
++                    {
++                        return None;
++                    }
++                    TerminatorKind::Call { .. } => {}
++                }
++            }
++            computed_const_val
++        }
 +    }
 +}
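A sketch of the source shape referenced by the FIXME above (all names hypothetical): the cast result is first assigned to a temporary local in MIR, roughly `_2 = IMM8 as u32; ...; call(..., move _2)`, so at the call site the argument is an `Operand::Move` of that local rather than an `Operand::Constant`, and the walk over the basic blocks recovers the value.

    const IMM8: u8 = 3;

    fn needs_const_like_arg(_imm: u32) {
        // Stand-in for e.g. a SIMD intrinsic that requires an immediate operand.
    }

    fn caller() {
        needs_const_like_arg(IMM8 as u32);
    }

    fn main() {
        caller();
    }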
index 24d933728db693b804325b232d664f33b65f77d0,0000000000000000000000000000000000000000..9cf51d15c8ca0a8e99595398d76580bad62d7067
mode 100644,000000..100644
--- /dev/null
@@@ -1,433 -1,0 +1,436 @@@
-         let obj_out = tcx
-             .output_filenames(())
-             .temp_path(OutputType::Object, Some(&cgu.name().as_str()));
 +//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
 +//! standalone executable.
 +
 +use std::path::PathBuf;
 +
 +use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 +use rustc_codegen_ssa::back::linker::LinkerInfo;
 +use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
 +use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 +use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 +use rustc_middle::middle::cstore::EncodedMetadata;
 +use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
 +use rustc_session::cgu_reuse_tracker::CguReuse;
 +use rustc_session::config::{DebugInfo, OutputType};
 +
 +use cranelift_object::ObjectModule;
 +
 +use crate::{prelude::*, BackendConfig};
 +
 +struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
 +
 +impl<HCX> HashStable<HCX> for ModuleCodegenResult {
 +    fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
 +        // do nothing
 +    }
 +}
 +
 +fn emit_module(
 +    tcx: TyCtxt<'_>,
 +    backend_config: &BackendConfig,
 +    name: String,
 +    kind: ModuleKind,
 +    module: ObjectModule,
 +    debug: Option<DebugContext<'_>>,
 +    unwind_context: UnwindContext,
 +) -> ModuleCodegenResult {
 +    let mut product = module.finish();
 +
 +    if let Some(mut debug) = debug {
 +        debug.emit(&mut product);
 +    }
 +
 +    unwind_context.emit(&mut product);
 +
 +    let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
 +    let obj = product.object.write().unwrap();
 +    if let Err(err) = std::fs::write(&tmp_file, obj) {
 +        tcx.sess.fatal(&format!("error writing object file: {}", err));
 +    }
 +
 +    let work_product = if backend_config.disable_incr_cache {
 +        None
 +    } else {
 +        rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
 +            tcx.sess,
 +            &name,
 +            &Some(tmp_file.clone()),
 +        )
 +    };
 +
 +    ModuleCodegenResult(
 +        CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
 +        work_product,
 +    )
 +}
 +
 +fn reuse_workproduct_for_cgu(
 +    tcx: TyCtxt<'_>,
 +    cgu: &CodegenUnit<'_>,
 +    work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
 +) -> CompiledModule {
 +    let incr_comp_session_dir = tcx.sess.incr_comp_session_dir();
 +    let mut object = None;
 +    let work_product = cgu.work_product(tcx);
 +    if let Some(saved_file) = &work_product.saved_file {
-     crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut cx.unwind_context, false);
++        let obj_out =
++            tcx.output_filenames(()).temp_path(OutputType::Object, Some(&cgu.name().as_str()));
 +        object = Some(obj_out.clone());
 +        let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
 +        if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
 +            tcx.sess.err(&format!(
 +                "unable to copy {} to {}: {}",
 +                source_file.display(),
 +                obj_out.display(),
 +                err
 +            ));
 +        }
 +    }
 +
 +    work_products.insert(cgu.work_product_id(), work_product);
 +
 +    CompiledModule {
 +        name: cgu.name().to_string(),
 +        kind: ModuleKind::Regular,
 +        object,
 +        dwarf_object: None,
 +        bytecode: None,
 +    }
 +}
 +
 +fn module_codegen(
 +    tcx: TyCtxt<'_>,
 +    (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
 +) -> ModuleCodegenResult {
 +    let cgu = tcx.codegen_unit(cgu_name);
 +    let mono_items = cgu.items_in_deterministic_order(tcx);
 +
 +    let isa = crate::build_isa(tcx.sess, &backend_config);
 +    let mut module = crate::backend::make_module(tcx.sess, isa, cgu_name.as_str().to_string());
 +
 +    let mut cx = crate::CodegenCx::new(
 +        tcx,
 +        backend_config.clone(),
 +        module.isa(),
 +        tcx.sess.opts.debuginfo != DebugInfo::None,
 +    );
 +    super::predefine_mono_items(tcx, &mut module, &mono_items);
 +    for (mono_item, _) in mono_items {
 +        match mono_item {
 +            MonoItem::Fn(inst) => {
 +                cx.tcx
 +                    .sess
 +                    .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
 +            }
 +            MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
 +            MonoItem::GlobalAsm(item_id) => {
 +                let item = cx.tcx.hir().item(item_id);
 +                if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
 +                    if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
 +                        cx.global_asm.push_str("\n.intel_syntax noprefix\n");
 +                    } else {
 +                        cx.global_asm.push_str("\n.att_syntax\n");
 +                    }
 +                    for piece in asm.template {
 +                        match *piece {
 +                            InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
 +                            InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
 +                        }
 +                    }
 +                    cx.global_asm.push_str("\n.att_syntax\n\n");
 +                } else {
 +                    bug!("Expected GlobalAsm found {:?}", item);
 +                }
 +            }
 +        }
 +    }
-             let tmp_file = tcx
-                 .output_filenames(())
-                 .temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
++    crate::main_shim::maybe_create_entry_wrapper(
++        tcx,
++        &mut module,
++        &mut cx.unwind_context,
++        false,
++        cgu.is_primary(),
++    );
 +
 +    let debug_context = cx.debug_context;
 +    let unwind_context = cx.unwind_context;
 +    let codegen_result = tcx.sess.time("write object file", || {
 +        emit_module(
 +            tcx,
 +            &backend_config,
 +            cgu.name().as_str().to_string(),
 +            ModuleKind::Regular,
 +            module,
 +            debug_context,
 +            unwind_context,
 +        )
 +    });
 +
 +    codegen_global_asm(tcx, &cgu.name().as_str(), &cx.global_asm);
 +
 +    codegen_result
 +}
 +
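For reference, the `MonoItem::GlobalAsm` arm above collects the template strings of crate-level `global_asm!` items (wrapping them in `.intel_syntax`/`.att_syntax` directives as needed); a minimal, hypothetical item of that kind, using the unstable feature gate of the nightlies this backend tracked (the macro has since been stabilized as `core::arch::global_asm!`; the symbol name is made up):

    #![feature(global_asm)]

    // The template string ends up in `cx.global_asm` and is later assembled by
    // `codegen_global_asm` with the external `as`/`ld` toolchain.
    global_asm!(".globl __cg_clif_example_nop\n__cg_clif_example_nop:\n    ret");

    fn main() {}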
 +pub(crate) fn run_aot(
 +    tcx: TyCtxt<'_>,
 +    backend_config: BackendConfig,
 +    metadata: EncodedMetadata,
 +    need_metadata_module: bool,
 +) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
 +    use rustc_span::symbol::sym;
 +
 +    let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
 +    let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
 +    let windows_subsystem = subsystem.map(|subsystem| {
 +        if subsystem != sym::windows && subsystem != sym::console {
 +            tcx.sess.fatal(&format!(
 +                "invalid windows subsystem `{}`, only \
 +                                    `windows` and `console` are allowed",
 +                subsystem
 +            ));
 +        }
 +        subsystem.to_string()
 +    });
 +
 +    let mut work_products = FxHashMap::default();
 +
 +    let cgus = if tcx.sess.opts.output_types.should_codegen() {
 +        tcx.collect_and_partition_mono_items(()).1
 +    } else {
 +        // If only `--emit metadata` is used, we shouldn't perform any codegen.
 +        // Also `tcx.collect_and_partition_mono_items` may panic in that case.
 +        &[]
 +    };
 +
 +    if tcx.dep_graph.is_fully_enabled() {
 +        for cgu in &*cgus {
 +            tcx.ensure().codegen_unit(cgu.name());
 +        }
 +    }
 +
 +    let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
 +        cgus.iter()
 +            .map(|cgu| {
 +                let cgu_reuse = determine_cgu_reuse(tcx, cgu);
 +                tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
 +
 +                match cgu_reuse {
 +                    _ if backend_config.disable_incr_cache => {}
 +                    CguReuse::No => {}
 +                    CguReuse::PreLto => {
 +                        return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
 +                    }
 +                    CguReuse::PostLto => unreachable!(),
 +                }
 +
 +                let dep_node = cgu.codegen_dep_node(tcx);
 +                let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
 +                    dep_node,
 +                    tcx,
 +                    (backend_config.clone(), cgu.name()),
 +                    module_codegen,
 +                    rustc_middle::dep_graph::hash_result,
 +                );
 +
 +                if let Some((id, product)) = work_product {
 +                    work_products.insert(id, product);
 +                }
 +
 +                module
 +            })
 +            .collect::<Vec<_>>()
 +    });
 +
 +    tcx.sess.abort_if_errors();
 +
 +    let isa = crate::build_isa(tcx.sess, &backend_config);
 +    let mut allocator_module =
 +        crate::backend::make_module(tcx.sess, isa, "allocator_shim".to_string());
 +    assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
 +    let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa(), true);
 +    let created_alloc_shim =
 +        crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
 +
 +    let allocator_module = if created_alloc_shim {
 +        let ModuleCodegenResult(module, work_product) = emit_module(
 +            tcx,
 +            &backend_config,
 +            "allocator_shim".to_string(),
 +            ModuleKind::Allocator,
 +            allocator_module,
 +            None,
 +            allocator_unwind_context,
 +        );
 +        if let Some((id, product)) = work_product {
 +            work_products.insert(id, product);
 +        }
 +        Some(module)
 +    } else {
 +        None
 +    };
 +
 +    let metadata_module = if need_metadata_module {
 +        let _timer = tcx.prof.generic_activity("codegen crate metadata");
 +        let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
 +            use rustc_middle::mir::mono::CodegenUnitNameBuilder;
 +
 +            let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
 +            let metadata_cgu_name = cgu_name_builder
 +                .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
 +                .as_str()
 +                .to_string();
 +
-     let output_object_file =
-         tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
++            let tmp_file =
++                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
 +
 +            let obj = crate::backend::with_object(tcx.sess, &metadata_cgu_name, |object| {
 +                crate::metadata::write_metadata(tcx, object);
 +            });
 +
 +            if let Err(err) = std::fs::write(&tmp_file, obj) {
 +                tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
 +            }
 +
 +            (metadata_cgu_name, tmp_file)
 +        });
 +
 +        Some(CompiledModule {
 +            name: metadata_cgu_name,
 +            kind: ModuleKind::Metadata,
 +            object: Some(tmp_file),
 +            dwarf_object: None,
 +            bytecode: None,
 +        })
 +    } else {
 +        None
 +    };
 +
 +    Box::new((
 +        CodegenResults {
 +            crate_name: tcx.crate_name(LOCAL_CRATE),
 +            modules,
 +            allocator_module,
 +            metadata_module,
 +            metadata,
 +            windows_subsystem,
 +            linker_info: LinkerInfo::new(tcx, crate::target_triple(tcx.sess).to_string()),
 +            crate_info: CrateInfo::new(tcx),
 +        },
 +        work_products,
 +    ))
 +}
 +
 +fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
 +    use std::io::Write;
 +    use std::process::{Command, Stdio};
 +
 +    if global_asm.is_empty() {
 +        return;
 +    }
 +
 +    if cfg!(not(feature = "inline_asm"))
 +        || tcx.sess.target.is_like_osx
 +        || tcx.sess.target.is_like_windows
 +    {
 +        if global_asm.contains("__rust_probestack") {
 +            return;
 +        }
 +
 +        // FIXME fix linker error on macOS
 +        if cfg!(not(feature = "inline_asm")) {
 +            tcx.sess.fatal(
 +                "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
 +            );
 +        } else {
 +            tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
 +        }
 +    }
 +
 +    let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
 +    let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
 +
 +    // Remove all LLVM style comments
 +    let global_asm = global_asm
 +        .lines()
 +        .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
 +        .collect::<Vec<_>>()
 +        .join("\n");
 +
++    let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
 +
 +    // Assemble `global_asm`
 +    let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
 +    let mut child = Command::new(assembler)
 +        .arg("-o")
 +        .arg(&global_asm_object_file)
 +        .stdin(Stdio::piped())
 +        .spawn()
 +        .expect("Failed to spawn `as`.");
 +    child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
 +    let status = child.wait().expect("Failed to wait for `as`.");
 +    if !status.success() {
 +        tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
 +    }
 +
 +    // Link the global asm and main object file together
 +    let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
 +    std::fs::rename(&output_object_file, &main_object_file).unwrap();
 +    let status = Command::new(linker)
 +        .arg("-r") // Create a new object file
 +        .arg("-o")
 +        .arg(output_object_file)
 +        .arg(&main_object_file)
 +        .arg(&global_asm_object_file)
 +        .status()
 +        .unwrap();
 +    if !status.success() {
 +        tcx.sess.fatal(&format!(
 +            "Failed to link `{}` and `{}` together",
 +            main_object_file.display(),
 +            global_asm_object_file.display(),
 +        ));
 +    }
 +
 +    std::fs::remove_file(global_asm_object_file).unwrap();
 +    std::fs::remove_file(main_object_file).unwrap();
 +}
 +
 +fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
 +    let mut new_filename = path.file_stem().unwrap().to_owned();
 +    new_filename.push(postfix);
 +    if let Some(extension) = path.extension() {
 +        new_filename.push(".");
 +        new_filename.push(extension);
 +    }
 +    path.set_file_name(new_filename);
 +    path
 +}
 +
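The helper above inserts the postfix between the file stem and the extension, which is how the `.asm` and `.main` object files in `codegen_global_asm` get their names. A quick illustration with made-up values, assuming the function is called from inside this module:

    use std::path::PathBuf;

    // "example.o" + ".asm" -> "example.asm.o"
    assert_eq!(
        add_file_stem_postfix(PathBuf::from("example.o"), ".asm"),
        PathBuf::from("example.asm.o")
    );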
 +// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
 +fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
 +    if !tcx.dep_graph.is_fully_enabled() {
 +        return CguReuse::No;
 +    }
 +
 +    let work_product_id = &cgu.work_product_id();
 +    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
 +        // We don't have anything cached for this CGU. This can happen
 +        // if the CGU did not exist in the previous session.
 +        return CguReuse::No;
 +    }
 +
 +    // Try to mark the CGU as green. If we can do so, it means that nothing
 +    // affecting the LLVM module has changed and we can re-use a cached version.
 +    // If we compile with any kind of LTO, this means we can re-use the bitcode
 +    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
 +    // know that later). If we are not doing LTO, there is only one optimized
 +    // version of each module, so we re-use that.
 +    let dep_node = cgu.codegen_dep_node(tcx);
 +    assert!(
 +        !tcx.dep_graph.dep_node_exists(&dep_node),
 +        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
 +        cgu.name()
 +    );
 +
 +    if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
 +}
index 632e86da736ab235530d52e7d25512fefe83b590,0000000000000000000000000000000000000000..4a99cb727c8306be349da798407d5da139b31828
mode 100644,000000..100644
--- /dev/null
@@@ -1,292 -1,0 +1,293 @@@
-         let obj = object::File::parse(&obj).unwrap();
 +//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
 +//! files.
 +
 +use std::cell::RefCell;
 +use std::ffi::CString;
 +use std::os::raw::{c_char, c_int};
 +
 +use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
 +use rustc_codegen_ssa::CrateInfo;
 +use rustc_middle::mir::mono::MonoItem;
 +
 +use cranelift_jit::{JITBuilder, JITModule};
 +
 +use crate::{prelude::*, BackendConfig};
 +use crate::{CodegenCx, CodegenMode};
 +
 +struct JitState {
 +    backend_config: BackendConfig,
 +    jit_module: JITModule,
 +}
 +
 +thread_local! {
 +    static LAZY_JIT_STATE: RefCell<Option<JitState>> = RefCell::new(None);
 +}
 +
 +fn create_jit_module<'tcx>(
 +    tcx: TyCtxt<'tcx>,
 +    backend_config: &BackendConfig,
 +    hotswap: bool,
 +) -> (JITModule, CodegenCx<'tcx>) {
 +    let imported_symbols = load_imported_symbols_for_jit(tcx);
 +
 +    let isa = crate::build_isa(tcx.sess, backend_config);
 +    let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
 +    jit_builder.hotswap(hotswap);
 +    crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
 +    jit_builder.symbols(imported_symbols);
 +    let mut jit_module = JITModule::new(jit_builder);
 +
 +    let mut cx = crate::CodegenCx::new(tcx, backend_config.clone(), jit_module.isa(), false);
 +
 +    crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
 +    crate::main_shim::maybe_create_entry_wrapper(
 +        tcx,
 +        &mut jit_module,
 +        &mut cx.unwind_context,
 +        true,
++        true,
 +    );
 +
 +    (jit_module, cx)
 +}
 +
 +pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
 +    if !tcx.sess.opts.output_types.should_codegen() {
 +        tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
 +    }
 +
 +    if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
 +        tcx.sess.fatal("can't jit non-executable crate");
 +    }
 +
 +    let (mut jit_module, mut cx) = create_jit_module(
 +        tcx,
 +        &backend_config,
 +        matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
 +    );
 +
 +    let (_, cgus) = tcx.collect_and_partition_mono_items(());
 +    let mono_items = cgus
 +        .iter()
 +        .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
 +        .flatten()
 +        .collect::<FxHashMap<_, (_, _)>>()
 +        .into_iter()
 +        .collect::<Vec<(_, (_, _))>>();
 +
 +    super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
 +        super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
 +        for (mono_item, _) in mono_items {
 +            match mono_item {
 +                MonoItem::Fn(inst) => match backend_config.codegen_mode {
 +                    CodegenMode::Aot => unreachable!(),
 +                    CodegenMode::Jit => {
 +                        cx.tcx.sess.time("codegen fn", || {
 +                            crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
 +                        });
 +                    }
 +                    CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
 +                },
 +                MonoItem::Static(def_id) => {
 +                    crate::constant::codegen_static(tcx, &mut jit_module, def_id);
 +                }
 +                MonoItem::GlobalAsm(item_id) => {
 +                    let item = tcx.hir().item(item_id);
 +                    tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
 +                }
 +            }
 +        }
 +    });
 +
 +    if !cx.global_asm.is_empty() {
 +        tcx.sess.fatal("Inline asm is not supported in JIT mode");
 +    }
 +
 +    tcx.sess.abort_if_errors();
 +
 +    jit_module.finalize_definitions();
 +    unsafe { cx.unwind_context.register_jit(&jit_module) };
 +
 +    println!(
 +        "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
 +    );
 +
 +    let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
 +        .chain(backend_config.jit_args.iter().map(|arg| &**arg))
 +        .map(|arg| CString::new(arg).unwrap())
 +        .collect::<Vec<_>>();
 +    let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
 +
 +    // Push a null pointer as a terminating argument. This is required by POSIX and
 +    // useful as some dynamic linkers use it as a marker to jump over.
 +    argv.push(std::ptr::null());
 +
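 +    // Declare the executable's `main` symbol with the C `(argc, argv)` signature and call it
 +    // through a function pointer below.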
 +    let start_sig = Signature {
 +        params: vec![
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +            AbiParam::new(jit_module.target_config().pointer_type()),
 +        ],
 +        returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
 +        call_conv: CallConv::triple_default(&crate::target_triple(tcx.sess)),
 +    };
 +    let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
 +    let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
 +
 +    LAZY_JIT_STATE.with(|lazy_jit_state| {
 +        let mut lazy_jit_state = lazy_jit_state.borrow_mut();
 +        assert!(lazy_jit_state.is_none());
 +        *lazy_jit_state = Some(JitState { backend_config, jit_module });
 +    });
 +
 +    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
 +        unsafe { ::std::mem::transmute(finalized_start) };
 +    let ret = f(args.len() as c_int, argv.as_ptr());
 +    std::process::exit(ret);
 +}
 +
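 +/// Called by the trampolines generated by [`codegen_shim`] to compile a function the first time
 +/// it is called and return a pointer to the compiled code.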
 +#[no_mangle]
 +extern "C" fn __clif_jit_fn(instance_ptr: *const Instance<'static>) -> *const u8 {
 +    rustc_middle::ty::tls::with(|tcx| {
 +        // `lift` is used to ensure the correct lifetime for `instance`.
 +        let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
 +
 +        LAZY_JIT_STATE.with(|lazy_jit_state| {
 +            let mut lazy_jit_state = lazy_jit_state.borrow_mut();
 +            let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
 +            let jit_module = &mut lazy_jit_state.jit_module;
 +            let backend_config = lazy_jit_state.backend_config.clone();
 +
 +            let name = tcx.symbol_name(instance).name;
 +            let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
 +            let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
 +            jit_module.prepare_for_function_redefine(func_id).unwrap();
 +
 +            let mut cx = crate::CodegenCx::new(tcx, backend_config, jit_module.isa(), false);
 +            tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
 +
 +            assert!(cx.global_asm.is_empty());
 +            jit_module.finalize_definitions();
 +            unsafe { cx.unwind_context.register_jit(&jit_module) };
 +            jit_module.get_finalized_function(func_id)
 +        })
 +    })
 +}
 +
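 +/// Loads every dylib dependency of the executable with `libloading` and collects their global
 +/// dynamic symbols so the JIT can resolve references to them.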
 +fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
 +    use rustc_middle::middle::dependency_format::Linkage;
 +
 +    let mut dylib_paths = Vec::new();
 +
 +    let crate_info = CrateInfo::new(tcx);
 +    let formats = tcx.dependency_formats(());
 +    let data = &formats
 +        .iter()
 +        .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
 +        .unwrap()
 +        .1;
 +    for &(cnum, _) in &crate_info.used_crates_dynamic {
 +        let src = &crate_info.used_crate_source[&cnum];
 +        match data[cnum.as_usize() - 1] {
 +            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
 +            Linkage::Static => {
 +                let name = tcx.crate_name(cnum);
 +                let mut err =
 +                    tcx.sess.struct_err(&format!("Can't load static lib {}", name.as_str()));
 +                err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
 +                err.emit();
 +            }
 +            Linkage::Dynamic => {
 +                dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
 +            }
 +        }
 +    }
 +
 +    let mut imported_symbols = Vec::new();
 +    for path in dylib_paths {
 +        use object::{Object, ObjectSymbol};
 +        let lib = libloading::Library::new(&path).unwrap();
 +        let obj = std::fs::read(path).unwrap();
++        let obj = object::File::parse(&*obj).unwrap();
 +        imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
 +            let name = symbol.name().unwrap().to_string();
 +            if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
 +                return None;
 +            }
 +            if name.starts_with("rust_metadata_") {
 +                // The metadata is part of a section that is not loaded by the dynamic linker in
 +                // the case of cg_llvm.
 +                return None;
 +            }
 +            let dlsym_name = if cfg!(target_os = "macos") {
 +                // On macOS `dlsym` expects the name without leading `_`.
 +                assert!(name.starts_with('_'), "{:?}", name);
 +                &name[1..]
 +            } else {
 +                &name
 +            };
 +            let symbol: libloading::Symbol<'_, *const u8> =
 +                unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
 +            Some((name, *symbol))
 +        }));
 +        std::mem::forget(lib)
 +    }
 +
 +    tcx.sess.abort_if_errors();
 +
 +    imported_symbols
 +}
 +
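 +/// Defines a trampoline for `inst` that calls [`__clif_jit_fn`] to compile the real function
 +/// when it is first called and then forwards the original arguments to the compiled code.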
 +fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
 +    let tcx = cx.tcx;
 +
 +    let pointer_type = module.target_config().pointer_type();
 +
 +    let name = tcx.symbol_name(inst).name;
 +    let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
 +    let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
 +
 +    let instance_ptr = Box::into_raw(Box::new(inst));
 +
 +    let jit_fn = module
 +        .declare_function(
 +            "__clif_jit_fn",
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: module.target_config().default_call_conv,
 +                params: vec![AbiParam::new(pointer_type)],
 +                returns: vec![AbiParam::new(pointer_type)],
 +            },
 +        )
 +        .unwrap();
 +
 +    cx.cached_context.clear();
 +    let trampoline = &mut cx.cached_context.func;
 +    trampoline.signature = sig.clone();
 +
 +    let mut builder_ctx = FunctionBuilderContext::new();
 +    let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
 +
 +    let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
 +    let sig_ref = trampoline_builder.func.import_signature(sig);
 +
 +    let entry_block = trampoline_builder.create_block();
 +    trampoline_builder.append_block_params_for_function_params(entry_block);
 +    let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
 +
 +    trampoline_builder.switch_to_block(entry_block);
 +    let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
 +    let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr]);
 +    let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
 +    let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
 +    let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
 +    trampoline_builder.ins().return_(&ret_vals);
 +
 +    module
 +        .define_function(
 +            func_id,
 +            &mut cx.cached_context,
 +            &mut NullTrapSink {},
 +            &mut NullStackMapSink {},
 +        )
 +        .unwrap();
 +}
index 4ab4c2957ca4e3f29fa06902f9332d26712e9b86,0000000000000000000000000000000000000000..09c5e6031c78dce37d6f985a7c901466f0bb84f9
mode 100644,000000..100644
--- /dev/null
@@@ -1,324 -1,0 +1,336 @@@
-     } else if template[0] == InlineAsmTemplatePiece::String("mov rsi, rbx".to_string())
-         && template[1] == InlineAsmTemplatePiece::String("\n".to_string())
-         && template[2] == InlineAsmTemplatePiece::String("cpuid".to_string())
-         && template[3] == InlineAsmTemplatePiece::String("\n".to_string())
-         && template[4] == InlineAsmTemplatePiece::String("xchg rsi, rbx".to_string())
 +//! Codegen of [`asm!`] invocations.
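 +//!
 +//! Apart from a few special-cased templates, each `asm!` invocation is compiled into a separate
 +//! assembly wrapper emitted as global asm; inputs and outputs are exchanged through a stack
 +//! scratch slot (see `generate_asm_wrapper` and `call_inline_asm`).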
 +
 +use crate::prelude::*;
 +
 +use std::fmt::Write;
 +
 +use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 +use rustc_middle::mir::InlineAsmOperand;
 +use rustc_target::asm::*;
 +
 +pub(crate) fn codegen_inline_asm<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    _span: Span,
 +    template: &[InlineAsmTemplatePiece],
 +    operands: &[InlineAsmOperand<'tcx>],
 +    options: InlineAsmOptions,
 +) {
 +    // FIXME add .eh_frame unwind info directives
 +
 +    if template.is_empty() {
 +        // Black box
 +        return;
 +    } else if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
 +        let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
 +        return;
-         let (leaf, eax_place) = match operands[0] {
++    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
++        && matches!(
++            template[1],
++            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
++        )
++        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
++        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
++        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
++        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
++        && matches!(
++            template[6],
++            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
++        )
 +    {
 +        assert_eq!(operands.len(), 4);
-         let ebx_place = match operands[1] {
++        let (leaf, eax_place) = match operands[1] {
 +            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
 +                let reg = expect_reg(reg);
 +                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::ax));
 +                (
 +                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
 +                    crate::base::codegen_place(fx, out_place.unwrap()),
 +                )
 +            }
 +            _ => unreachable!(),
 +        };
-                 let reg = expect_reg(reg);
-                 assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::si));
++        let ebx_place = match operands[0] {
 +            InlineAsmOperand::Out { reg, late: true, place } => {
++                assert_eq!(
++                    reg,
++                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
++                        X86InlineAsmRegClass::reg
++                    ))
++                );
 +                crate::base::codegen_place(fx, place.unwrap())
 +            }
 +            _ => unreachable!(),
 +        };
 +        let (sub_leaf, ecx_place) = match operands[2] {
 +            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
 +                let reg = expect_reg(reg);
 +                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::cx));
 +                (
 +                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
 +                    crate::base::codegen_place(fx, out_place.unwrap()),
 +                )
 +            }
 +            _ => unreachable!(),
 +        };
 +        let edx_place = match operands[3] {
 +            InlineAsmOperand::Out { reg, late: true, place } => {
 +                let reg = expect_reg(reg);
 +                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::dx));
 +                crate::base::codegen_place(fx, place.unwrap())
 +            }
 +            _ => unreachable!(),
 +        };
 +
 +        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
 +
 +        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
 +        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
 +        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
 +        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
 +        return;
 +    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
 +        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
 +        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
 +    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
 +        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
 +    }
 +
 +    let mut slot_size = Size::from_bytes(0);
 +    let mut clobbered_regs = Vec::new();
 +    let mut inputs = Vec::new();
 +    let mut outputs = Vec::new();
 +
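 +    // Allocate an offset in the scratch slot for a value of the given register class, aligned to
 +    // and sized for the largest type the class supports.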
 +    let mut new_slot = |reg_class: InlineAsmRegClass| {
 +        let reg_size = reg_class
 +            .supported_types(InlineAsmArch::X86_64)
 +            .iter()
 +            .map(|(ty, _)| ty.size())
 +            .max()
 +            .unwrap();
 +        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
 +        slot_size = slot_size.align_to(align);
 +        let offset = slot_size;
 +        slot_size += reg_size;
 +        offset
 +    };
 +
 +    // FIXME overlap input and output slots to save stack space
 +    for operand in operands {
 +        match *operand {
 +            InlineAsmOperand::In { reg, ref value } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                inputs.push((
 +                    reg,
 +                    new_slot(reg.reg_class()),
 +                    crate::base::codegen_operand(fx, value).load_scalar(fx),
 +                ));
 +            }
 +            InlineAsmOperand::Out { reg, late: _, place } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                if let Some(place) = place {
 +                    outputs.push((
 +                        reg,
 +                        new_slot(reg.reg_class()),
 +                        crate::base::codegen_place(fx, place),
 +                    ));
 +                }
 +            }
 +            InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
 +                let reg = expect_reg(reg);
 +                clobbered_regs.push((reg, new_slot(reg.reg_class())));
 +                inputs.push((
 +                    reg,
 +                    new_slot(reg.reg_class()),
 +                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
 +                ));
 +                if let Some(out_place) = out_place {
 +                    outputs.push((
 +                        reg,
 +                        new_slot(reg.reg_class()),
 +                        crate::base::codegen_place(fx, out_place),
 +                    ));
 +                }
 +            }
 +            InlineAsmOperand::Const { value: _ } => todo!(),
 +            InlineAsmOperand::SymFn { value: _ } => todo!(),
 +            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
 +        }
 +    }
 +
 +    let inline_asm_index = fx.inline_asm_index;
 +    fx.inline_asm_index += 1;
 +    let asm_name = format!("{}__inline_asm_{}", fx.symbol_name, inline_asm_index);
 +
 +    let generated_asm = generate_asm_wrapper(
 +        &asm_name,
 +        InlineAsmArch::X86_64,
 +        options,
 +        template,
 +        clobbered_regs,
 +        &inputs,
 +        &outputs,
 +    );
 +    fx.cx.global_asm.push_str(&generated_asm);
 +
 +    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
 +}
 +
 +fn generate_asm_wrapper(
 +    asm_name: &str,
 +    arch: InlineAsmArch,
 +    options: InlineAsmOptions,
 +    template: &[InlineAsmTemplatePiece],
 +    clobbered_regs: Vec<(InlineAsmReg, Size)>,
 +    inputs: &[(InlineAsmReg, Size, Value)],
 +    outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
 +) -> String {
 +    let mut generated_asm = String::new();
 +    writeln!(generated_asm, ".globl {}", asm_name).unwrap();
 +    writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
 +    writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
 +    writeln!(generated_asm, "{}:", asm_name).unwrap();
 +
 +    generated_asm.push_str(".intel_syntax noprefix\n");
 +    generated_asm.push_str("    push rbp\n");
 +    generated_asm.push_str("    mov rbp,rdi\n");
 +
 +    // Save clobbered registers
 +    if !options.contains(InlineAsmOptions::NORETURN) {
 +        // FIXME skip registers saved by the calling convention
 +        for &(reg, offset) in &clobbered_regs {
 +            save_register(&mut generated_asm, arch, reg, offset);
 +        }
 +    }
 +
 +    // Write input registers
 +    for &(reg, offset, _value) in inputs {
 +        restore_register(&mut generated_asm, arch, reg, offset);
 +    }
 +
 +    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
 +        generated_asm.push_str(".att_syntax\n");
 +    }
 +
 +    // The actual inline asm
 +    for piece in template {
 +        match piece {
 +            InlineAsmTemplatePiece::String(s) => {
 +                generated_asm.push_str(s);
 +            }
 +            InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
 +        }
 +    }
 +    generated_asm.push('\n');
 +
 +    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
 +        generated_asm.push_str(".intel_syntax noprefix\n");
 +    }
 +
 +    if !options.contains(InlineAsmOptions::NORETURN) {
 +        // Read output registers
 +        for &(reg, offset, _place) in outputs {
 +            save_register(&mut generated_asm, arch, reg, offset);
 +        }
 +
 +        // Restore clobbered registers
 +        for &(reg, offset) in clobbered_regs.iter().rev() {
 +            restore_register(&mut generated_asm, arch, reg, offset);
 +        }
 +
 +        generated_asm.push_str("    pop rbp\n");
 +        generated_asm.push_str("    ret\n");
 +    } else {
 +        generated_asm.push_str("    ud2\n");
 +    }
 +
 +    generated_asm.push_str(".att_syntax\n");
 +    writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
 +    generated_asm.push_str(".text\n");
 +    generated_asm.push_str("\n\n");
 +
 +    generated_asm
 +}
 +
 +fn call_inline_asm<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    asm_name: &str,
 +    slot_size: Size,
 +    inputs: Vec<(InlineAsmReg, Size, Value)>,
 +    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
 +) {
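 +    // Create a scratch stack slot; input values are stored into it before the call and output
 +    // values are loaded back from it afterwards. Its address is passed as the wrapper's only
 +    // argument.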
 +    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
 +        kind: StackSlotKind::ExplicitSlot,
 +        offset: None,
 +        size: u32::try_from(slot_size.bytes()).unwrap(),
 +    });
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(stack_slot, "inline asm scratch slot");
 +    }
 +
 +    let inline_asm_func = fx
 +        .module
 +        .declare_function(
 +            asm_name,
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: CallConv::SystemV,
 +                params: vec![AbiParam::new(fx.pointer_type)],
 +                returns: vec![],
 +            },
 +        )
 +        .unwrap();
 +    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(inline_asm_func, asm_name);
 +    }
 +
 +    for (_reg, offset, value) in inputs {
 +        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
 +    }
 +
 +    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
 +    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
 +
 +    for (_reg, offset, place) in outputs {
 +        let ty = fx.clif_type(place.layout().ty).unwrap();
 +        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
 +        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
 +    }
 +}
 +
 +fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
 +    match reg_or_class {
 +        InlineAsmRegOrRegClass::Reg(reg) => reg,
 +        InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
 +    }
 +}
 +
 +fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
 +    match arch {
 +        InlineAsmArch::X86_64 => {
 +            write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
 +            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
 +            generated_asm.push('\n');
 +        }
 +        _ => unimplemented!("save_register for {:?}", arch),
 +    }
 +}
 +
 +fn restore_register(
 +    generated_asm: &mut String,
 +    arch: InlineAsmArch,
 +    reg: InlineAsmReg,
 +    offset: Size,
 +) {
 +    match arch {
 +        InlineAsmArch::X86_64 => {
 +            generated_asm.push_str("    mov ");
 +            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
 +            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
 +        }
 +        _ => unimplemented!("restore_register for {:?}", arch),
 +    }
 +}
index 9de12e759bcc8826d8dca15c789cccc5c37f80e7,0000000000000000000000000000000000000000..d02dfd93c3ee3c533aa2b27a601306a59a757c6d
mode 100644,000000..100644
--- /dev/null
@@@ -1,67 -1,0 +1,74 @@@
 +//! Emulation of a subset of the cpuid x86 instruction.
 +
 +use crate::prelude::*;
 +
 +/// Emulates a subset of the cpuid x86 instruction.
 +///
 +/// This emulates an Intel CPU with SSE and SSE2 support, but no other features.
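 +///
 +/// Leaves 0, 1, 7, 0x8000_0000 and 0x8000_0001 are handled; any other leaf traps as unsupported.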
 +pub(crate) fn codegen_cpuid_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    leaf: Value,
 +    _sub_leaf: Value,
 +) -> (Value, Value, Value, Value) {
 +    let leaf_0 = fx.bcx.create_block();
 +    let leaf_1 = fx.bcx.create_block();
++    let leaf_7 = fx.bcx.create_block();
 +    let leaf_8000_0000 = fx.bcx.create_block();
 +    let leaf_8000_0001 = fx.bcx.create_block();
 +    let unsupported_leaf = fx.bcx.create_block();
 +
 +    let dest = fx.bcx.create_block();
 +    let eax = fx.bcx.append_block_param(dest, types::I32);
 +    let ebx = fx.bcx.append_block_param(dest, types::I32);
 +    let ecx = fx.bcx.append_block_param(dest, types::I32);
 +    let edx = fx.bcx.append_block_param(dest, types::I32);
 +
 +    let mut switch = cranelift_frontend::Switch::new();
 +    switch.set_entry(0, leaf_0);
 +    switch.set_entry(1, leaf_1);
++    switch.set_entry(7, leaf_7);
 +    switch.set_entry(0x8000_0000, leaf_8000_0000);
 +    switch.set_entry(0x8000_0001, leaf_8000_0001);
 +    switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
 +
 +    fx.bcx.switch_to_block(leaf_0);
 +    let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
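 +    // Vendor string "GenuineIntel": EBX = "Genu", EDX = "ineI", ECX = "ntel".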
 +    let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
 +    let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
 +    let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
 +    fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
 +
 +    fx.bcx.switch_to_block(leaf_1);
 +    let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
 +    let additional_information = fx.bcx.ins().iconst(types::I32, 0);
 +    let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
 +    let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
 +    fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
 +
++    fx.bcx.switch_to_block(leaf_7);
++    // This leaf technically has subleaves, but we just return zero for all subleaves.
++    let zero = fx.bcx.ins().iconst(types::I32, 0);
++    fx.bcx.ins().jump(dest, &[zero, zero, zero, zero]);
++
 +    fx.bcx.switch_to_block(leaf_8000_0000);
 +    let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
 +    let zero = fx.bcx.ins().iconst(types::I32, 0);
 +    fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
 +
 +    fx.bcx.switch_to_block(leaf_8000_0001);
 +    let zero = fx.bcx.ins().iconst(types::I32, 0);
 +    let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
 +    let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
 +    fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
 +
 +    fx.bcx.switch_to_block(unsupported_leaf);
 +    crate::trap::trap_unreachable(
 +        fx,
 +        "__cpuid_count arch intrinsic doesn't yet support specified leaf",
 +    );
 +
 +    fx.bcx.switch_to_block(dest);
 +    fx.bcx.ins().nop();
 +
 +    (eax, ebx, ecx, edx)
 +}
index 435737f3a513b511c1788db6cc663994fd6b7886,0000000000000000000000000000000000000000..52896fc7127e8a32fffbef7ae7f4ed4a9880adc2
mode 100644,000000..100644
--- /dev/null
@@@ -1,1126 -1,0 +1,1126 @@@
- use rustc_span::symbol::{sym, kw};
 +//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
 +//! and LLVM intrinsics that have symbol names starting with `llvm.`.
 +
 +mod cpuid;
 +mod llvm;
 +mod simd;
 +
 +pub(crate) use cpuid::codegen_cpuid_call;
 +pub(crate) use llvm::codegen_llvm_intrinsic_call;
 +
 +use rustc_middle::ty::print::with_no_trimmed_paths;
++use rustc_span::symbol::{kw, sym};
 +
 +use crate::prelude::*;
 +use cranelift_codegen::ir::AtomicRmwOp;
 +
 +macro intrinsic_pat {
 +    (_) => {
 +        _
 +    },
 +    ($name:ident) => {
 +        sym::$name
 +    },
 +    (kw.$name:ident) => {
 +        kw::$name
 +    },
 +    ($name:literal) => {
 +        $name
 +    },
 +}
 +
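 +// `o` passes the operand through unchanged, `c` codegens it to a `CValue` and `v` additionally
 +// loads it as a scalar `Value`.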
 +macro intrinsic_arg {
 +    (o $fx:expr, $arg:ident) => {
 +        $arg
 +    },
 +    (c $fx:expr, $arg:ident) => {
 +        codegen_operand($fx, $arg)
 +    },
 +    (v $fx:expr, $arg:ident) => {
 +        codegen_operand($fx, $arg).load_scalar($fx)
 +    }
 +}
 +
 +macro intrinsic_substs {
 +    ($substs:expr, $index:expr,) => {},
 +    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
 +        let $first = $substs.type_at($index);
 +        intrinsic_substs!($substs, $index+1, $($rest),*);
 +    }
 +}
 +
 +macro intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
 +    _ => $unknown:block;
 +    $(
 +        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
 +    )*) => {
 +        let _ = $substs; // Silence warning when substs is unused.
 +        match $intrinsic {
 +            $(
 +                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
 +                    #[allow(unused_parens, non_snake_case)]
 +                    {
 +                        $(
 +                            intrinsic_substs!($substs, 0, $($subst),*);
 +                        )?
 +                        if let [$($arg),*] = $args {
 +                            let ($($arg,)*) = (
 +                                $(intrinsic_arg!($a $fx, $arg),)*
 +                            );
 +                            #[warn(unused_parens, non_snake_case)]
 +                            {
 +                                $content
 +                            }
 +                        } else {
 +                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                        }
 +                    }
 +                }
 +            )*
 +            _ => $unknown,
 +        }
 +    }
 +}
 +
 +macro call_intrinsic_match {
 +    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
 +        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
 +    )*) => {
 +        match $intrinsic {
 +            $(
 +                sym::$name => {
 +                    assert!($substs.is_noop());
 +                    if let [$(ref $arg),*] = *$args {
 +                        let ($($arg,)*) = (
 +                            $(codegen_operand($fx, $arg),)*
 +                        );
 +                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
 +                        $ret.write_cvalue($fx, res);
 +
 +                        if let Some((_, dest)) = $destination {
 +                            let ret_block = $fx.get_block(dest);
 +                            $fx.bcx.ins().jump(ret_block, &[]);
 +                            return;
 +                        } else {
 +                            unreachable!();
 +                        }
 +                    } else {
 +                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
 +                    }
 +                }
 +            )*
 +            _ => {}
 +        }
 +    }
 +}
 +
 +macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    match $ty.kind() {
 +        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
 +        _ => {
 +            $fx.tcx.sess.span_err(
 +                $span,
 +                &format!(
 +                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
 +                    $intrinsic, $ty
 +                ),
 +            );
 +            // Prevent verifier error
 +            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +            return;
 +        }
 +    }
 +}
 +
 +macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 +    if !$ty.is_simd() {
 +        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
 +        // Prevent verifier error
 +        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
 +        return;
 +    }
 +}
 +
 +pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
 +    let (element, count) = match &layout.abi {
 +        Abi::Vector { element, count } => (element.clone(), *count),
 +        _ => unreachable!(),
 +    };
 +
 +    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
 +        // Cranelift currently only implements icmp for 128bit vectors.
 +        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
 +        _ => None,
 +    }
 +}
 +
 +fn simd_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, '_, 'tcx>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    let layout = val.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane_idx in 0..lane_count {
 +        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
 +        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
 +
 +        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_pair_for_each_lane<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    x: CValue<'tcx>,
 +    y: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(
 +        &mut FunctionCx<'_, '_, 'tcx>,
 +        TyAndLayout<'tcx>,
 +        TyAndLayout<'tcx>,
 +        Value,
 +        Value,
 +    ) -> CValue<'tcx>,
 +) {
 +    assert_eq!(x.layout(), y.layout());
 +    let layout = x.layout();
 +
 +    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
 +    let ret_lane_layout = fx.layout_of(ret_lane_ty);
 +    assert_eq!(lane_count, ret_lane_count);
 +
 +    for lane in 0..lane_count {
 +        let lane = mir::Field::new(lane.try_into().unwrap());
 +        let x_lane = x.value_field(fx, lane).load_scalar(fx);
 +        let y_lane = y.value_field(fx, lane).load_scalar(fx);
 +
 +        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
 +
 +        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
 +    }
 +}
 +
 +fn simd_reduce<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    let lane_layout = fx.layout_of(lane_ty);
 +    assert_eq!(lane_layout, ret.layout());
 +
 +    let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
 +    for lane_idx in 1..lane_count {
 +        let lane =
 +            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
 +        res_val = f(fx, lane_layout, res_val, lane);
 +    }
 +    let res = CValue::by_val(res_val, lane_layout);
 +    ret.write_cvalue(fx, res);
 +}
 +
 +fn simd_reduce_bool<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    val: CValue<'tcx>,
 +    ret: CPlace<'tcx>,
 +    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
 +) {
 +    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
 +    assert!(ret.layout().ty.is_bool());
 +
 +    let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
 +    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
 +    for lane_idx in 1..lane_count {
 +        let lane =
 +            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
 +        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
 +        res_val = f(fx, res_val, lane);
 +    }
 +    let res = CValue::by_val(res_val, ret.layout());
 +    ret.write_cvalue(fx, res);
 +}
 +
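 +/// Turns a boolean [`Value`] into all-zeros or all-ones of the given layout, bitcasting the
 +/// result back to a float type where necessary.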
 +fn bool_to_zero_or_max_uint<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    layout: TyAndLayout<'tcx>,
 +    val: Value,
 +) -> CValue<'tcx> {
 +    let ty = fx.clif_type(layout.ty).unwrap();
 +
 +    let int_ty = match ty {
 +        types::F32 => types::I32,
 +        types::F64 => types::I64,
 +        ty => ty,
 +    };
 +
 +    let val = fx.bcx.ins().bint(int_ty, val);
 +    let mut res = fx.bcx.ins().ineg(val);
 +
 +    if ty.is_float() {
 +        res = fx.bcx.ins().bitcast(ty, res);
 +    }
 +
 +    CValue::by_val(res, layout)
 +}
 +
 +macro simd_cmp {
 +    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
 +
 +        if let Some(vector_ty) = vector_ty {
 +            let x = $x.load_scalar($fx);
 +            let y = $y.load_scalar($fx);
 +            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
 +
 +            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
 +            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
 +
 +            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
 +        } else {
 +            simd_pair_for_each_lane(
 +                $fx,
 +                $x,
 +                $y,
 +                $ret,
 +                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                    let res_lane = match lane_layout.ty.kind() {
 +                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
 +                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
 +                        _ => unreachable!("{:?}", lane_layout.ty),
 +                    };
 +                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +                },
 +            );
 +        }
 +    },
 +    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        // FIXME use vector icmp when possible
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
 +                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_binop {
 +    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_int_flt_binop {
 +    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
 +    },
 +    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
 +        simd_pair_for_each_lane(
 +            $fx,
 +            $x,
 +            $y,
 +            $ret,
 +            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +                let res_lane = match lane_layout.ty.kind() {
 +                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
 +                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
 +                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
 +                    _ => unreachable!("{:?}", lane_layout.ty),
 +                };
 +                CValue::by_val(res_lane, ret_lane_layout)
 +            },
 +        );
 +    },
 +}
 +
 +macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
 +    simd_pair_for_each_lane(
 +        $fx,
 +        $x,
 +        $y,
 +        $ret,
 +        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
 +            let res_lane = match lane_layout.ty.kind() {
 +                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
 +                _ => unreachable!("{:?}", lane_layout.ty),
 +            };
 +            CValue::by_val(res_lane, ret_lane_layout)
 +        },
 +    );
 +}
 +
 +pub(crate) fn codegen_intrinsic_call<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    instance: Instance<'tcx>,
 +    args: &[mir::Operand<'tcx>],
 +    destination: Option<(CPlace<'tcx>, BasicBlock)>,
 +    span: Span,
 +) {
 +    let def_id = instance.def_id();
 +    let substs = instance.substs;
 +
 +    let intrinsic = fx.tcx.item_name(def_id);
 +
 +    let ret = match destination {
 +        Some((place, _)) => place,
 +        None => {
 +            // Insert non-returning intrinsics here
 +            match intrinsic {
 +                sym::abort => {
 +                    trap_abort(fx, "Called intrinsic::abort.");
 +                }
 +                sym::transmute => {
 +                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
 +                }
 +                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
 +            }
 +            return;
 +        }
 +    };
 +
 +    if intrinsic.as_str().starts_with("simd_") {
 +        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
 +        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +        return;
 +    }
 +
 +    let usize_layout = fx.layout_of(fx.tcx.types.usize);
 +
 +    call_intrinsic_match! {
 +        fx, intrinsic, substs, ret, destination, args,
 +        expf32(flt) -> f32 => expf,
 +        expf64(flt) -> f64 => exp,
 +        exp2f32(flt) -> f32 => exp2f,
 +        exp2f64(flt) -> f64 => exp2,
 +        sqrtf32(flt) -> f32 => sqrtf,
 +        sqrtf64(flt) -> f64 => sqrt,
 +        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
 +        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
 +        powf32(a, x) -> f32 => powf,
 +        powf64(a, x) -> f64 => pow,
 +        logf32(flt) -> f32 => logf,
 +        logf64(flt) -> f64 => log,
 +        log2f32(flt) -> f32 => log2f,
 +        log2f64(flt) -> f64 => log2,
 +        log10f32(flt) -> f32 => log10f,
 +        log10f64(flt) -> f64 => log10,
 +        fabsf32(flt) -> f32 => fabsf,
 +        fabsf64(flt) -> f64 => fabs,
 +        fmaf32(x, y, z) -> f32 => fmaf,
 +        fmaf64(x, y, z) -> f64 => fma,
 +        copysignf32(x, y) -> f32 => copysignf,
 +        copysignf64(x, y) -> f64 => copysign,
 +
 +        // rounding variants
 +        // FIXME use clif insts
 +        floorf32(flt) -> f32 => floorf,
 +        floorf64(flt) -> f64 => floor,
 +        ceilf32(flt) -> f32 => ceilf,
 +        ceilf64(flt) -> f64 => ceil,
 +        truncf32(flt) -> f32 => truncf,
 +        truncf64(flt) -> f64 => trunc,
 +        roundf32(flt) -> f32 => roundf,
 +        roundf64(flt) -> f64 => round,
 +
 +        // trigonometry
 +        sinf32(flt) -> f32 => sinf,
 +        sinf64(flt) -> f64 => sin,
 +        cosf32(flt) -> f32 => cosf,
 +        cosf64(flt) -> f64 => cos,
 +    }
 +
 +    intrinsic_match! {
 +        fx, intrinsic, substs, args,
 +        _ => {
 +            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
 +        };
 +
 +        assume, (c _a) {};
 +        likely | unlikely, (c a) {
 +            ret.write_cvalue(fx, a);
 +        };
 +        breakpoint, () {
 +            fx.bcx.ins().debugtrap();
 +        };
 +        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = if elem_size != 1 {
 +                fx.bcx.ins().imul_imm(count, elem_size as i64)
 +            } else {
 +                count
 +            };
 +
 +            if intrinsic == sym::copy_nonoverlapping {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        // NOTE: the volatile variants have src and dst swapped
 +        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
 +            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
 +            assert_eq!(args.len(), 3);
 +            let byte_amount = if elem_size != 1 {
 +                fx.bcx.ins().imul_imm(count, elem_size as i64)
 +            } else {
 +                count
 +            };
 +
 +            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
 +            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
 +                // FIXME emit_small_memcpy
 +                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
 +            } else {
 +                // FIXME emit_small_memmove
 +                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
 +            }
 +        };
 +        size_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let size = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                size
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
 +        };
 +        min_align_of_val, <T> (c ptr) {
 +            let layout = fx.layout_of(T);
 +            let align = if layout.is_unsized() {
 +                let (_ptr, info) = ptr.load_scalar_pair(fx);
 +                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
 +                align
 +            } else {
 +                fx
 +                    .bcx
 +                    .ins()
 +                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
 +            };
 +            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
 +        };
 +
 +        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
 +        | unchecked_shl | unchecked_shr, (c x, c y) {
 +            // FIXME trap on overflow
 +            let bin_op = match intrinsic {
 +                sym::unchecked_add => BinOp::Add,
 +                sym::unchecked_sub => BinOp::Sub,
 +                sym::unchecked_div | sym::exact_div => BinOp::Div,
 +                sym::unchecked_rem => BinOp::Rem,
 +                sym::unchecked_shl => BinOp::Shl,
 +                sym::unchecked_shr => BinOp::Shr,
 +                _ => unreachable!(),
 +            };
 +            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
 +            assert_eq!(x.layout().ty, y.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::add_with_overflow => BinOp::Add,
 +                sym::sub_with_overflow => BinOp::Sub,
 +                sym::mul_with_overflow => BinOp::Mul,
 +                _ => unreachable!(),
 +            };
 +
 +            let res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                x,
 +                y,
 +            );
 +            ret.write_cvalue(fx, res);
 +        };
 +        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
 +            assert_eq!(lhs.layout().ty, rhs.layout().ty);
 +            let bin_op = match intrinsic {
 +                sym::saturating_add => BinOp::Add,
 +                sym::saturating_sub => BinOp::Sub,
 +                _ => unreachable!(),
 +            };
 +
 +            let signed = type_sign(T);
 +
 +            let checked_res = crate::num::codegen_checked_int_binop(
 +                fx,
 +                bin_op,
 +                lhs,
 +                rhs,
 +            );
 +
 +            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
 +            let clif_ty = fx.clif_type(T).unwrap();
 +
 +            // `select.i8` is not implemented by Cranelift.
 +            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
 +
 +            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
 +
 +            let val = match (intrinsic, signed) {
 +                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
 +                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
 +                (sym::saturating_add, true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                (sym::saturating_sub, true) => {
 +                    let rhs = rhs.load_scalar(fx);
 +                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
 +                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
 +                    fx.bcx.ins().select(has_overflow, sat_val, val)
 +                }
 +                _ => unreachable!(),
 +            };
 +
 +            let res = CValue::by_val(val, fx.layout_of(T));
 +
 +            ret.write_cvalue(fx, res);
 +        };
 +        rotate_left, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
 +            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
 +                fx.bcx.ins().ireduce(types::I64, y)
 +            } else {
 +                y
 +            };
 +            let res = fx.bcx.ins().rotl(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +        rotate_right, <T>(v x, v y) {
 +            let layout = fx.layout_of(T);
 +            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
 +                fx.bcx.ins().ireduce(types::I64, y)
 +            } else {
 +                y
 +            };
 +            let res = fx.bcx.ins().rotr(x, y);
 +            ret.write_cvalue(fx, CValue::by_val(res, layout));
 +        };
 +
 +        // The only difference between offset and arith_offset is with regard to UB. Because
 +        // Cranelift doesn't have UB, both are codegen'ed the same way.
 +        offset | arith_offset, (c base, v offset) {
 +            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let ptr_diff = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
 +            } else {
 +                offset
 +            };
 +            let base_val = base.load_scalar(fx);
 +            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
 +            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
 +        };
 +
 +        transmute, (c from) {
 +            ret.write_cvalue_transmute(fx, from);
 +        };
 +        write_bytes | volatile_set_memory, (c dst, v val, v count) {
 +            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
 +            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
 +            let count = if pointee_size != 1 {
 +                fx.bcx.ins().imul_imm(count, pointee_size as i64)
 +            } else {
 +                count
 +            };
 +            let dst_ptr = dst.load_scalar(fx);
 +            // FIXME make the memset actually volatile when switching to emit_small_memset
 +            // FIXME use emit_small_memset
 +            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
 +        };
 +        ctlz | ctlz_nonzero, <T> (v arg) {
 +            // FIXME trap on `ctlz_nonzero` with zero arg.
 +            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
 +                // FIXME verify this algorithm is correct
 +                let (lsb, msb) = fx.bcx.ins().isplit(arg);
 +                let lsb_lz = fx.bcx.ins().clz(lsb);
 +                let msb_lz = fx.bcx.ins().clz(msb);
 +                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
 +                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
 +                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
 +                fx.bcx.ins().uextend(types::I128, res)
 +            } else {
 +                fx.bcx.ins().clz(arg)
 +            };
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        cttz | cttz_nonzero, <T> (v arg) {
 +            // FIXME trap on `cttz_nonzero` with zero arg.
 +            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
 +                // FIXME verify this algorithm is correct
 +                let (lsb, msb) = fx.bcx.ins().isplit(arg);
 +                let lsb_tz = fx.bcx.ins().ctz(lsb);
 +                let msb_tz = fx.bcx.ins().ctz(msb);
 +                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
 +                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
 +                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
 +                fx.bcx.ins().uextend(types::I128, res)
 +            } else {
 +                fx.bcx.ins().ctz(arg)
 +            };
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        ctpop, <T> (v arg) {
 +            let res = fx.bcx.ins().popcnt(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bitreverse, <T> (v arg) {
 +            let res = fx.bcx.ins().bitrev(arg);
 +            let res = CValue::by_val(res, fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
 +        bswap, <T> (v arg) {
 +            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
 +            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
 +                match bcx.func.dfg.value_type(v) {
 +                    types::I8 => v,
 +
 +                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
 +                    types::I16 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 8);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
 +
 +                        let tmp2 = bcx.ins().ushr_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
 +
 +                        bcx.ins().bor(n1, n2)
 +                    }
 +                    types::I32 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 24);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 8);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
 +
 +                        let tmp3 = bcx.ins().ushr_imm(v, 8);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
 +
 +                        let tmp4 = bcx.ins().ushr_imm(v, 24);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        bcx.ins().bor(or_tmp1, or_tmp2)
 +                    }
 +                    types::I64 => {
 +                        let tmp1 = bcx.ins().ishl_imm(v, 56);
 +                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
 +
 +                        let tmp2 = bcx.ins().ishl_imm(v, 40);
 +                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
 +
 +                        let tmp3 = bcx.ins().ishl_imm(v, 24);
 +                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
 +
 +                        let tmp4 = bcx.ins().ishl_imm(v, 8);
 +                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
 +
 +                        let tmp5 = bcx.ins().ushr_imm(v, 8);
 +                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
 +
 +                        let tmp6 = bcx.ins().ushr_imm(v, 24);
 +                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
 +
 +                        let tmp7 = bcx.ins().ushr_imm(v, 40);
 +                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
 +
 +                        let tmp8 = bcx.ins().ushr_imm(v, 56);
 +                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
 +
 +                        let or_tmp1 = bcx.ins().bor(n1, n2);
 +                        let or_tmp2 = bcx.ins().bor(n3, n4);
 +                        let or_tmp3 = bcx.ins().bor(n5, n6);
 +                        let or_tmp4 = bcx.ins().bor(n7, n8);
 +
 +                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
 +                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
 +                        bcx.ins().bor(or_tmp5, or_tmp6)
 +                    }
 +                    types::I128 => {
 +                        let (lo, hi) = bcx.ins().isplit(v);
 +                        let lo = swap(bcx, lo);
 +                        let hi = swap(bcx, hi);
 +                        bcx.ins().iconcat(hi, lo)
 +                    }
 +                    ty => unreachable!("bswap {}", ty),
 +                }
 +            }
 +            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
 +            ret.write_cvalue(fx, res);
 +        };
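[editor's note] Because Cranelift lacked a native byte-swap instruction at the time (see the FIXME above), the arm builds one from shifts and masks. The 32-bit sequence, transcribed into plain Rust as an illustration only, agrees with `u32::swap_bytes`:

    fn bswap32(v: u32) -> u32 {
        let n1 = (v << 24) & 0xFF00_0000; // lowest byte moves to the top
        let n2 = (v << 8) & 0x00FF_0000;
        let n3 = (v >> 8) & 0x0000_FF00;
        let n4 = (v >> 24) & 0x0000_00FF; // highest byte moves to the bottom
        (n1 | n2) | (n3 | n4)
    }

    fn main() {
        assert_eq!(bswap32(0x1234_5678), 0x7856_3412);
        assert_eq!(bswap32(0x1234_5678), 0x1234_5678u32.swap_bytes());
    }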
 +        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
 +            let layout = fx.layout_of(T);
 +            if layout.abi.is_uninhabited() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to instantiate uninhabited type `{}`", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +
 +            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
 +                with_no_trimmed_paths(|| crate::base::codegen_panic(
 +                    fx,
 +                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
 +                    span,
 +                ));
 +                return;
 +            }
 +        };
 +
 +        volatile_load | unaligned_volatile_load, (c ptr) {
 +            // Cranelift treats loads as volatile by default
 +            // FIXME correctly handle unaligned_volatile_load
 +            let inner_layout =
 +                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
 +            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
 +            ret.write_cvalue(fx, val);
 +        };
 +        volatile_store | unaligned_volatile_store, (v ptr, c val) {
 +            // Cranelift treats stores as volatile by default
 +            // FIXME correctly handle unaligned_volatile_store
 +            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
 +            dest.write_cvalue(fx, val);
 +        };
 +
 +        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
 +            let const_val =
 +                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
 +            let val = crate::constant::codegen_const_value(
 +                fx,
 +                const_val,
 +                ret.layout().ty,
 +            );
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_offset_from, <T> (v ptr, v base) {
 +            let isize_layout = fx.layout_of(fx.tcx.types.isize);
 +
 +            let pointee_size: u64 = fx.layout_of(T).size.bytes();
 +            let diff = fx.bcx.ins().isub(ptr, base);
 +            // FIXME this can be an exact division.
 +            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
 +            ret.write_cvalue(fx, val);
 +        };
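[editor's note] The lowering above is essentially the following plain-Rust computation (an illustration only; as the FIXME notes, the real intrinsic may assume the division is exact):

    fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize {
        let diff = ptr as isize - base as isize;     // isub
        diff / std::mem::size_of::<T>() as isize     // sdiv by the pointee size
    }

    fn main() {
        let xs = [0u32; 8];
        let base = xs.as_ptr();
        let ptr = unsafe { base.add(5) };
        assert_eq!(ptr_offset_from(ptr, base), 5);
    }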
 +
 +        ptr_guaranteed_eq, (c a, c b) {
 +            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        ptr_guaranteed_ne, (c a, c b) {
 +            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        caller_location, () {
 +            let caller_location = fx.get_caller_location(span);
 +            ret.write_cvalue(fx, caller_location);
 +        };
 +
 +        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
 +            fx.bcx.ins().fence();
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
 +            // FIXME use a compiler fence once Cranelift supports it
 +            fx.bcx.ins().fence();
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
 +            validate_atomic_type!(fx, intrinsic, span, T);
 +            let ty = fx.clif_type(T).unwrap();
 +
 +            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
 +
 +            let val = CValue::by_val(val, fx.layout_of(T));
 +            ret.write_cvalue(fx, val);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
 +            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
 +
 +            let val = val.load_scalar(fx);
 +
 +            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
 +            let layout = new.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
 +            let layout = new.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +
 +            let test_old = test_old.load_scalar(fx);
 +            let new = new.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
 +            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
 +
 +            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
 +            ret.write_cvalue(fx, ret_val)
 +        };
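[editor's note] The pair written to `ret` above is `(old, success)`, the same contract `compare_exchange` exposes in plain Rust. A small std-only illustration (not part of the diff):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn cxchg(cell: &AtomicU32, expected: u32, new: u32) -> (u32, bool) {
        match cell.compare_exchange(expected, new, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(old) => (old, true),    // old == expected, store happened
            Err(old) => (old, false),  // old returned unchanged
        }
    }

    fn main() {
        let cell = AtomicU32::new(1);
        assert_eq!(cxchg(&cell, 1, 5), (1, true));
        assert_eq!(cxchg(&cell, 1, 9), (5, false));
    }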
 +
 +        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
 +            let layout = amount.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
 +            let layout = amount.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let amount = amount.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +
 +        // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
 +        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
 +        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
 +            let layout = src.layout();
 +            validate_atomic_type!(fx, intrinsic, span, layout.ty);
 +            let ty = fx.clif_type(layout.ty).unwrap();
 +
 +            let src = src.load_scalar(fx);
 +
 +            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
 +
 +            let old = CValue::by_val(old, layout);
 +            ret.write_cvalue(fx, old);
 +        };
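[editor's note] Every RMW arm above hands back the value the memory held *before* the update. `AtomicU32::fetch_max` shows the same old-value contract in plain Rust (illustration only):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let cell = AtomicU32::new(3);
        let old = cell.fetch_max(7, Ordering::SeqCst); // corresponds to atomic_umax on u32
        assert_eq!(old, 3);                            // previous value is returned
        assert_eq!(cell.load(Ordering::SeqCst), 7);    // memory now holds the max
    }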
 +
 +        minnumf32, (v a, v b) {
 +            let val = fx.bcx.ins().fmin(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        minnumf64, (v a, v b) {
 +            let val = fx.bcx.ins().fmin(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf32, (v a, v b) {
 +            let val = fx.bcx.ins().fmax(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
 +            ret.write_cvalue(fx, val);
 +        };
 +        maxnumf64, (v a, v b) {
 +            let val = fx.bcx.ins().fmax(a, b);
 +            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
 +            ret.write_cvalue(fx, val);
 +        };
 +
 +        kw.Try, (v f, v data, v _catch_fn) {
 +            // FIXME once unwinding is supported, change this to actually catch panics
 +            let f_sig = fx.bcx.func.import_signature(Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
 +                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
 +                returns: vec![],
 +            });
 +
 +            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
 +
 +            let layout = ret.layout();
 +            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
 +            ret.write_cvalue(fx, ret_val);
 +        };
 +
 +        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
 +            let res = crate::num::codegen_float_binop(fx, match intrinsic {
 +                sym::fadd_fast => BinOp::Add,
 +                sym::fsub_fast => BinOp::Sub,
 +                sym::fmul_fast => BinOp::Mul,
 +                sym::fdiv_fast => BinOp::Div,
 +                sym::frem_fast => BinOp::Rem,
 +                _ => unreachable!(),
 +            }, x, y);
 +            ret.write_cvalue(fx, res);
 +        };
 +        float_to_int_unchecked, (v f) {
 +            let res = crate::cast::clif_int_or_float_cast(
 +                fx,
 +                f,
 +                false,
 +                fx.clif_type(ret.layout().ty).unwrap(),
 +                type_sign(ret.layout().ty),
 +            );
 +            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
 +        };
 +    }
 +
 +    if let Some((_, dest)) = destination {
 +        let ret_block = fx.get_block(dest);
 +        fx.bcx.ins().jump(ret_block, &[]);
 +    } else {
 +        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
 +    }
 +}
index ff6e1856059af0066f85b7f8023f9ec5e03990c8,0000000000000000000000000000000000000000..4ee887cd5afaef819a65ed333e6bbbed3e8e8b16
mode 100644,000000..100644
--- /dev/null
@@@ -1,302 -1,0 +1,306 @@@
-             let mut builder = cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
 +#![feature(rustc_private, decl_macro, never_type, hash_drain_filter, vec_into_raw_parts)]
 +#![warn(rust_2018_idioms)]
 +#![warn(unused_lifetimes)]
 +#![warn(unreachable_pub)]
 +
 +extern crate snap;
 +#[macro_use]
 +extern crate rustc_middle;
 +extern crate rustc_ast;
 +extern crate rustc_codegen_ssa;
 +extern crate rustc_data_structures;
 +extern crate rustc_errors;
 +extern crate rustc_fs_util;
 +extern crate rustc_hir;
 +extern crate rustc_incremental;
 +extern crate rustc_index;
 +extern crate rustc_session;
 +extern crate rustc_span;
 +extern crate rustc_target;
 +
 +// This prevents duplicating functions and statics that are already part of the host rustc process.
 +#[allow(unused_extern_crates)]
 +extern crate rustc_driver;
 +
 +use std::any::Any;
 +
 +use rustc_codegen_ssa::traits::CodegenBackend;
 +use rustc_codegen_ssa::CodegenResults;
 +use rustc_errors::ErrorReported;
 +use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 +use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
 +use rustc_middle::ty::query::Providers;
 +use rustc_session::config::OutputFilenames;
 +use rustc_session::Session;
 +
 +use cranelift_codegen::isa::TargetIsa;
 +use cranelift_codegen::settings::{self, Configurable};
 +
 +pub use crate::config::*;
 +use crate::prelude::*;
 +
 +mod abi;
 +mod allocator;
 +mod analyze;
 +mod archive;
 +mod backend;
 +mod base;
 +mod cast;
 +mod codegen_i128;
 +mod common;
 +mod compiler_builtins;
 +mod config;
 +mod constant;
 +mod debuginfo;
 +mod discriminant;
 +mod driver;
 +mod inline_asm;
 +mod intrinsics;
 +mod linkage;
 +mod main_shim;
 +mod metadata;
 +mod num;
 +mod optimize;
 +mod pointer;
 +mod pretty_clif;
 +mod toolchain;
 +mod trap;
 +mod unsize;
 +mod value_and_place;
 +mod vtable;
 +
 +mod prelude {
 +    pub(crate) use std::convert::{TryFrom, TryInto};
 +
 +    pub(crate) use rustc_span::Span;
 +
 +    pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 +    pub(crate) use rustc_middle::bug;
 +    pub(crate) use rustc_middle::mir::{self, *};
 +    pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
 +    pub(crate) use rustc_middle::ty::{
 +        self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
 +        TypeFoldable, UintTy,
 +    };
 +    pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
 +
 +    pub(crate) use rustc_data_structures::fx::FxHashMap;
 +
 +    pub(crate) use rustc_index::vec::Idx;
 +
 +    pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
 +    pub(crate) use cranelift_codegen::ir::function::Function;
 +    pub(crate) use cranelift_codegen::ir::types;
 +    pub(crate) use cranelift_codegen::ir::{
 +        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
 +        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
 +    };
 +    pub(crate) use cranelift_codegen::isa::{self, CallConv};
 +    pub(crate) use cranelift_codegen::Context;
 +    pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
 +    pub(crate) use cranelift_module::{self, DataContext, DataId, FuncId, Linkage, Module};
 +
 +    pub(crate) use crate::abi::*;
 +    pub(crate) use crate::base::{codegen_operand, codegen_place};
 +    pub(crate) use crate::cast::*;
 +    pub(crate) use crate::common::*;
 +    pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
 +    pub(crate) use crate::pointer::Pointer;
 +    pub(crate) use crate::trap::*;
 +    pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
 +}
 +
 +struct PrintOnPanic<F: Fn() -> String>(F);
 +impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
 +    fn drop(&mut self) {
 +        if ::std::thread::panicking() {
 +            println!("{}", (self.0)());
 +        }
 +    }
 +}
 +
 +/// The codegen context holds any information shared between the codegen of individual functions
 +/// inside a single codegen unit with the exception of the Cranelift [`Module`](cranelift_module::Module).
 +struct CodegenCx<'tcx> {
 +    tcx: TyCtxt<'tcx>,
 +    global_asm: String,
 +    cached_context: Context,
 +    debug_context: Option<DebugContext<'tcx>>,
 +    unwind_context: UnwindContext,
 +}
 +
 +impl<'tcx> CodegenCx<'tcx> {
 +    fn new(
 +        tcx: TyCtxt<'tcx>,
 +        backend_config: BackendConfig,
 +        isa: &dyn TargetIsa,
 +        debug_info: bool,
 +    ) -> Self {
 +        assert_eq!(pointer_ty(tcx), isa.pointer_type());
 +
 +        let unwind_context =
 +            UnwindContext::new(tcx, isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
 +        let debug_context = if debug_info { Some(DebugContext::new(tcx, isa)) } else { None };
 +        CodegenCx {
 +            tcx,
 +            global_asm: String::new(),
 +            cached_context: Context::new(),
 +            debug_context,
 +            unwind_context,
 +        }
 +    }
 +}
 +
 +pub struct CraneliftCodegenBackend {
 +    pub config: Option<BackendConfig>,
 +}
 +
 +impl CodegenBackend for CraneliftCodegenBackend {
 +    fn init(&self, sess: &Session) {
 +        use rustc_session::config::Lto;
 +        match sess.lto() {
 +            Lto::No | Lto::ThinLocal => {}
 +            Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
 +        }
 +    }
 +
 +    fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
 +        Box::new(rustc_codegen_ssa::back::metadata::DefaultMetadataLoader)
 +    }
 +
 +    fn provide(&self, _providers: &mut Providers) {}
 +    fn provide_extern(&self, _providers: &mut Providers) {}
 +
 +    fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
 +        vec![]
 +    }
 +
 +    fn codegen_crate(
 +        &self,
 +        tcx: TyCtxt<'_>,
 +        metadata: EncodedMetadata,
 +        need_metadata_module: bool,
 +    ) -> Box<dyn Any> {
 +        tcx.sess.abort_if_errors();
 +        let config = if let Some(config) = self.config.clone() {
 +            config
 +        } else {
 +            BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
 +                .unwrap_or_else(|err| tcx.sess.fatal(&err))
 +        };
 +        match config.codegen_mode {
 +            CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
 +            CodegenMode::Jit | CodegenMode::JitLazy => {
 +                #[cfg(feature = "jit")]
 +                let _: ! = driver::jit::run_jit(tcx, config);
 +
 +                #[cfg(not(feature = "jit"))]
 +                tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
 +            }
 +        }
 +    }
 +
 +    fn join_codegen(
 +        &self,
 +        ongoing_codegen: Box<dyn Any>,
 +        _sess: &Session,
 +    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
 +        Ok(*ongoing_codegen
 +            .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
 +            .unwrap())
 +    }
 +
 +    fn link(
 +        &self,
 +        sess: &Session,
 +        codegen_results: CodegenResults,
 +        outputs: &OutputFilenames,
 +    ) -> Result<(), ErrorReported> {
 +        use rustc_codegen_ssa::back::link::link_binary;
 +
 +        link_binary::<crate::archive::ArArchiveBuilder<'_>>(
 +            sess,
 +            &codegen_results,
 +            outputs,
 +            &codegen_results.crate_name.as_str(),
 +        );
 +
 +        Ok(())
 +    }
 +}
 +
 +fn target_triple(sess: &Session) -> target_lexicon::Triple {
 +    sess.target.llvm_target.parse().unwrap()
 +}
 +
 +fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
 +    use target_lexicon::BinaryFormat;
 +
 +    let target_triple = crate::target_triple(sess);
 +
 +    let mut flags_builder = settings::builder();
 +    flags_builder.enable("is_pic").unwrap();
 +    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
 +    let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
 +    flags_builder.set("enable_verifier", enable_verifier).unwrap();
 +
 +    let tls_model = match target_triple.binary_format {
 +        BinaryFormat::Elf => "elf_gd",
 +        BinaryFormat::Macho => "macho",
 +        BinaryFormat::Coff => "coff",
 +        _ => "none",
 +    };
 +    flags_builder.set("tls_model", tls_model).unwrap();
 +
 +    flags_builder.set("enable_simd", "true").unwrap();
 +
 +    flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
 +
++    flags_builder.set("regalloc", &backend_config.regalloc).unwrap();
++
 +    use rustc_session::config::OptLevel;
 +    match sess.opts.optimize {
 +        OptLevel::No => {
 +            flags_builder.set("opt_level", "none").unwrap();
 +        }
 +        OptLevel::Less | OptLevel::Default => {}
 +        OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
 +            flags_builder.set("opt_level", "speed_and_size").unwrap();
 +        }
 +    }
 +
 +    let flags = settings::Flags::new(flags_builder);
 +
 +    let variant = cranelift_codegen::isa::BackendVariant::MachInst;
 +
 +    let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
 +        Some("native") => {
 +            let builder = cranelift_native::builder_with_options(variant, true).unwrap();
 +            builder
 +        }
 +        Some(value) => {
-             let mut builder = cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
++            let mut builder =
++                cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
 +            if let Err(_) = builder.enable(value) {
 +                sess.fatal("The specified target cpu isn't currently supported by Cranelift.");
 +            }
 +            builder
 +        }
 +        None => {
-     
++            let mut builder =
++                cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
 +            // Don't use "haswell" as the default, as it implies `has_lzcnt`.
 +            // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
 +            builder.enable("nehalem").unwrap();
 +            builder
 +        }
 +    };
++
 +    isa_builder.finish(flags)
 +}
 +
 +/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
 +#[no_mangle]
 +pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
 +    Box::new(CraneliftCodegenBackend { config: None })
 +}
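[editor's note] `build_isa` drives everything through the string-keyed `cranelift_codegen::settings` API seen above. A minimal sketch of the same pattern, assuming a `cranelift-codegen` dependency and shown purely as an illustration:

    use cranelift_codegen::settings::{self, Configurable};

    fn main() {
        let mut b = settings::builder();
        b.enable("is_pic").unwrap();                  // boolean settings can be enabled directly
        b.set("enable_probestack", "false").unwrap(); // others are set by name/value strings
        b.set("opt_level", "speed_and_size").unwrap();
        let _flags = settings::Flags::new(b);         // frozen flag set handed to the ISA builder
    }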
index d1958c5f96b86b7be79f1a2c1129e4e57a82f1a6,0000000000000000000000000000000000000000..8fd1e4f5811f5d4f9d26fb6166f7981d5e699117
mode 100644,000000..100644
--- /dev/null
@@@ -1,154 -1,0 +1,159 @@@
-     let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
-     if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
 +use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
 +use rustc_hir::LangItem;
 +use rustc_middle::ty::subst::GenericArg;
 +use rustc_middle::ty::AssocKind;
 +use rustc_session::config::EntryFnType;
 +use rustc_span::symbol::Ident;
 +
 +use crate::prelude::*;
 +
 +/// Create the `main` function which will initialize the rust runtime and call
 +/// the user's main function.
 +pub(crate) fn maybe_create_entry_wrapper(
 +    tcx: TyCtxt<'_>,
 +    module: &mut impl Module,
 +    unwind_context: &mut UnwindContext,
 +    is_jit: bool,
++    is_primary_cgu: bool,
 +) {
 +    let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
 +        Some((def_id, entry_ty)) => (
 +            def_id,
 +            match entry_ty {
 +                EntryFnType::Main => true,
 +                EntryFnType::Start => false,
 +            },
 +        ),
 +        None => return,
 +    };
 +
++    if main_def_id.is_local() {
++        let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
++        if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
++            return;
++        }
++    } else if !is_primary_cgu {
 +        return;
 +    }
 +
 +    create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
 +
 +    fn create_entry_fn(
 +        tcx: TyCtxt<'_>,
 +        m: &mut impl Module,
 +        unwind_context: &mut UnwindContext,
 +        rust_main_def_id: DefId,
 +        ignore_lang_start_wrapper: bool,
 +        is_main_fn: bool,
 +    ) {
 +        let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
 +        // Given that `main()` has no arguments, its return type cannot have
 +        // late-bound regions, since late-bound regions must appear in the
 +        // argument listing.
 +        let main_ret_ty = tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap());
 +
 +        let cmain_sig = Signature {
 +            params: vec![
 +                AbiParam::new(m.target_config().pointer_type()),
 +                AbiParam::new(m.target_config().pointer_type()),
 +            ],
 +            returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
 +            call_conv: CallConv::triple_default(m.isa().triple()),
 +        };
 +
 +        let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
 +
 +        let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
 +
 +        let main_name = tcx.symbol_name(instance).name;
 +        let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
 +        let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
 +
 +        let mut ctx = Context::new();
 +        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
 +        {
 +            let mut func_ctx = FunctionBuilderContext::new();
 +            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
 +
 +            let block = bcx.create_block();
 +            bcx.switch_to_block(block);
 +            let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
 +            let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
 +
 +            let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
 +
 +            let result = if is_main_fn && ignore_lang_start_wrapper {
 +                // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
 +                // FIXME set program arguments somehow
 +                let call_inst = bcx.ins().call(main_func_ref, &[]);
 +                let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
 +
 +                let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
 +                let report = tcx
 +                    .associated_items(termination_trait)
 +                    .find_by_name_and_kind(
 +                        tcx,
 +                        Ident::from_str("report"),
 +                        AssocKind::Fn,
 +                        termination_trait,
 +                    )
 +                    .unwrap();
 +                let report = Instance::resolve(
 +                    tcx,
 +                    ParamEnv::reveal_all(),
 +                    report.def_id,
 +                    tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
 +                )
 +                .unwrap()
 +                .unwrap();
 +
 +                let report_name = tcx.symbol_name(report).name;
 +                let report_sig = get_function_sig(tcx, m.isa().triple(), report);
 +                let report_func_id =
 +                    m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
 +                let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
 +
 +                // FIXME do proper abi handling instead of expecting the pass mode to be identical
 +                // for returns and arguments.
 +                let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
 +                let res = bcx.func.dfg.inst_results(report_call_inst)[0];
 +                match m.target_config().pointer_type() {
 +                    types::I32 => res,
 +                    types::I64 => bcx.ins().sextend(types::I64, res),
 +                    _ => unimplemented!("16bit systems are not yet supported"),
 +                }
 +            } else if is_main_fn {
 +                let start_def_id = tcx.require_lang_item(LangItem::Start, None);
 +                let start_instance = Instance::resolve(
 +                    tcx,
 +                    ParamEnv::reveal_all(),
 +                    start_def_id,
 +                    tcx.intern_substs(&[main_ret_ty.into()]),
 +                )
 +                .unwrap()
 +                .unwrap()
 +                .polymorphize(tcx);
 +                let start_func_id = import_function(tcx, m, start_instance);
 +
 +                let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
 +
 +                let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
 +                let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
 +                bcx.inst_results(call_inst)[0]
 +            } else {
 +                // using user-defined start fn
 +                let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
 +                bcx.inst_results(call_inst)[0]
 +            };
 +
 +            bcx.ins().return_(&[result]);
 +            bcx.seal_all_blocks();
 +            bcx.finalize();
 +        }
 +        m.define_function(cmain_func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
 +            .unwrap();
 +        unwind_context.add_function(cmain_func_id, &ctx, m.isa());
 +    }
 +}
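[editor's note] Stripped of the Cranelift plumbing, the wrapper generated above has this shape for a regular `fn main()`: a C-ABI `main(argc, argv)` forwards to the `#[lang = "start"]` function, which runs the user's main and yields the process exit code. A plain-Rust sketch with hypothetical stand-in names (`lang_start_like` and `c_main_shape` are not real APIs):

    fn user_main() {
        println!("hello from user main");
    }

    // Stand-in for the #[lang = "start"] entry the wrapper calls (hypothetical helper).
    fn lang_start_like(user: fn(), _argc: isize, _argv: *const *const u8) -> isize {
        user(); // the real lang_start also sets up the runtime and reports the Termination value
        0
    }

    // Shape of the exported C `main(argc, argv)` the shim declares (hypothetical name).
    fn c_main_shape(argc: isize, argv: *const *const u8) -> isize {
        lang_start_like(user_main, argc, argv)
    }

    fn main() {
        assert_eq!(c_main_shape(0, std::ptr::null()), 0);
    }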
index 819c8b51558a051321ef9f1670c2771f6937b301,0000000000000000000000000000000000000000..21d3e68dbc79257ac4f899715efe30ce481d2839
mode 100644,000000..100644
--- /dev/null
@@@ -1,78 -1,0 +1,78 @@@
-     let msg_ptr = fx.anonymous_str("trap", &real_msg);
 +//! Helpers used to print a message and abort in case of certain panics and some detected UB.
 +
 +use crate::prelude::*;
 +
 +fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
 +    let puts = fx
 +        .module
 +        .declare_function(
 +            "puts",
 +            Linkage::Import,
 +            &Signature {
 +                call_conv: CallConv::triple_default(fx.triple()),
 +                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
 +                returns: vec![AbiParam::new(types::I32)],
 +            },
 +        )
 +        .unwrap();
 +    let puts = fx.module.declare_func_in_func(puts, &mut fx.bcx.func);
 +    if fx.clif_comments.enabled() {
 +        fx.add_comment(puts, "puts");
 +    }
 +
 +    let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, fx.symbol_name, msg);
++    let msg_ptr = fx.anonymous_str(&real_msg);
 +    fx.bcx.ins().call(puts, &[msg_ptr]);
 +}
 +
 +/// Trap code: user1
 +pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::User(1));
 +}
 +
 +/// Use this for example when a function call should never return. This will fill the current block,
 +/// so you can **not** add instructions to it afterwards.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 +}
 +
 +/// Like `trap_unreachable` but returns a fake value of the specified type.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unreachable_ret_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    dest_layout: TyAndLayout<'tcx>,
 +    msg: impl AsRef<str>,
 +) -> CValue<'tcx> {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
 +    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
 +}
 +
 +/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
 +/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
 +/// to it afterwards.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
 +    codegen_print(fx, msg.as_ref());
 +    let true_ = fx.bcx.ins().iconst(types::I32, 1);
 +    fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
 +}
 +
 +/// Like `trap_unimplemented` but returns a fake value of the specified type.
 +///
 +/// Trap code: user65535
 +pub(crate) fn trap_unimplemented_ret_value<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    dest_layout: TyAndLayout<'tcx>,
 +    msg: impl AsRef<str>,
 +) -> CValue<'tcx> {
 +    trap_unimplemented(fx, msg);
 +    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
 +}
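[editor's note] Conceptually each helper above is "print a diagnostic via `puts`, then emit a trap". A plain-Rust analogue of `trap_abort`, shown only as an illustration (the environment-variable guard just keeps the example from aborting when run as-is):

    fn trap_abort(msg: &str) -> ! {
        eprintln!("trap: {}", msg); // the codegen version calls the C `puts`
        std::process::abort()       // stands in for the TrapCode::User(1) trap
    }

    fn main() {
        if std::env::var_os("TRIGGER_TRAP").is_some() {
            trap_abort("example trap message");
        }
    }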
index 9a572c3501f925b7adc726c64e30755e9ade2fef,0000000000000000000000000000000000000000..171f39805f8963ffaad26ce92566cb3f369111d6
mode 100644,000000..100644
--- /dev/null
@@@ -1,729 -1,0 +1,730 @@@
 +//! Definition of [`CValue`] and [`CPlace`]
 +
 +use crate::prelude::*;
 +
 +use cranelift_codegen::ir::immediates::Offset32;
 +
 +fn codegen_field<'tcx>(
 +    fx: &mut FunctionCx<'_, '_, 'tcx>,
 +    base: Pointer,
 +    extra: Option<Value>,
 +    layout: TyAndLayout<'tcx>,
 +    field: mir::Field,
 +) -> (Pointer, TyAndLayout<'tcx>) {
 +    let field_offset = layout.fields.offset(field.index());
 +    let field_layout = layout.field(&*fx, field.index());
 +
 +    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
 +        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
 +    };
 +
 +    if let Some(extra) = extra {
 +        if !field_layout.is_unsized() {
 +            return simple(fx);
 +        }
 +        match field_layout.ty.kind() {
 +            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
 +            ty::Adt(def, _) if def.repr.packed() => {
 +                assert_eq!(layout.align.abi.bytes(), 1);
 +                simple(fx)
 +            }
 +            _ => {
 +                // We have to align the offset for DSTs
 +                let unaligned_offset = field_offset.bytes();
 +                let (_, unsized_align) =
 +                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 +
 +                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
 +                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
 +                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
 +                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
 +                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
 +                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
 +
 +                (base.offset_value(fx, offset), field_layout)
 +            }
 +        }
 +    } else {
 +        simple(fx)
 +    }
 +}
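[editor's note] The unsized-field branch above rounds the unaligned offset up to `unsized_align` using the classic `(offset + align - 1) & -align` trick, spelled out as separate add/sub/and instructions. The same arithmetic in plain Rust, as an illustration only:

    fn align_up(unaligned_offset: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        let and_lhs = (align - 1) + unaligned_offset; // iadd_imm(align_sub_1, offset)
        let and_rhs = 0u64.wrapping_sub(align);       // isub(zero, align): two's-complement mask
        and_lhs & and_rhs                             // band
    }

    fn main() {
        assert_eq!(align_up(13, 8), 16);
        assert_eq!(align_up(16, 8), 16);
    }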
 +
 +fn scalar_pair_calculate_b_offset(
 +    tcx: TyCtxt<'_>,
 +    a_scalar: &Scalar,
 +    b_scalar: &Scalar,
 +) -> Offset32 {
 +    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
 +    Offset32::new(b_offset.bytes().try_into().unwrap())
 +}
 +
 +/// A read-only value
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
 +
 +#[derive(Debug, Copy, Clone)]
 +enum CValueInner {
 +    ByRef(Pointer, Option<Value>),
 +    ByVal(Value),
 +    ByValPair(Value, Value),
 +}
 +
 +impl<'tcx> CValue<'tcx> {
 +    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, None), layout)
 +    }
 +
 +    pub(crate) fn by_ref_unsized(
 +        ptr: Pointer,
 +        meta: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
 +    }
 +
 +    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
 +        CValue(CValueInner::ByVal(value), layout)
 +    }
 +
 +    pub(crate) fn by_val_pair(
 +        value: Value,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
 +        CValue(CValueInner::ByValPair(value, extra), layout)
 +    }
 +
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.1
 +    }
 +
 +    // FIXME remove
 +    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => (ptr, meta),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
 +                let cplace = CPlace::new_stack_slot(fx, layout);
 +                cplace.write_cvalue(fx, self);
 +                (cplace.to_ptr(), None)
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
 +        match self.0 {
 +            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
 +            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
 +        }
 +    }
 +
 +    /// Load a value with layout.abi of scalar
 +    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let clif_ty = match layout.abi {
 +                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
 +                    Abi::Vector { ref element, count } => {
 +                        scalar_to_clif_type(fx.tcx, element.clone())
 +                            .by(u16::try_from(count).unwrap())
 +                            .unwrap()
 +                    }
 +                    _ => unreachable!("{:?}", layout.ty),
 +                };
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                ptr.load(fx, clif_ty, flags)
 +            }
 +            CValueInner::ByVal(value) => value,
 +            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
 +            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
 +        }
 +    }
 +
 +    /// Load a value pair with layout.abi of scalar pair
 +    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByRef(ptr, None) => {
 +                let (a_scalar, b_scalar) = match &layout.abi {
 +                    Abi::ScalarPair(a, b) => (a, b),
 +                    _ => unreachable!("load_scalar_pair({:?})", self),
 +                };
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
 +                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
 +                let mut flags = MemFlags::new();
 +                flags.set_notrap();
 +                let val1 = ptr.load(fx, clif_ty1, flags);
 +                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
 +                (val1, val2)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => {
 +                bug!("load_scalar_pair for unsized value not allowed")
 +            }
 +            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
 +            CValueInner::ByValPair(val1, val2) => (val1, val2),
 +        }
 +    }
 +
 +    pub(crate) fn value_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CValue<'tcx> {
 +        let layout = self.1;
 +        match self.0 {
 +            CValueInner::ByVal(val) => match layout.abi {
 +                Abi::Vector { element: _, count } => {
 +                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
 +                    let field = u8::try_from(field.index()).unwrap();
 +                    assert!(field < count);
 +                    let lane = fx.bcx.ins().extractlane(val, field);
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(lane, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByValPair(val1, val2) => match layout.abi {
 +                Abi::ScalarPair(_, _) => {
 +                    let val = match field.as_u32() {
 +                        0 => val1,
 +                        1 => val2,
 +                        _ => bug!("field should be 0 or 1"),
 +                    };
 +                    let field_layout = layout.field(&*fx, usize::from(field));
 +                    CValue::by_val(val, field_layout)
 +                }
 +                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
 +            },
 +            CValueInner::ByRef(ptr, None) => {
 +                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
 +                CValue::by_ref(field_ptr, field_layout)
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
 +        crate::unsize::coerce_unsized_into(fx, self, dest);
 +    }
 +
 +    /// If `ty` is signed, `const_val` must already be sign extended.
 +    pub(crate) fn const_val(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +        const_val: ty::ScalarInt,
 +    ) -> CValue<'tcx> {
 +        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
 +        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
 +
 +        let clif_ty = fx.clif_type(layout.ty).unwrap();
 +
 +        if let ty::Bool = layout.ty.kind() {
 +            assert!(
 +                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
 +                "Invalid bool 0x{:032X}",
 +                const_val
 +            );
 +        }
 +
 +        let val = match layout.ty.kind() {
 +            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
 +                let const_val = const_val.to_bits(layout.size).unwrap();
 +                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
 +                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
 +                fx.bcx.ins().iconcat(lsb, msb)
 +            }
 +            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
 +                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
 +            }
 +            ty::Float(FloatTy::F32) => {
 +                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
 +            }
 +            ty::Float(FloatTy::F64) => {
 +                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
 +            }
 +            _ => panic!(
 +                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
 +                layout.ty
 +            ),
 +        };
 +
 +        CValue::by_val(val, layout)
 +    }
 +
 +    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
 +        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
 +        assert_eq!(self.layout().abi, layout.abi);
 +        CValue(self.0, layout)
 +    }
 +}
 +
 +/// A place where you can write a value to or read a value from
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) struct CPlace<'tcx> {
 +    inner: CPlaceInner,
 +    layout: TyAndLayout<'tcx>,
 +}
 +
 +#[derive(Debug, Copy, Clone)]
 +pub(crate) enum CPlaceInner {
 +    Var(Local, Variable),
 +    VarPair(Local, Variable, Variable),
 +    VarLane(Local, Variable, u8),
 +    Addr(Pointer, Option<Value>),
 +}
 +
 +impl<'tcx> CPlace<'tcx> {
 +    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
 +        self.layout
 +    }
 +
 +    pub(crate) fn inner(&self) -> &CPlaceInner {
 +        &self.inner
 +    }
 +
 +    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
 +    }
 +
 +    pub(crate) fn new_stack_slot(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        assert!(!layout.is_unsized());
 +        if layout.size.bytes() == 0 {
 +            return CPlace::no_place(layout);
 +        }
 +
 +        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +            kind: StackSlotKind::ExplicitSlot,
 +            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +            // specify stack slot alignment.
 +            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
 +            offset: None,
 +        });
 +        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
 +    }
 +
 +    pub(crate) fn new_var(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
 +        CPlace { inner: CPlaceInner::Var(local, var), layout }
 +    }
 +
 +    pub(crate) fn new_var_pair(
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        local: Local,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        let var1 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +        let var2 = Variable::with_u32(fx.next_ssa_var);
 +        fx.next_ssa_var += 1;
 +
 +        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
 +        fx.bcx.declare_var(var1, ty1);
 +        fx.bcx.declare_var(var2, ty2);
 +        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
 +    }
 +
 +    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
 +    }
 +
 +    pub(crate) fn for_ptr_with_extra(
 +        ptr: Pointer,
 +        extra: Value,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CPlace<'tcx> {
 +        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
 +    }
 +
 +    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
 +        let layout = self.layout();
 +        match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let val1 = fx.bcx.use_var(var1);
 +                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
 +                let val2 = fx.bcx.use_var(var2);
 +                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
 +                CValue::by_val_pair(val1, val2, layout)
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let val = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                let val = fx.bcx.ins().extractlane(val, lane);
 +                CValue::by_val(val, layout)
 +            }
 +            CPlaceInner::Addr(ptr, extra) => {
 +                if let Some(extra) = extra {
 +                    CValue::by_ref_unsized(ptr, extra, layout)
 +                } else {
 +                    CValue::by_ref(ptr, layout)
 +                }
 +            }
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr(self) -> Pointer {
 +        match self.to_ptr_maybe_unsized() {
 +            (ptr, None) => ptr,
 +            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
 +        match self.inner {
 +            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
 +            CPlaceInner::Var(_, _)
 +            | CPlaceInner::VarPair(_, _, _)
 +            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
 +        }
 +    }
 +
 +    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
 +        assert_assignable(fx, from.layout().ty, self.layout().ty);
 +
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
 +    }
 +
 +    pub(crate) fn write_cvalue_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +    ) {
 +        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
 +    }
 +
 +    fn write_cvalue_maybe_transmute(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        from: CValue<'tcx>,
 +        method: &'static str,
 +    ) {
 +        fn transmute_value<'tcx>(
 +            fx: &mut FunctionCx<'_, '_, 'tcx>,
 +            var: Variable,
 +            data: Value,
 +            dst_ty: Type,
 +        ) {
 +            let src_ty = fx.bcx.func.dfg.value_type(data);
 +            assert_eq!(
 +                src_ty.bytes(),
 +                dst_ty.bytes(),
 +                "write_cvalue_transmute: {:?} -> {:?}",
 +                src_ty,
 +                dst_ty,
 +            );
 +            let data = match (src_ty, dst_ty) {
 +                (_, _) if src_ty == dst_ty => data,
 +
 +                // This is a `write_cvalue_transmute`.
 +                (types::I32, types::F32)
 +                | (types::F32, types::I32)
 +                | (types::I64, types::F64)
 +                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
 +                _ if src_ty.is_vector() && dst_ty.is_vector() => {
 +                    fx.bcx.ins().raw_bitcast(dst_ty, data)
 +                }
 +                _ if src_ty.is_vector() || dst_ty.is_vector() => {
 +                    // FIXME do something more efficient for transmutes between vectors and integers.
 +                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
 +                        kind: StackSlotKind::ExplicitSlot,
 +                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
 +                        // specify stack slot alignment.
 +                        size: (src_ty.bytes() + 15) / 16 * 16,
 +                        offset: None,
 +                    });
 +                    let ptr = Pointer::stack_slot(stack_slot);
 +                    ptr.store(fx, data, MemFlags::trusted());
 +                    ptr.load(fx, dst_ty, MemFlags::trusted())
 +                }
 +                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
 +            };
 +            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +            fx.bcx.def_var(var, data);
 +        }
 +
 +        assert_eq!(self.layout().size, from.layout().size);
 +
 +        if fx.clif_comments.enabled() {
 +            use cranelift_codegen::cursor::{Cursor, CursorPosition};
 +            let cur_block = match fx.bcx.cursor().position() {
 +                CursorPosition::After(block) => block,
 +                _ => unreachable!(),
 +            };
 +            fx.add_comment(
 +                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
 +                format!(
 +                    "{}: {:?}: {:?} <- {:?}: {:?}",
 +                    method,
 +                    self.inner(),
 +                    self.layout().ty,
 +                    from.0,
 +                    from.layout().ty
 +                ),
 +            );
 +        }
 +
 +        let dst_layout = self.layout();
 +        let to_ptr = match self.inner {
 +            CPlaceInner::Var(_local, var) => {
 +                let data = CValue(from.0, dst_layout).load_scalar(fx);
 +                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var, data, dst_ty);
 +                return;
 +            }
 +            CPlaceInner::VarPair(_local, var1, var2) => {
 +                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
 +                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
 +                transmute_value(fx, var1, data1, dst_ty1);
 +                transmute_value(fx, var2, data2, dst_ty2);
 +                return;
 +            }
 +            CPlaceInner::VarLane(_local, var, lane) => {
 +                let data = from.load_scalar(fx);
 +
 +                // First get the old vector
 +                let vector = fx.bcx.use_var(var);
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +
 +                // Next insert the written lane into the vector
 +                let vector = fx.bcx.ins().insertlane(vector, data, lane);
 +
 +                // Finally write the new vector
 +                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 +                fx.bcx.def_var(var, vector);
 +
 +                return;
 +            }
 +            CPlaceInner::Addr(ptr, None) => {
 +                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
 +                    return;
 +                }
 +                ptr
 +            }
 +            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
 +        };
 +
 +        let mut flags = MemFlags::new();
 +        flags.set_notrap();
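 +        // Fast path: values with a scalar or scalar-pair ABI are stored directly.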
 +        match from.layout().abi {
 +            // FIXME make Abi::Vector work too
 +            Abi::Scalar(_) => {
 +                let val = from.load_scalar(fx);
 +                to_ptr.store(fx, val, flags);
 +                return;
 +            }
 +            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
 +                let (value, extra) = from.load_scalar_pair(fx);
 +                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
 +                to_ptr.store(fx, value, flags);
 +                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
 +                return;
 +            }
 +            _ => {}
 +        }
 +
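 +        // Otherwise store based on the representation of the source value: immediates are
 +        // stored directly, by-ref values are copied with a small memcpy.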
 +        match from.0 {
 +            CValueInner::ByVal(val) => {
 +                to_ptr.store(fx, val, flags);
 +            }
 +            CValueInner::ByValPair(_, _) => {
 +                bug!("Non-ScalarPair ABI {:?} for ByValPair CValue", dst_layout.abi);
 +            }
 +            CValueInner::ByRef(from_ptr, None) => {
 +                let from_addr = from_ptr.get_addr(fx);
 +                let to_addr = to_ptr.get_addr(fx);
 +                let src_layout = from.1;
 +                let size = dst_layout.size.bytes();
 +                let src_align = src_layout.align.abi.bytes() as u8;
 +                let dst_align = dst_layout.align.abi.bytes() as u8;
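 +                // Copy the value byte for byte; the `true` argument asserts that the source
 +                // and destination regions do not overlap.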
 +                fx.bcx.emit_small_memory_copy(
 +                    fx.module.target_config(),
 +                    to_addr,
 +                    from_addr,
 +                    size,
 +                    dst_align,
 +                    src_align,
 +                    true,
++                    MemFlags::trusted(),
 +                );
 +            }
 +            CValueInner::ByRef(_, Some(_)) => todo!(),
 +        }
 +    }
 +
 +    pub(crate) fn place_field(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        field: mir::Field,
 +    ) -> CPlace<'tcx> {
 +        let layout = self.layout();
 +
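 +        // Field projections on places kept in SSA variables are handled specially: a vector
 +        // projects to one of its lanes and a pair to one of its two variables.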
 +        match self.inner {
 +            CPlaceInner::Var(local, var) => {
 +                if let Abi::Vector { .. } = layout.abi {
 +                    return CPlace {
 +                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
 +                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
 +                    };
 +                }
 +            }
 +            CPlaceInner::VarPair(local, var1, var2) => {
 +                let layout = layout.field(&*fx, field.index());
 +
 +                match field.as_u32() {
 +                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
 +                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
 +                    _ => unreachable!("field should be 0 or 1"),
 +                }
 +            }
 +            _ => {}
 +        }
 +
 +        let (base, extra) = self.to_ptr_maybe_unsized();
 +
 +        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
 +        if field_layout.is_unsized() {
 +            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
 +        } else {
 +            CPlace::for_ptr(field_ptr, field_layout)
 +        }
 +    }
 +
 +    pub(crate) fn place_index(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        index: Value,
 +    ) -> CPlace<'tcx> {
 +        let (elem_layout, ptr) = match self.layout().ty.kind() {
 +            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
 +            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
 +            _ => bug!("place_index({:?})", self.layout().ty),
 +        };
 +
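 +        // The element lives at `index * size_of::<element>()` bytes past the base pointer.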
 +        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
 +
 +        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
 +    }
 +
 +    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
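 +        // Dereferencing a pointer to an unsized type yields the address together with the
 +        // metadata (e.g. a slice length or vtable pointer); thin pointers only carry an address.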
 +        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
 +        if has_ptr_meta(fx.tcx, inner_layout.ty) {
 +            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
 +            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
 +        } else {
 +            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
 +        }
 +    }
 +
 +    pub(crate) fn place_ref(
 +        self,
 +        fx: &mut FunctionCx<'_, '_, 'tcx>,
 +        layout: TyAndLayout<'tcx>,
 +    ) -> CValue<'tcx> {
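 +        // Taking the address of an unsized place produces a fat pointer that carries the
 +        // metadata alongside the address.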
 +        if has_ptr_meta(fx.tcx, self.layout().ty) {
 +            let (ptr, extra) = self.to_ptr_maybe_unsized();
 +            CValue::by_val_pair(
 +                ptr.get_addr(fx),
 +                extra.expect("unsized type without metadata"),
 +                layout,
 +            )
 +        } else {
 +            CValue::by_val(self.to_ptr().get_addr(fx), layout)
 +        }
 +    }
 +
 +    pub(crate) fn downcast_variant(
 +        self,
 +        fx: &FunctionCx<'_, '_, 'tcx>,
 +        variant: VariantIdx,
 +    ) -> Self {
 +        assert!(!self.layout().is_unsized());
 +        let layout = self.layout().for_variant(fx, variant);
 +        CPlace { inner: self.inner, layout }
 +    }
 +}
 +
 +#[track_caller]
 +pub(crate) fn assert_assignable<'tcx>(
 +    fx: &FunctionCx<'_, '_, 'tcx>,
 +    from_ty: Ty<'tcx>,
 +    to_ty: Ty<'tcx>,
 +) {
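 +    // Assert that a value of type `from_ty` may be written to a place of type `to_ty`;
 +    // lifetimes and pointer mutability are ignored.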
 +    match (from_ty.kind(), to_ty.kind()) {
 +        (ty::Ref(_, a, _), ty::Ref(_, b, _))
 +        | (
 +            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
 +            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
 +        ) => {
 +            assert_assignable(fx, a, b);
 +        }
 +        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
 +        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
 +            assert_assignable(fx, a, b);
 +        }
 +        (ty::FnPtr(_), ty::FnPtr(_)) => {
 +            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
 +                ParamEnv::reveal_all(),
 +                from_ty.fn_sig(fx.tcx),
 +            );
 +            let to_sig = fx
 +                .tcx
 +                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
 +            assert_eq!(
 +                from_sig, to_sig,
 +                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
 +                from_sig, to_sig, fx,
 +            );
 +            // fn(&T) -> for<'l> fn(&'l T) is allowed
 +        }
 +        (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
 +            for (from, to) in from_traits.iter().zip(to_traits) {
 +                let from =
 +                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
 +                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
 +                assert_eq!(
 +                    from, to,
 +                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
 +                    from_traits, to_traits, fx,
 +                );
 +            }
 +            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
 +        }
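 +        // The same ADT may appear with different lifetimes in its substs; compare the type
 +        // arguments pairwise instead of requiring exact equality.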
 +        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
 +            if adt_def_a.did == adt_def_b.did =>
 +        {
 +            let mut types_a = substs_a.types();
 +            let mut types_b = substs_b.types();
 +            loop {
 +                match (types_a.next(), types_b.next()) {
 +                    (Some(a), Some(b)) => assert_assignable(fx, a, b),
 +                    (None, None) => return,
 +                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
 +                }
 +            }
 +        }
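 +        // All remaining cases require the types to match exactly.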
 +        _ => {
 +            assert_eq!(
 +                from_ty, to_ty,
 +                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
 +                from_ty, to_ty, fx,
 +            );
 +        }
 +    }
 +}