--- /dev/null
+++ b/.github/FUNDING.yml
++github: antoyo
++patreon: antoyo
--- /dev/null
+++ b/.github/workflows/ci.yml
++name: CI
++
++on:
++  - push
++  - pull_request
++
++jobs:
++  build:
++    runs-on: ubuntu-latest
++
++    strategy:
++      fail-fast: false
++
++    steps:
++    - uses: actions/checkout@v2
++
++    - name: Install packages
++      run: sudo apt-get install ninja-build ripgrep
++
++    - name: Download artifact
++      uses: dawidd6/action-download-artifact@v2
++      with:
++        workflow: main.yml
++        name: libgccjit.so
++        path: gcc-build
++        repo: antoyo/gcc
++
++    - name: Setup path to libgccjit
++      run: |
++        echo $(readlink -f gcc-build) > gcc_path
++        ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
++
++    - name: Set LIBRARY_PATH
++      run: echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
++
++    # https://github.com/actions/cache/issues/133
++    - name: Fixup owner of ~/.cargo/
++      # Don't remove the trailing /. It is necessary to follow the symlink.
++      run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
++
++    - name: Cache cargo installed crates
++      uses: actions/cache@v1.1.2
++      with:
++        path: ~/.cargo/bin
++        key: cargo-installed-crates2-ubuntu-latest
++
++    - name: Cache cargo registry
++      uses: actions/cache@v1
++      with:
++        path: ~/.cargo/registry
++        key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
++
++    - name: Cache cargo index
++      uses: actions/cache@v1
++      with:
++        path: ~/.cargo/git
++        key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
++
++    - name: Cache cargo target dir
++      uses: actions/cache@v1.1.2
++      with:
++        path: target
++        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
++
++    - name: Build
++      run: |
++        ./prepare_build.sh
++        ./build.sh
++        ./clean_all.sh
++
++    - name: Prepare dependencies
++      run: |
++        git config --global user.email "user@example.com"
++        git config --global user.name "User"
++        ./prepare.sh
++
++    # Compile is a separate step, as the actions-rs/cargo action supports error annotations
++    - name: Compile
++      uses: actions-rs/cargo@v1.0.3
++      with:
++        command: build
++        args: --release
++
++    - name: Test
++      run: |
++        # Enable backtraces for easier debugging
++        export RUST_BACKTRACE=1
++
++        # Reduce amount of benchmark runs as they are slow
++        export COMPILE_RUNS=2
++        export RUN_RUNS=2
++
++        ./test.sh --release
--- /dev/null
+++ b/.gitignore
++target
++**/*.rs.bk
++*.rlib
++*.o
++perf.data
++perf.data.old
++*.events
++*.string*
++/build_sysroot/sysroot
++/build_sysroot/Cargo.lock
++/build_sysroot/test_target/Cargo.lock
++/rust
++/regex
++gimple*
++*asm
++res
++test-backend
--- /dev/null
+++ b/Cargo.lock
++# This file is automatically @generated by Cargo.
++# It is not intended for manual editing.
++version = 3
++
++[[package]]
++name = "aho-corasick"
++version = "0.7.18"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
++dependencies = [
++ "memchr",
++]
++
++[[package]]
++name = "ar"
++version = "0.8.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1"
++
++[[package]]
++name = "autocfg"
++version = "1.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
++
++[[package]]
++name = "bitflags"
++version = "1.2.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
++
++[[package]]
++name = "cfg-if"
++version = "1.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
++
++[[package]]
++name = "crc32fast"
++version = "1.2.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
++dependencies = [
++ "cfg-if",
++]
++
++[[package]]
++name = "fm"
++version = "0.1.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca"
++dependencies = [
++ "regex",
++]
++
++[[package]]
++name = "gccjit"
++version = "1.0.0"
++source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea"
++dependencies = [
++ "gccjit_sys",
++]
++
++[[package]]
++name = "gccjit_sys"
++version = "0.0.1"
++source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea"
++dependencies = [
++ "libc 0.1.12",
++]
++
++[[package]]
++name = "getopts"
++version = "0.2.21"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
++dependencies = [
++ "unicode-width",
++]
++
++[[package]]
++name = "getrandom"
++version = "0.2.3"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
++dependencies = [
++ "cfg-if",
++ "libc 0.2.98",
++ "wasi",
++]
++
++[[package]]
++name = "hashbrown"
++version = "0.11.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
++
++[[package]]
++name = "hermit-abi"
++version = "0.1.19"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
++dependencies = [
++ "libc 0.2.98",
++]
++
++[[package]]
++name = "indexmap"
++version = "1.7.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
++dependencies = [
++ "autocfg",
++ "hashbrown",
++]
++
++[[package]]
++name = "lang_tester"
++version = "0.3.13"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090"
++dependencies = [
++ "fm",
++ "getopts",
++ "libc 0.2.98",
++ "num_cpus",
++ "termcolor",
++ "threadpool",
++ "wait-timeout",
++ "walkdir",
++]
++
++[[package]]
++name = "libc"
++version = "0.1.12"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
++
++[[package]]
++name = "libc"
++version = "0.2.98"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
++
++[[package]]
++name = "memchr"
++version = "2.4.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc"
++
++[[package]]
++name = "num_cpus"
++version = "1.13.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
++dependencies = [
++ "hermit-abi",
++ "libc 0.2.98",
++]
++
++[[package]]
++name = "object"
++version = "0.25.3"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7"
++dependencies = [
++ "crc32fast",
++ "indexmap",
++ "memchr",
++]
++
++[[package]]
++name = "ppv-lite86"
++version = "0.2.10"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
++
++[[package]]
++name = "rand"
++version = "0.8.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
++dependencies = [
++ "libc 0.2.98",
++ "rand_chacha",
++ "rand_core",
++ "rand_hc",
++]
++
++[[package]]
++name = "rand_chacha"
++version = "0.3.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
++dependencies = [
++ "ppv-lite86",
++ "rand_core",
++]
++
++[[package]]
++name = "rand_core"
++version = "0.6.3"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
++dependencies = [
++ "getrandom",
++]
++
++[[package]]
++name = "rand_hc"
++version = "0.3.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
++dependencies = [
++ "rand_core",
++]
++
++[[package]]
++name = "redox_syscall"
++version = "0.2.9"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee"
++dependencies = [
++ "bitflags",
++]
++
++[[package]]
++name = "regex"
++version = "1.5.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
++dependencies = [
++ "aho-corasick",
++ "memchr",
++ "regex-syntax",
++]
++
++[[package]]
++name = "regex-syntax"
++version = "0.6.25"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
++
++[[package]]
++name = "remove_dir_all"
++version = "0.5.3"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
++dependencies = [
++ "winapi",
++]
++
++[[package]]
++name = "rustc_codegen_gcc"
++version = "0.1.0"
++dependencies = [
++ "ar",
++ "gccjit",
++ "lang_tester",
++ "object",
++ "target-lexicon",
++ "tempfile",
++]
++
++[[package]]
++name = "same-file"
++version = "1.0.6"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
++dependencies = [
++ "winapi-util",
++]
++
++[[package]]
++name = "target-lexicon"
++version = "0.10.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d"
++
++[[package]]
++name = "tempfile"
++version = "3.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
++dependencies = [
++ "cfg-if",
++ "libc 0.2.98",
++ "rand",
++ "redox_syscall",
++ "remove_dir_all",
++ "winapi",
++]
++
++[[package]]
++name = "termcolor"
++version = "1.1.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
++dependencies = [
++ "winapi-util",
++]
++
++[[package]]
++name = "threadpool"
++version = "1.8.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
++dependencies = [
++ "num_cpus",
++]
++
++[[package]]
++name = "unicode-width"
++version = "0.1.8"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
++
++[[package]]
++name = "wait-timeout"
++version = "0.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
++dependencies = [
++ "libc 0.2.98",
++]
++
++[[package]]
++name = "walkdir"
++version = "2.3.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
++dependencies = [
++ "same-file",
++ "winapi",
++ "winapi-util",
++]
++
++[[package]]
++name = "wasi"
++version = "0.10.2+wasi-snapshot-preview1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
++
++[[package]]
++name = "winapi"
++version = "0.3.9"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
++dependencies = [
++ "winapi-i686-pc-windows-gnu",
++ "winapi-x86_64-pc-windows-gnu",
++]
++
++[[package]]
++name = "winapi-i686-pc-windows-gnu"
++version = "0.4.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
++
++[[package]]
++name = "winapi-util"
++version = "0.1.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
++dependencies = [
++ "winapi",
++]
++
++[[package]]
++name = "winapi-x86_64-pc-windows-gnu"
++version = "0.4.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
--- /dev/null
+++ b/Cargo.toml
++[package]
++name = "rustc_codegen_gcc"
++version = "0.1.0"
++authors = ["Antoni Boucher <bouanto@zoho.com>"]
++edition = "2018"
++license = "MIT OR Apache-2.0"
++
++[lib]
++crate-type = ["dylib"]
++
++[[test]]
++name = "lang_tests"
++path = "tests/lib.rs"
++harness = false
++
++[dependencies]
++gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
++
++# Local copy.
++#gccjit = { path = "../gccjit.rs" }
++
++target-lexicon = "0.10.0"
++
++ar = "0.8.0"
++
++[dependencies.object]
++version = "0.25.0"
++default-features = false
++features = ["read", "std", "write"] # We don't need WASM support.
++
++[dev-dependencies]
++lang_tester = "0.3.9"
++tempfile = "3.1.0"
++
++[profile.dev]
++# Compiling dependencies with optimizations makes running the tests much faster.
++opt-level = 3
++
++[profile.dev.package.rustc_codegen_gcc]
++# Disabling optimizations for cg_gccjit itself makes compilation after a change faster.
++opt-level = 0
++
++# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
++# execution time of build scripts is so fast that optimizing them slows down the total build time.
++[profile.dev.build-override]
++opt-level = 0
++debug = false
++
++[profile.release.build-override]
++opt-level = 0
++debug = false
--- /dev/null
+++ b/LICENSE-APACHE
++ Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
--- /dev/null
+++ b/LICENSE-MIT
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
--- /dev/null
+++ b/Readme.md
++# WIP libgccjit codegen backend for Rust
++
++This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend but benefits from GCC: more architectures are supported and GCC's optimizations are used.
++
++**Despite its name, libgccjit can be used for ahead-of-time compilation, as is done here.**
++
++## Motivation
++
++The primary goal of this project is to be able to compile Rust code on platforms unsupported by LLVM.
++A secondary goal is to check whether using the GCC backend provides any run-time speed improvement for programs compiled with rustc.
++
++## Building
++
++**This requires a patched libgccjit in order to work.
++The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
++(Those patches should work when applied on master, but in case they don't, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
++You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
++
++**Put the path to your custom build of libgccjit in the file `gcc_path`.**
++
++```bash
++$ git clone https://github.com/antoyo/rustc_codegen_gcc.git
++$ cd rustc_codegen_gcc
++$ ./prepare_build.sh # download and patch sysroot src
++$ ./build.sh --release
++```
++
++To run the tests:
++
++```bash
++$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
++$ ./test.sh --release
++```
++
++## Usage
++
++In the following instructions, `$cg_gccjit_dir` is the directory into which you cloned this repo.
++
++### Cargo
++
++```bash
++$ CHANNEL="release" $cg_gccjit_dir/cargo.sh run
++```
++
++If you compiled cg_gccjit in debug mode (i.e. you didn't pass `--release` to `./test.sh`), use `CHANNEL="debug"` instead, or omit `CHANNEL="release"` entirely.
++
++### Rustc
++
++> You should prefer using the Cargo method.
++
++```bash
++$ rustc +$(cat $cg_gccjit_dir/rust-toolchain) -Cpanic=abort -Zcodegen-backend=$cg_gccjit_dir/target/release/librustc_codegen_gcc.so --sysroot $cg_gccjit_dir/build_sysroot/sysroot my_crate.rs
++```
++
++## Env vars
++
++<dl>
++ <dt>CG_GCCJIT_INCR_CACHE_DISABLED</dt>
++ <dd>Don't cache object files in the incremental cache. Useful during development of cg_gccjit:
++ rustc still runs all of its analyses incrementally, but object files are never reused when a
++ change to cg_gccjit would have altered their content.</dd>
++ <dt>CG_GCCJIT_DISPLAY_CG_TIME</dt>
++ <dd>Display the time it took to perform codegen for a crate</dd>
++</dl>
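++
++For example, to time codegen while building a crate with this backend (a hypothetical invocation; any crate built through `cargo.sh` works):
++
++```bash
++$ CG_GCCJIT_DISPLAY_CG_TIME=1 CHANNEL="release" $cg_gccjit_dir/cargo.sh build
++```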
++
++## Debugging
++
++Sometimes, libgccjit will crash and output an error like this:
++
++```
++during RTL pass: expand
++libgccjit.so: error: in expmed_mode_index, at expmed.h:249
++0x7f0da2e61a35 expmed_mode_index
++ ../../../gcc/gcc/expmed.h:249
++0x7f0da2e61aa4 expmed_op_cost_ptr
++ ../../../gcc/gcc/expmed.h:271
++0x7f0da2e620dc sdiv_cost_ptr
++ ../../../gcc/gcc/expmed.h:540
++0x7f0da2e62129 sdiv_cost
++ ../../../gcc/gcc/expmed.h:558
++0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
++ ../../../gcc/gcc/expmed.c:4335
++0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
++ ../../../gcc/gcc/expr.c:9240
++0x7f0da2cd1a1e expand_gimple_stmt_1
++ ../../../gcc/gcc/cfgexpand.c:3796
++0x7f0da2cd1c30 expand_gimple_stmt
++ ../../../gcc/gcc/cfgexpand.c:3857
++0x7f0da2cd90a9 expand_gimple_basic_block
++ ../../../gcc/gcc/cfgexpand.c:5898
++0x7f0da2cdade8 execute
++ ../../../gcc/gcc/cfgexpand.c:6582
++```
++
++To see the code which causes this error, call the following function:
++
++```c
++gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
++```
++
++This will create a C-like file and add locations to the IR that point into this C file.
++Then, rerun the program and it will output the location as part of the error message:
++
++```
++libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
++```
++
++Or add a breakpoint to `add_error` in gdb and print the line number using:
++
++```
++p loc->m_line
++```
++
++### How to use a custom-built rustc
++
++ * Build the stage1 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage1`).
++ * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`.
++ * Add `~/.rustup/toolchains/debug-current/lib/rustlib/x86_64-unknown-linux-gnu/lib` to `LD_LIBRARY_PATH`.
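++
++A minimal sketch of those steps (`$rust_dir`, the rustc checkout, is a placeholder):
++
++```bash
++$ cd $rust_dir && rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage1
++$ cd $cg_gccjit_dir && echo 'debug-current' > rust-toolchain
++$ export LD_LIBRARY_PATH=~/.rustup/toolchains/debug-current/lib/rustlib/x86_64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH
++$ ./clean_all.sh && ./prepare_build.sh && ./build.sh
++```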
--- /dev/null
+++ b/build.sh
++#!/bin/bash
++
++#set -x
++set -e
++
++export GCC_PATH=$(cat gcc_path)
++
++export LD_LIBRARY_PATH="$GCC_PATH"
++export LIBRARY_PATH="$GCC_PATH"
++
++if [[ "$1" == "--release" ]]; then
++ export CHANNEL='release'
++ CARGO_INCREMENTAL=1 cargo rustc --release
++else
++ echo $LD_LIBRARY_PATH
++ export CHANNEL='debug'
++ cargo rustc
++fi
++
++source config.sh
++
++rm -r target/out || true
++mkdir -p target/out/gccjit
++
++echo "[BUILD] sysroot"
++time ./build_sysroot/build_sysroot.sh $CHANNEL
--- /dev/null
+++ b/build_sysroot/Cargo.toml
++[package]
++authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
++name = "sysroot"
++version = "0.0.0"
++
++[dependencies]
++core = { path = "./sysroot_src/library/core" }
++compiler_builtins = "0.1"
++alloc = { path = "./sysroot_src/library/alloc" }
++std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
++test = { path = "./sysroot_src/library/test" }
++
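++# Redirect the rustc-std-workspace-* shim crates to the local sysroot sources so
++# that the standard library and all of its dependencies are built from ./sysroot_src.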
++[patch.crates-io]
++rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
++rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
++rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
++
++[profile.release]
++debug = true
--- /dev/null
+++ b/build_sysroot/build_sysroot.sh
++#!/bin/bash
++
++# Requires the CHANNEL env var to be set to `debug` or `release`.
++
++set -e
++cd $(dirname "$0")
++
++pushd ../ >/dev/null
++source ./config.sh
++popd >/dev/null
++
++# Cleanup for previous run
++# v Clean target dir except for build scripts and incremental cache
++rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
++rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
++rm -r sysroot/ 2>/dev/null || true
++
++# Build libs
++export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort"
++if [[ "$1" == "--release" ]]; then
++ sysroot_channel='release'
++ RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
++else
++ sysroot_channel='debug'
++ cargo build --target $TARGET_TRIPLE
++fi
++
++# Copy files to sysroot
++mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
++cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
--- /dev/null
+++ b/build_sysroot/prepare_sysroot_src.sh
++#!/bin/bash
++set -e
++cd $(dirname "$0")
++
++SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
++DST_DIR="sysroot_src"
++
++if [ ! -e $SRC_DIR ]; then
++ echo "Please install rust-src component"
++ exit 1
++fi
++
++rm -rf $DST_DIR
++mkdir -p $DST_DIR/library
++cp -r $SRC_DIR/library $DST_DIR/
++
++pushd $DST_DIR
++echo "[GIT] init"
++git init
++echo "[GIT] add"
++git add .
++echo "[GIT] commit"
++
++# This is needed on virgin system where nothing is configured.
++# git really needs something here, or it will fail.
++# Even using --author is not enough.
++git config user.email || git config user.email "none@example.com"
++git config user.name || git config user.name "None"
++
++git commit -m "Initial commit" -q
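++
++# Apply each patch as a separate commit, so individual patches are easy to inspect.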
++for file in $(ls ../../patches/ | grep -v patcha); do
++echo "[GIT] apply" $file
++git apply ../../patches/$file
++git add -A
++git commit --no-gpg-sign -m "Patch $file"
++done
++popd
++
++echo "Successfully prepared libcore for building"
--- /dev/null
+++ b/build_sysroot/src/lib.rs
++#![no_std]
--- /dev/null
+++ b/cargo.sh
++#!/bin/bash
++
++if [ -z "$CHANNEL" ]; then
++export CHANNEL='debug'
++fi
++
++pushd $(dirname "$0") >/dev/null
++source config.sh
++
++# read nightly compiler from rust-toolchain file
++TOOLCHAIN=$(cat rust-toolchain)
++
++popd >/dev/null
++
++if [[ $(rustc -V) != $(rustc +${TOOLCHAIN} -V) ]]; then
++ echo "rustc_codegen_gcc is build for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
++ echo "Using $(rustc +${TOOLCHAIN} -V)."
++fi
++
++cmd=$1
++shift
++
++RUSTDOCFLAGS=$RUSTFLAGS cargo +${TOOLCHAIN} $cmd --target $TARGET_TRIPLE $@
--- /dev/null
+++ b/clean_all.sh
++#!/bin/bash --verbose
++set -e
++
++rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
++rm -rf regex/ simple-raytracer/
--- /dev/null
+++ b/config.sh
++set -e
++
++export CARGO_INCREMENTAL=0
++
++export GCC_PATH=$(cat gcc_path)
++
++unamestr=`uname`
++if [[ "$unamestr" == 'Linux' ]]; then
++ dylib_ext='so'
++elif [[ "$unamestr" == 'Darwin' ]]; then
++ dylib_ext='dylib'
++else
++ echo "Unsupported os"
++ exit 1
++fi
++
++HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
++TARGET_TRIPLE=$HOST_TRIPLE
++#TARGET_TRIPLE="aarch64-unknown-linux-gnu"
++
++linker=''
++RUN_WRAPPER=''
++if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
++ if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
++ # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
++ linker='-Clinker=aarch64-linux-gnu-gcc'
++ RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
++ else
++ echo "Unknown non-native platform"
++ fi
++fi
++
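++# Load this repo's backend into rustc via -Zcodegen-backend and use the sysroot
++# built by build_sysroot.sh; panics abort since unwinding is not supported yet.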
++export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
++#export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot -Clto=fat -Cembed-bitcode=yes'
++
++# FIXME remove once the atomic shim is gone
++if [[ `uname` == 'Darwin' ]]; then
++ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
++fi
++
++RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
++export RUSTC_LOG=warn # display metadata load errors
++
++export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
++export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
++
++export CG_CLIF_DISPLAY_CG_TIME=1
++export CG_CLIF_INCR_CACHE_DISABLED=1
--- /dev/null
+++ b/example/alloc_example.rs
++#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
++#![no_std]
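++
++// Minimal no_std program: a libc-backed `#[global_allocator]` plus a boxed
++// string printed through `puts`, exercising allocator and `box` codegen.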
++
++extern crate alloc;
++extern crate alloc_system;
++
++use alloc::prelude::v1::*;
++
++use alloc_system::System;
++
++#[global_allocator]
++static ALLOC: System = System;
++
++#[link(name = "c")]
++extern "C" {
++ fn puts(s: *const u8) -> i32;
++}
++
++#[panic_handler]
++fn panic_handler(_: &core::panic::PanicInfo) -> ! {
++ unsafe {
++ core::intrinsics::abort();
++ }
++}
++
++#[alloc_error_handler]
++fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
++ unsafe {
++ core::intrinsics::abort();
++ }
++}
++
++#[start]
++fn main(_argc: isize, _argv: *const *const u8) -> isize {
++ let world: Box<&str> = box "Hello World!\0";
++ unsafe {
++ puts(*world as *const str as *const u8);
++ }
++
++ 0
++}
--- /dev/null
+++ b/example/alloc_system.rs
++// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++#![no_std]
++#![feature(allocator_api, rustc_private)]
++#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
++
++// The minimum alignment guaranteed by the architecture. This value is used to
++// add fast paths for low alignment values.
++#[cfg(all(any(target_arch = "x86",
++ target_arch = "arm",
++ target_arch = "mips",
++ target_arch = "powerpc",
++ target_arch = "powerpc64")))]
++const MIN_ALIGN: usize = 8;
++#[cfg(all(any(target_arch = "x86_64",
++ target_arch = "aarch64",
++ target_arch = "mips64",
++ target_arch = "s390x",
++ target_arch = "sparc64")))]
++const MIN_ALIGN: usize = 16;
++
++pub struct System;
++#[cfg(any(windows, unix, target_os = "redox"))]
++mod realloc_fallback {
++ use core::alloc::{GlobalAlloc, Layout};
++ use core::cmp;
++ use core::ptr;
++ impl super::System {
++ pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
++ new_size: usize) -> *mut u8 {
++ // Docs for GlobalAlloc::realloc require this to be valid:
++ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
++ let new_ptr = GlobalAlloc::alloc(self, new_layout);
++ if !new_ptr.is_null() {
++ let size = cmp::min(old_layout.size(), new_size);
++ ptr::copy_nonoverlapping(ptr, new_ptr, size);
++ GlobalAlloc::dealloc(self, ptr, old_layout);
++ }
++ new_ptr
++ }
++ }
++}
++#[cfg(any(unix, target_os = "redox"))]
++mod platform {
++ extern crate libc;
++ use core::ptr;
++ use MIN_ALIGN;
++ use System;
++ use core::alloc::{GlobalAlloc, Layout};
++ unsafe impl GlobalAlloc for System {
++ #[inline]
++ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
++ libc::malloc(layout.size()) as *mut u8
++ } else {
++ #[cfg(target_os = "macos")]
++ {
++ if layout.align() > (1 << 31) {
++ return ptr::null_mut()
++ }
++ }
++ aligned_malloc(&layout)
++ }
++ }
++ #[inline]
++ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
++ libc::calloc(layout.size(), 1) as *mut u8
++ } else {
++ let ptr = self.alloc(layout.clone());
++ if !ptr.is_null() {
++ ptr::write_bytes(ptr, 0, layout.size());
++ }
++ ptr
++ }
++ }
++ #[inline]
++ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
++ libc::free(ptr as *mut libc::c_void)
++ }
++ #[inline]
++ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
++ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
++ } else {
++ self.realloc_fallback(ptr, layout, new_size)
++ }
++ }
++ }
++ #[cfg(any(target_os = "android",
++ target_os = "hermit",
++ target_os = "redox",
++ target_os = "solaris"))]
++ #[inline]
++ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
++ // On android we currently target API level 9 which unfortunately
++ // doesn't have the `posix_memalign` API used below. Instead we use
++ // `memalign`, but this unfortunately has the property on some systems
++ // where the memory returned cannot be deallocated by `free`!
++ //
++ // Upon closer inspection, however, this appears to work just fine with
++ // Android, so for this platform we should be fine to call `memalign`
++ // (which is present in API level 9). Some helpful references could
++ // possibly be chromium using memalign [1], attempts at documenting that
++ // memalign + free is ok [2] [3], or the current source of chromium
++ // which still uses memalign on android [4].
++ //
++ // [1]: https://codereview.chromium.org/10796020/
++ // [2]: https://code.google.com/p/android/issues/detail?id=35391
++ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
++ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
++ // /memory/aligned_memory.cc
++ libc::memalign(layout.align(), layout.size()) as *mut u8
++ }
++ #[cfg(not(any(target_os = "android",
++ target_os = "hermit",
++ target_os = "redox",
++ target_os = "solaris")))]
++ #[inline]
++ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
++ let mut out = ptr::null_mut();
++ let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
++ if ret != 0 {
++ ptr::null_mut()
++ } else {
++ out as *mut u8
++ }
++ }
++}
++#[cfg(windows)]
++#[allow(nonstandard_style)]
++mod platform {
++ use MIN_ALIGN;
++ use System;
++ use core::alloc::{GlobalAlloc, Layout};
++ type LPVOID = *mut u8;
++ type HANDLE = LPVOID;
++ type SIZE_T = usize;
++ type DWORD = u32;
++ type BOOL = i32;
++ extern "system" {
++ fn GetProcessHeap() -> HANDLE;
++ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
++ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
++ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
++ fn GetLastError() -> DWORD;
++ }
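++ // Over-aligned allocations over-allocate, align the pointer within the block,
++ // and stash the original pointer in a Header just before the aligned address
++ // so that dealloc/realloc can recover it (see align_ptr/get_header below).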
++ #[repr(C)]
++ struct Header(*mut u8);
++ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
++ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
++ &mut *(ptr as *mut Header).offset(-1)
++ }
++ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
++ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
++ *get_header(aligned) = Header(ptr);
++ aligned
++ }
++ #[inline]
++ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
++ let ptr = if layout.align() <= MIN_ALIGN {
++ HeapAlloc(GetProcessHeap(), flags, layout.size())
++ } else {
++ let size = layout.size() + layout.align();
++ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
++ if ptr.is_null() {
++ ptr
++ } else {
++ align_ptr(ptr, layout.align())
++ }
++ };
++ ptr as *mut u8
++ }
++ unsafe impl GlobalAlloc for System {
++ #[inline]
++ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
++ allocate_with_flags(layout, 0)
++ }
++ #[inline]
++ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
++ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
++ }
++ #[inline]
++ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
++ if layout.align() <= MIN_ALIGN {
++ let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
++ debug_assert!(err != 0, "Failed to free heap memory: {}",
++ GetLastError());
++ } else {
++ let header = get_header(ptr);
++ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
++ debug_assert!(err != 0, "Failed to free heap memory: {}",
++ GetLastError());
++ }
++ }
++ #[inline]
++ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
++ if layout.align() <= MIN_ALIGN {
++ HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
++ } else {
++ self.realloc_fallback(ptr, layout, new_size)
++ }
++ }
++ }
++}
--- /dev/null
+++ b/example/arbitrary_self_types_pointers_and_wrappers.rs
++// Adapted from rustc run-pass test suite
++
++#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
++#![feature(rustc_attrs)]
++
++use std::{
++ ops::{Deref, CoerceUnsized, DispatchFromDyn},
++ marker::Unsize,
++};
++
++struct Ptr<T: ?Sized>(Box<T>);
++
++impl<T: ?Sized> Deref for Ptr<T> {
++ type Target = T;
++
++ fn deref(&self) -> &T {
++ &*self.0
++ }
++}
++
++impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
++impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
++
++struct Wrapper<T: ?Sized>(T);
++
++impl<T: ?Sized> Deref for Wrapper<T> {
++ type Target = T;
++
++ fn deref(&self) -> &T {
++ &self.0
++ }
++}
++
++impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
++impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
++
++
++trait Trait {
++ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
++ // without unsized_locals), but wrappers around `Self` currently are not.
++ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
++ // fn wrapper(self: Wrapper<Self>) -> i32;
++ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
++ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
++ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
++}
++
++impl Trait for i32 {
++ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
++ **self
++ }
++ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
++ **self
++ }
++ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
++ ***self
++ }
++}
++
++fn main() {
++ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
++ assert_eq!(pw.ptr_wrapper(), 5);
++
++ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
++ assert_eq!(wp.wrapper_ptr(), 6);
++
++ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
++ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
++}
--- /dev/null
+++ b/example/dst-field-align.rs
++// run-pass
++#![allow(dead_code)]
++struct Foo<T: ?Sized> {
++ a: u16,
++ b: T
++}
++
++trait Bar {
++ fn get(&self) -> usize;
++}
++
++impl Bar for usize {
++ fn get(&self) -> usize { *self }
++}
++
++struct Baz<T: ?Sized> {
++ a: T
++}
++
++struct HasDrop<T: ?Sized> {
++ ptr: Box<usize>,
++ data: T
++}
++
++fn main() {
++ // Test that zero-offset works properly
++ let b : Baz<usize> = Baz { a: 7 };
++ assert_eq!(b.a.get(), 7);
++ let b : &Baz<dyn Bar> = &b;
++ assert_eq!(b.a.get(), 7);
++
++ // Test that the field is aligned properly
++ let f : Foo<usize> = Foo { a: 0, b: 11 };
++ assert_eq!(f.b.get(), 11);
++ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
++
++ let f : &Foo<dyn Bar> = &f;
++ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
++ assert_eq!(f.b.get(), 11);
++
++ // The pointers should be the same
++ assert_eq!(ptr1, ptr2);
++
++ // Test that nested DSTs work properly
++ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
++ assert_eq!(f.b.b.get(), 17);
++ let f : &Foo<Foo<dyn Bar>> = &f;
++ assert_eq!(f.b.b.get(), 17);
++
++ // Test that get the pointer via destructuring works
++
++ let f : Foo<usize> = Foo { a: 0, b: 11 };
++ let f : &Foo<dyn Bar> = &f;
++ let &Foo { a: _, b: ref bar } = f;
++ assert_eq!(bar.get(), 11);
++
++ // Make sure that drop flags don't screw things up
++
++ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
++ ptr: Box::new(0),
++ data: Baz { a: [1,2,3,4] }
++ };
++ assert_eq!([1,2,3,4], d.data.a);
++
++ let d : &HasDrop<Baz<[i32]>> = &d;
++ assert_eq!(&[1,2,3,4], &d.data.a);
++}
--- /dev/null
+++ b/example/example.rs
++#![feature(no_core, unboxed_closures)]
++#![no_core]
++#![allow(dead_code)]
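++
++// A collection of small functions, each exercising a single codegen feature
++// (casts, closures, intrinsics, unsized types, promoteds).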
++
++extern crate mini_core;
++
++use mini_core::*;
++
++fn abc(a: u8) -> u8 {
++ a * 2
++}
++
++fn bcd(b: bool, a: u8) -> u8 {
++ if b {
++ a * 2
++ } else {
++ a * 3
++ }
++}
++
++fn call() {
++ abc(42);
++}
++
++fn indirect_call() {
++ let f: fn() = call;
++ f();
++}
++
++enum BoolOption {
++ Some(bool),
++ None,
++}
++
++fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
++ match o {
++ BoolOption::Some(b) => b,
++ BoolOption::None => d,
++ }
++}
++
++fn ret_42() -> u8 {
++ 42
++}
++
++fn return_str() -> &'static str {
++ "hello world"
++}
++
++fn promoted_val() -> &'static u8 {
++ &(1 * 2)
++}
++
++fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
++ abc as *const u8
++}
++
++fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
++ a == b
++}
++
++fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
++ (
++ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
++ b as u32,
++ )
++}
++
++fn char_cast(c: char) -> u8 {
++ c as u8
++}
++
++pub struct DebugTuple(());
++
++fn debug_tuple() -> DebugTuple {
++ DebugTuple(())
++}
++
++fn size_of<T>() -> usize {
++ intrinsics::size_of::<T>()
++}
++
++fn use_size_of() -> usize {
++ size_of::<u64>()
++}
++
++unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
++ intrinsics::copy::<u8>(src, dst, 1);
++}
++
++unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
++ let copy2 = &intrinsics::copy::<u8>;
++ copy2(src, dst, 1);
++}
++
++const ABC: u8 = 6 * 7;
++
++fn use_const() -> u8 {
++ ABC
++}
++
++pub fn call_closure_3arg() {
++ (|_, _, _| {})(0u8, 42u16, 0u8)
++}
++
++pub fn call_closure_2arg() {
++ (|_, _| {})(0u8, 42u16)
++}
++
++struct IsNotEmpty;
++
++impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
++ type Output = (u8, u8);
++
++ #[inline]
++ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
++ self.call_mut(arg)
++ }
++}
++
++impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
++ #[inline]
++ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
++ (0, 42)
++ }
++}
++
++pub fn call_is_not_empty() {
++ IsNotEmpty.call_once((&(&[0u16] as &[_]),));
++}
++
++fn eq_char(a: char, b: char) -> bool {
++ a == b
++}
++
++unsafe fn transmute(c: char) -> u32 {
++ intrinsics::transmute(c)
++}
++
++unsafe fn deref_str_ptr(s: *const str) -> &'static str {
++ &*s
++}
++
++fn use_array(arr: [u8; 3]) -> u8 {
++ arr[1]
++}
++
++fn repeat_array() -> [u8; 3] {
++ [0; 3]
++}
++
++fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
++ arr
++}
++
++unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
++ intrinsics::ctlz_nonzero(a)
++}
++
++fn ptr_as_usize(ptr: *const u8) -> usize {
++ ptr as usize
++}
++
++fn float_cast(a: f32, b: f64) -> (f64, f32) {
++ (a as f64, b as f32)
++}
++
++fn int_to_float(a: u8, b: i32) -> (f64, f32) {
++ (a as f64, b as f32)
++}
++
++fn make_array() -> [u8; 3] {
++ [42, 0, 5]
++}
++
++fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
++ &("abc", "some")
++}
++
++fn index_slice(s: &[u8]) -> u8 {
++ s[2]
++}
++
++pub struct StrWrapper {
++ s: str,
++}
++
++fn str_wrapper_get(w: &StrWrapper) -> &str {
++ &w.s
++}
++
++fn i16_as_i8(a: i16) -> i8 {
++ a as i8
++}
++
++struct Unsized(u8, str);
++
++fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
++ &u.0
++}
++
++fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
++ &u.1
++}
++
++pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
++ a.0
++}
--- /dev/null
+++ b/example/mini_core.rs
++#![feature(
++ no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
++ untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
++ thread_local
++)]
++#![no_core]
++#![allow(dead_code)]
++
++#[no_mangle]
++unsafe extern "C" fn _Unwind_Resume() {
++ intrinsics::unreachable();
++}
++
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "unsize"]
++pub trait Unsize<T: ?Sized> {}
++
++#[lang = "coerce_unsized"]
++pub trait CoerceUnsized<T> {}
++
++impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
++impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
++
++#[lang = "dispatch_from_dyn"]
++pub trait DispatchFromDyn<T> {}
++
++// &T -> &U
++impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
++// &mut T -> &mut U
++impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
++// *const T -> *const U
++impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
++// *mut T -> *mut U
++impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
++
++#[lang = "receiver"]
++pub trait Receiver {}
++
++impl<T: ?Sized> Receiver for &T {}
++impl<T: ?Sized> Receiver for &mut T {}
++impl<T: ?Sized> Receiver for Box<T> {}
++
++#[lang = "copy"]
++pub unsafe trait Copy {}
++
++unsafe impl Copy for bool {}
++unsafe impl Copy for u8 {}
++unsafe impl Copy for u16 {}
++unsafe impl Copy for u32 {}
++unsafe impl Copy for u64 {}
++unsafe impl Copy for usize {}
++unsafe impl Copy for i8 {}
++unsafe impl Copy for i16 {}
++unsafe impl Copy for i32 {}
++unsafe impl Copy for isize {}
++unsafe impl Copy for f32 {}
++unsafe impl Copy for char {}
++unsafe impl<'a, T: ?Sized> Copy for &'a T {}
++unsafe impl<T: ?Sized> Copy for *const T {}
++unsafe impl<T: ?Sized> Copy for *mut T {}
++
++#[lang = "sync"]
++pub unsafe trait Sync {}
++
++unsafe impl Sync for bool {}
++unsafe impl Sync for u8 {}
++unsafe impl Sync for u16 {}
++unsafe impl Sync for u32 {}
++unsafe impl Sync for u64 {}
++unsafe impl Sync for usize {}
++unsafe impl Sync for i8 {}
++unsafe impl Sync for i16 {}
++unsafe impl Sync for i32 {}
++unsafe impl Sync for isize {}
++unsafe impl Sync for char {}
++unsafe impl<'a, T: ?Sized> Sync for &'a T {}
++unsafe impl Sync for [u8; 16] {}
++
++#[lang = "freeze"]
++unsafe auto trait Freeze {}
++
++unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
++unsafe impl<T: ?Sized> Freeze for *const T {}
++unsafe impl<T: ?Sized> Freeze for *mut T {}
++unsafe impl<T: ?Sized> Freeze for &T {}
++unsafe impl<T: ?Sized> Freeze for &mut T {}
++
++#[lang = "structural_peq"]
++pub trait StructuralPartialEq {}
++
++#[lang = "structural_teq"]
++pub trait StructuralEq {}
++
++#[lang = "not"]
++pub trait Not {
++ type Output;
++
++ fn not(self) -> Self::Output;
++}
++
++impl Not for bool {
++ type Output = bool;
++
++ fn not(self) -> bool {
++ !self
++ }
++}
++
++#[lang = "mul"]
++pub trait Mul<RHS = Self> {
++ type Output;
++
++ #[must_use]
++ fn mul(self, rhs: RHS) -> Self::Output;
++}
++
++impl Mul for u8 {
++ type Output = Self;
++
++ fn mul(self, rhs: Self) -> Self::Output {
++ self * rhs
++ }
++}
++
++impl Mul for usize {
++ type Output = Self;
++
++ fn mul(self, rhs: Self) -> Self::Output {
++ self * rhs
++ }
++}
++
++#[lang = "add"]
++pub trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++#[lang = "rem"]
++pub trait Rem<RHS = Self> {
++ type Output;
++
++ fn rem(self, rhs: RHS) -> Self::Output;
++}
++
++impl Rem for usize {
++ type Output = Self;
++
++ fn rem(self, rhs: Self) -> Self {
++ self % rhs
++ }
++}
++
++#[lang = "bitor"]
++pub trait BitOr<RHS = Self> {
++ type Output;
++
++ #[must_use]
++ fn bitor(self, rhs: RHS) -> Self::Output;
++}
++
++impl BitOr for bool {
++ type Output = bool;
++
++ fn bitor(self, rhs: bool) -> bool {
++ self | rhs
++ }
++}
++
++impl<'a> BitOr<bool> for &'a bool {
++ type Output = bool;
++
++ fn bitor(self, rhs: bool) -> bool {
++ *self | rhs
++ }
++}
++
++#[lang = "eq"]
++pub trait PartialEq<Rhs: ?Sized = Self> {
++ fn eq(&self, other: &Rhs) -> bool;
++ fn ne(&self, other: &Rhs) -> bool;
++}
++
++impl PartialEq for u8 {
++ fn eq(&self, other: &u8) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u8) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for u16 {
++ fn eq(&self, other: &u16) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u16) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for u32 {
++ fn eq(&self, other: &u32) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u32) -> bool {
++ (*self) != (*other)
++ }
++}
++
++
++impl PartialEq for u64 {
++ fn eq(&self, other: &u64) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u64) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for usize {
++ fn eq(&self, other: &usize) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &usize) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for i8 {
++ fn eq(&self, other: &i8) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &i8) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for i32 {
++ fn eq(&self, other: &i32) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &i32) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for isize {
++ fn eq(&self, other: &isize) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &isize) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for char {
++ fn eq(&self, other: &char) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &char) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl<T: ?Sized> PartialEq for *const T {
++ fn eq(&self, other: &*const T) -> bool {
++ *self == *other
++ }
++ fn ne(&self, other: &*const T) -> bool {
++ *self != *other
++ }
++}
++
++#[lang = "neg"]
++pub trait Neg {
++ type Output;
++
++ fn neg(self) -> Self::Output;
++}
++
++impl Neg for i8 {
++ type Output = i8;
++
++ fn neg(self) -> i8 {
++ -self
++ }
++}
++
++impl Neg for i16 {
++ type Output = i16;
++
++ fn neg(self) -> i16 {
++ -self
++ }
++}
++
++impl Neg for isize {
++ type Output = isize;
++
++ fn neg(self) -> isize {
++ -self
++ }
++}
++
++impl Neg for f32 {
++ type Output = f32;
++
++ fn neg(self) -> f32 {
++ -self
++ }
++}
++
++pub enum Option<T> {
++ Some(T),
++ None,
++}
++
++pub use Option::*;
++
++#[lang = "phantom_data"]
++pub struct PhantomData<T: ?Sized>;
++
++#[lang = "fn_once"]
++#[rustc_paren_sugar]
++pub trait FnOnce<Args> {
++ #[lang = "fn_once_output"]
++ type Output;
++
++ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
++}
++
++#[lang = "fn_mut"]
++#[rustc_paren_sugar]
++pub trait FnMut<Args>: FnOnce<Args> {
++ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
++}
++
++#[lang = "panic"]
++#[track_caller]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\n\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "eh_personality"]
++fn eh_personality() -> ! {
++ loop {}
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "deref"]
++pub trait Deref {
++ type Target: ?Sized;
++
++ fn deref(&self) -> &Self::Target;
++}
++
++#[lang = "owned_box"]
++pub struct Box<T: ?Sized>(*mut T);
++
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
++
++impl<T: ?Sized> Drop for Box<T> {
++ fn drop(&mut self) {
++ // drop is currently performed by compiler.
++ }
++}
++
++impl<T> Deref for Box<T> {
++ type Target = T;
++
++ fn deref(&self) -> &Self::Target {
++ &**self
++ }
++}
++
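++// `box EXPR` allocates through the `exchange_malloc` lang item below, and the
++// drop glue for `Box` releases the allocation through `box_free`; in this
++// mini_core both map straight to libc's malloc and free.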
++#[lang = "exchange_malloc"]
++unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
++ libc::malloc(size)
++}
++
++#[lang = "box_free"]
++unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
++ libc::free(ptr as *mut u8);
++}
++
++#[lang = "drop"]
++pub trait Drop {
++ fn drop(&mut self);
++}
++
++#[lang = "manually_drop"]
++#[repr(transparent)]
++pub struct ManuallyDrop<T: ?Sized> {
++ pub value: T,
++}
++
++#[lang = "maybe_uninit"]
++#[repr(transparent)]
++pub union MaybeUninit<T> {
++ pub uninit: (),
++ pub value: ManuallyDrop<T>,
++}
++
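++// Compiler intrinsics: items declared in an `extern "rust-intrinsic"` block
++// are lowered directly by the codegen backend instead of being linked in.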
++pub mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ pub fn size_of<T>() -> usize;
++ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
++ pub fn min_align_of<T>() -> usize;
++ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
++ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
++ pub fn transmute<T, U>(e: T) -> U;
++ pub fn ctlz_nonzero<T>(x: T) -> T;
++ pub fn needs_drop<T>() -> bool;
++ pub fn bitreverse<T>(x: T) -> T;
++ pub fn bswap<T>(x: T) -> T;
++ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
++ pub fn unreachable() -> !;
++ }
++}
++
++pub mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn puts(s: *const u8) -> i32;
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn malloc(size: usize) -> *mut u8;
++ pub fn free(ptr: *mut u8);
++ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
++ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
++ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++extern {
++ type VaListImpl;
++}
++
++#[lang = "va_list"]
++#[repr(transparent)]
++pub struct VaList<'a>(&'a mut VaListImpl);
++
++#[rustc_builtin_macro]
++#[rustc_macro_transparency = "semitransparent"]
++pub macro stringify($($t:tt)*) { /* compiler built-in */ }
++
++#[rustc_builtin_macro]
++#[rustc_macro_transparency = "semitransparent"]
++pub macro file() { /* compiler built-in */ }
++
++#[rustc_builtin_macro]
++#[rustc_macro_transparency = "semitransparent"]
++pub macro line() { /* compiler built-in */ }
++
++#[rustc_builtin_macro]
++#[rustc_macro_transparency = "semitransparent"]
++pub macro cfg() { /* compiler built-in */ }
++
++pub static A_STATIC: u8 = 42;
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[no_mangle]
++pub fn get_tls() -> u8 {
++ #[thread_local]
++ static A: u8 = 42;
++
++ A
++}
--- /dev/null
--- /dev/null
++// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
++
++#![feature(
++ no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage,
++ extern_types, thread_local
++)]
++#![no_core]
++#![allow(dead_code, non_camel_case_types)]
++
++extern crate mini_core;
++
++use mini_core::*;
++use mini_core::libc::*;
++
++unsafe extern "C" fn my_puts(s: *const u8) {
++ puts(s);
++}
++
++#[lang = "termination"]
++trait Termination {
++ fn report(self) -> i32;
++}
++
++impl Termination for () {
++ fn report(self) -> i32 {
++ unsafe {
++ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
++ *NUM_REF as i32
++ }
++ }
++}
++
++trait SomeTrait {
++ fn object_safe(&self);
++}
++
++impl SomeTrait for &'static str {
++ fn object_safe(&self) {
++ unsafe {
++ puts(*self as *const str as *const u8);
++ }
++ }
++}
++
++struct NoisyDrop {
++ text: &'static str,
++ inner: NoisyDropInner,
++}
++
++struct NoisyDropInner;
++
++impl Drop for NoisyDrop {
++ fn drop(&mut self) {
++ unsafe {
++ puts(self.text as *const str as *const u8);
++ }
++ }
++}
++
++impl Drop for NoisyDropInner {
++ fn drop(&mut self) {
++ unsafe {
++ puts("Inner got dropped!\0" as *const str as *const u8);
++ }
++ }
++}
++
++impl SomeTrait for NoisyDrop {
++ fn object_safe(&self) {}
++}
++
++enum Ordering {
++ Less = -1,
++ Equal = 0,
++ Greater = 1,
++}
++
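++// The `start` lang item is the actual entry point. When the program receives
++// two command-line arguments (argc == 3, counting the program name), it prints
++// all three argv entries via manual pointer arithmetic before running main.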
++#[lang = "start"]
++fn start<T: Termination + 'static>(
++ main: fn() -> T,
++ argc: isize,
++ argv: *const *const u8,
++) -> isize {
++ if argc == 3 {
++ unsafe { puts(*argv); }
++ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); }
++ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); }
++ }
++
++ main().report();
++ 0
++}
++
++static mut NUM: u8 = 6 * 7;
++static NUM_REF: &'static u8 = unsafe { &NUM };
++
++macro_rules! assert {
++ ($e:expr) => {
++ if !$e {
++ panic(stringify!(! $e));
++ }
++ };
++}
++
++macro_rules! assert_eq {
++ ($l:expr, $r: expr) => {
++ if $l != $r {
++ panic(stringify!($l != $r));
++ }
++ }
++}
++
++struct Unique<T: ?Sized> {
++ pointer: *const T,
++ _marker: PhantomData<T>,
++}
++
++impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
++
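++// Minimal stand-in for `mem::zeroed`: write an all-zero bit pattern into a
++// `MaybeUninit` and read the value back out. This is only sound for types for
++// which all-zero bits are a valid value.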
++unsafe fn zeroed<T>() -> T {
++ let mut uninit = MaybeUninit { uninit: () };
++ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
++ uninit.value.value
++}
++
++fn take_f32(_f: f32) {}
++fn take_unique(_u: Unique<()>) {}
++
++fn return_u128_pair() -> (u128, u128) {
++ (0, 0)
++}
++
++fn call_return_u128_pair() {
++ return_u128_pair();
++}
++
++fn main() {
++ take_unique(Unique {
++ pointer: 0 as *const (),
++ _marker: PhantomData,
++ });
++ take_f32(0.1);
++
++ //call_return_u128_pair();
++
++ let slice = &[0, 1] as &[i32];
++ let slice_ptr = slice as *const [i32] as *const i32;
++
++ assert_eq!(slice_ptr as usize % 4, 0);
++
++ //return;
++
++ unsafe {
++ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
++
++ let hello: &[u8] = b"Hello\0" as &[u8; 6];
++ let ptr: *const u8 = hello as *const [u8] as *const u8;
++ puts(ptr);
++
++ let world: Box<&str> = box "World!\0";
++ puts(*world as *const str as *const u8);
++ world as Box<dyn SomeTrait>;
++
++ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
++
++ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
++ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
++ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
++ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
++
++ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
++
++ let chars = &['C', 'h', 'a', 'r', 's'];
++ let chars = chars as &[char];
++ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
++
++ let a: &dyn SomeTrait = &"abc\0";
++ a.object_safe();
++
++ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
++ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
++
++ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
++ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
++
++ assert!(!intrinsics::needs_drop::<u8>());
++ assert!(intrinsics::needs_drop::<NoisyDrop>());
++
++ Unique {
++ pointer: 0 as *const &str,
++ _marker: PhantomData,
++ } as Unique<dyn SomeTrait>;
++
++ struct MyDst<T: ?Sized>(T);
++
++ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
++
++ struct Foo {
++ x: u8,
++ y: !,
++ }
++
++ unsafe fn uninitialized<T>() -> T {
++ MaybeUninit { uninit: () }.value.value
++ }
++
++ zeroed::<(u8, u8)>();
++ #[allow(unreachable_code)]
++ {
++ if false {
++ zeroed::<!>();
++ zeroed::<Foo>();
++ uninitialized::<Foo>();
++ }
++ }
++ }
++
++ let _ = box NoisyDrop {
++ text: "Boxed outer got dropped!\0",
++ inner: NoisyDropInner,
++ } as Box<dyn SomeTrait>;
++
++ const FUNC_REF: Option<fn()> = Some(main);
++ match FUNC_REF {
++ Some(_) => {},
++ None => assert!(false),
++ }
++
++ match Ordering::Less {
++ Ordering::Less => {},
++ _ => assert!(false),
++ }
++
++ [NoisyDropInner, NoisyDropInner];
++
++ let x = &[0u32, 42u32] as &[u32];
++ match x {
++ [] => assert_eq!(0u32, 1),
++ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
++ }
++
++ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
++
++ extern {
++ #[linkage = "weak"]
++ static ABC: *const u8;
++ }
++
++ {
++ extern {
++ #[linkage = "weak"]
++ static ABC: *const u8;
++ }
++ }
++
++ // TODO: not sure about this assert. ABC is not defined, so should it really be 0?
++ //unsafe { assert_eq!(ABC as usize, 0); }
++
++ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
++
++ let f = 1000.0;
++ assert_eq!(f as u8, 255);
++ let f2 = -1000.0;
++ assert_eq!(f2 as i8, -128);
++ assert_eq!(f2 as u8, 0);
++
++ static ANOTHER_STATIC: &u8 = &A_STATIC;
++ assert_eq!(*ANOTHER_STATIC, 42);
++
++ check_niche_behavior();
++
++ extern "C" {
++ type ExternType;
++ }
++
++ struct ExternTypeWrapper {
++ _a: ExternType,
++ }
++
++ let nullptr = 0 as *const ();
++ let extern_nullptr = nullptr as *const ExternTypeWrapper;
++ extern_nullptr as *const ();
++ let slice_ptr = &[] as *const [u8];
++ slice_ptr as *const u8;
++
++ #[cfg(not(jit))]
++ test_tls();
++}
++
++#[repr(C)]
++enum c_void {
++ _1,
++ _2,
++}
++
++type c_int = i32;
++type c_ulong = u64;
++
++type pthread_t = c_ulong;
++
++#[repr(C)]
++struct pthread_attr_t {
++ __size: [u64; 7],
++}
++
++#[link(name = "pthread")]
++extern "C" {
++ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
++
++ fn pthread_create(
++ native: *mut pthread_t,
++ attr: *const pthread_attr_t,
++ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
++ value: *mut c_void
++ ) -> c_int;
++
++ fn pthread_join(
++ native: pthread_t,
++ value: *mut *mut c_void
++ ) -> c_int;
++}
++
++#[thread_local]
++#[cfg(not(jit))]
++static mut TLS: u8 = 42;
++
++#[cfg(not(jit))]
++extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
++ unsafe { TLS = 0; }
++ 0 as *mut c_void
++}
++
++#[cfg(not(jit))]
++fn test_tls() {
++ unsafe {
++ let mut attr: pthread_attr_t = zeroed();
++ let mut thread: pthread_t = 0;
++
++ assert_eq!(TLS, 42);
++
++ if pthread_attr_init(&mut attr) != 0 {
++ assert!(false);
++ }
++
++ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
++ assert!(false);
++ }
++
++ let mut res = 0 as *mut c_void;
++ pthread_join(thread, &mut res);
++
++ // TLS of main thread must not have been changed by the other thread.
++ assert_eq!(TLS, 42);
++
++ puts("TLS works!\n\0" as *const str as *const u8);
++ }
++}
++
++// Copied ui/issues/issue-61696.rs
++
++pub enum Infallible {}
++
++// The check that the `bool` field of `V1` is encoding a "niche variant"
++// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
++// causing valid `V1` values to be interpreted as other variants.
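++// `check_niche_behavior` below verifies that `E1::V1 { f: true }` is still
++// recognized as `V1` rather than as one of the niche variants.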
++pub enum E1 {
++ V1 { f: bool },
++ V2 { f: Infallible },
++ V3,
++ V4,
++}
++
++// Computing the discriminant used to be done using the niche type (here `u8`,
++// from the `bool` field of `V1`), overflowing for variants with large enough
++// indices (`V3` and `V4`), causing them to be interpreted as other variants.
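++// Between `V1` and `V3`/`V4` there are 255 `X` variants, pushing the indices
++// of `V3` and `V4` to 256 and 257, past what the `u8` niche can represent.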
++pub enum E2<X> {
++ V1 { f: bool },
++
++ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
++ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
++ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
++ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
++ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
++ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
++ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
++ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
++ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
++ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
++ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
++ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
++ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
++ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
++ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
++ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
++ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
++ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
++ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
++ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
++ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
++ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
++ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
++ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
++ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
++ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
++ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
++ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
++ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
++ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
++ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
++ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
++
++ V3,
++ V4,
++}
++
++fn check_niche_behavior() {
++ if let E1::V2 { .. } = (E1::V1 { f: true }) {
++ intrinsics::abort();
++ }
++
++ if let E2::V1 { .. } = E2::V3::<Infallible> {
++ intrinsics::abort();
++ }
++}
--- /dev/null
--- /dev/null
++#![feature(start, box_syntax, core_intrinsics, lang_items)]
++#![no_std]
++
++#[link(name = "c")]
++extern {}
++
++#[panic_handler]
++fn panic_handler(_: &core::panic::PanicInfo) -> ! {
++ unsafe {
++ core::intrinsics::abort();
++ }
++}
++
++#[lang="eh_personality"]
++fn eh_personality(){}
++
++// Required for rustc_codegen_llvm
++#[no_mangle]
++unsafe extern "C" fn _Unwind_Resume() {
++ core::intrinsics::unreachable();
++}
++
++#[start]
++fn main(_argc: isize, _argv: *const *const u8) -> isize {
++ for i in 2..100_000_000 {
++ black_box((i + 1) % i);
++ }
++
++ 0
++}
++
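++// Not a true black box: it merely blocks inlining and checks the expected
++// result, aborting if `(i + 1) % i` (always 1 for i >= 2) was miscompiled.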
++#[inline(never)]
++fn black_box(i: u32) {
++ if i != 1 {
++ unsafe { core::intrinsics::abort(); }
++ }
++}
--- /dev/null
--- /dev/null
++#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
++
++use std::arch::x86_64::*;
++use std::io::Write;
++use std::ops::Generator;
++
++extern {
++ pub fn printf(format: *const i8, ...) -> i32;
++}
++
++fn main() {
++ let mutex = std::sync::Mutex::new(());
++ let _guard = mutex.lock().unwrap();
++
++ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
++ let stderr = ::std::io::stderr();
++ let mut stderr = stderr.lock();
++
++ // FIXME: this thread panics.
++ std::thread::spawn(move || {
++ println!("Hello from another thread!");
++ });
++
++ writeln!(stderr, "some {} text", "<unknown>").unwrap();
++
++ let _ = std::process::Command::new("true").env("c", "d").spawn();
++
++ println!("cargo:rustc-link-lib=z");
++
++ static ONCE: std::sync::Once = std::sync::Once::new();
++ ONCE.call_once(|| {});
++
++ let _eq = LoopState::Continue(()) == LoopState::Break(());
++
++ // Make sure ByValPair values with differently sized components are correctly passed
++ map(None::<(u8, Box<Instruction>)>);
++
++ println!("{}", 2.3f32.exp());
++ println!("{}", 2.3f32.exp2());
++ println!("{}", 2.3f32.abs());
++ println!("{}", 2.3f32.sqrt());
++ println!("{}", 2.3f32.floor());
++ println!("{}", 2.3f32.ceil());
++ println!("{}", 2.3f32.min(1.0));
++ println!("{}", 2.3f32.max(1.0));
++ println!("{}", 2.3f32.powi(2));
++ println!("{}", 2.3f32.log2());
++ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
++ println!("{}", 2.3f32.powf(2.0));
++
++ assert_eq!(-128i8, (-128i8).saturating_sub(1));
++ assert_eq!(127i8, 127i8.saturating_sub(-128));
++ assert_eq!(-128i8, (-128i8).saturating_add(-128));
++ assert_eq!(127i8, 127i8.saturating_add(1));
++
++ assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
++ assert_eq!(32767i16, 32767i16.saturating_add(1));
++
++ /*assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
++ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
++
++ let _d = 0i128.checked_div(2i128);
++ let _d = 0u128.checked_div(2u128);
++ assert_eq!(1u128 + 2, 3);
++
++ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
++ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
++ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
++
++ let tmp = 353985398u128;
++ assert_eq!(tmp * 932490u128, 330087843781020u128);
++
++ let tmp = -0x1234_5678_9ABC_DEF0i64;
++ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
++
++ // Check that all u/i128 <-> float casts work correctly.
++ let houndred_u128 = 100u128;
++ let houndred_i128 = 100i128;
++ let houndred_f32 = 100.0f32;
++ let houndred_f64 = 100.0f64;
++ assert_eq!(houndred_u128 as f32, 100.0);
++ assert_eq!(houndred_u128 as f64, 100.0);
++ assert_eq!(houndred_f32 as u128, 100);
++ assert_eq!(houndred_f64 as u128, 100);
++ assert_eq!(houndred_i128 as f32, 100.0);
++ assert_eq!(houndred_i128 as f64, 100.0);
++ assert_eq!(houndred_f32 as i128, 100);
++ assert_eq!(houndred_f64 as i128, 100);*/
++
++ let _a = 1u32 << 2u8;
++
++ let empty: [i32; 0] = [];
++ assert!(empty.is_sorted());
++
++ println!("{:?}", std::intrinsics::caller_location());
++
++ /*unsafe {
++ test_simd();
++ }*/
++
++ Box::pin(move |mut _task_context| {
++ yield ();
++ }).as_mut().resume(0);
++
++ println!("End");
++}
++
++/*#[target_feature(enable = "sse2")]
++unsafe fn test_simd() {
++ let x = _mm_setzero_si128();
++ let y = _mm_set1_epi16(7);
++ let or = _mm_or_si128(x, y);
++ let cmp_eq = _mm_cmpeq_epi8(y, y);
++ let cmp_lt = _mm_cmplt_epi8(y, y);
++
++ /*assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
++ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
++ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
++
++ test_mm_slli_si128();
++ test_mm_movemask_epi8();
++ test_mm256_movemask_epi8();
++ test_mm_add_epi8();
++ test_mm_add_pd();
++ test_mm_cvtepi8_epi16();
++ test_mm_cvtsi128_si64();
++
++ // FIXME(#666) implement `#[rustc_arg_required_const(..)]` support
++ //test_mm_extract_epi8();
++
++ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
++ assert_eq!(mask1, 1);*/
++}*/
++
++/*#[target_feature(enable = "sse2")]
++unsafe fn test_mm_slli_si128() {
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
++ );
++ let r = _mm_slli_si128(a, 1);
++ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
++ assert_eq_m128i(r, e);
++
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
++ );
++ let r = _mm_slli_si128(a, 15);
++ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
++ assert_eq_m128i(r, e);
++
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
++ );
++ let r = _mm_slli_si128(a, 16);
++ assert_eq_m128i(r, _mm_set1_epi8(0));
++
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
++ );
++ let r = _mm_slli_si128(a, -1);
++ assert_eq_m128i(_mm_set1_epi8(0), r);
++
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
++ );
++ let r = _mm_slli_si128(a, -0x80000000);
++ assert_eq_m128i(r, _mm_set1_epi8(0));
++}
++
++#[target_feature(enable = "sse2")]
++unsafe fn test_mm_movemask_epi8() {
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
++ 0b0101, 0b1111_0000u8 as i8, 0, 0,
++ 0, 0, 0b1111_0000u8 as i8, 0b0101,
++ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
++ );
++ let r = _mm_movemask_epi8(a);
++ assert_eq!(r, 0b10100100_00100101);
++}
++
++#[target_feature(enable = "avx2")]
++unsafe fn test_mm256_movemask_epi8() {
++ let a = _mm256_set1_epi8(-1);
++ let r = _mm256_movemask_epi8(a);
++ let e = -1;
++ assert_eq!(r, e);
++}
++
++#[target_feature(enable = "sse2")]
++unsafe fn test_mm_add_epi8() {
++ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
++ #[rustfmt::skip]
++ let b = _mm_setr_epi8(
++ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
++ );
++ let r = _mm_add_epi8(a, b);
++ #[rustfmt::skip]
++ let e = _mm_setr_epi8(
++ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
++ );
++ assert_eq_m128i(r, e);
++}
++
++#[target_feature(enable = "sse2")]
++unsafe fn test_mm_add_pd() {
++ let a = _mm_setr_pd(1.0, 2.0);
++ let b = _mm_setr_pd(5.0, 10.0);
++ let r = _mm_add_pd(a, b);
++ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
++}
++
++fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
++ unsafe {
++ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
++ }
++}
++
++#[target_feature(enable = "sse2")]
++pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
++ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
++ panic!("{:?} != {:?}", a, b);
++ }
++}
++
++#[target_feature(enable = "sse2")]
++unsafe fn test_mm_cvtsi128_si64() {
++ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
++ assert_eq!(r, 5);
++}
++
++#[target_feature(enable = "sse4.1")]
++unsafe fn test_mm_cvtepi8_epi16() {
++ let a = _mm_set1_epi8(10);
++ let r = _mm_cvtepi8_epi16(a);
++ let e = _mm_set1_epi16(10);
++ assert_eq_m128i(r, e);
++ let a = _mm_set1_epi8(-10);
++ let r = _mm_cvtepi8_epi16(a);
++ let e = _mm_set1_epi16(-10);
++ assert_eq_m128i(r, e);
++}
++
++#[target_feature(enable = "sse4.1")]
++unsafe fn test_mm_extract_epi8() {
++ #[rustfmt::skip]
++ let a = _mm_setr_epi8(
++ -1, 1, 2, 3, 4, 5, 6, 7,
++ 8, 9, 10, 11, 12, 13, 14, 15
++ );
++ let r1 = _mm_extract_epi8(a, 0);
++ let r2 = _mm_extract_epi8(a, 19);
++ assert_eq!(r1, 0xFF);
++ assert_eq!(r2, 3);
++}*/
++
++#[derive(PartialEq)]
++enum LoopState {
++ Continue(()),
++ Break(())
++}
++
++pub enum Instruction {
++ Increment,
++ Loop,
++}
++
++fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
++ match a {
++ None => None,
++ Some((_, instr)) => Some(instr),
++ }
++}
--- /dev/null
--- /dev/null
++// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
++
++// Test that array subslice patterns are correctly handled in const evaluation.
++
++// run-pass
++
++#[derive(PartialEq, Debug, Clone)]
++struct N(u8);
++
++#[derive(PartialEq, Debug, Clone)]
++struct Z;
++
++macro_rules! n {
++ ($($e:expr),* $(,)?) => {
++ [$(N($e)),*]
++ }
++}
++
++// This macro has an unused variable so that it can be repeated based on the
++// number of times a repeated variable (`$e` in `z`) occurs.
++macro_rules! zed {
++ ($e:expr) => { Z }
++}
++
++macro_rules! z {
++ ($($e:expr),* $(,)?) => {
++ [$(zed!($e)),*]
++ }
++}
++
++// Compare constant evaluation and runtime evaluation of a given expression.
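++// For example, `compare_evaluation!({ let [x, ..] = [1, 2]; x }, i32)` checks
++// that the subslice pattern binds the same `x` at compile time and at runtime.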
++macro_rules! compare_evaluation {
++ ($e:expr, $t:ty $(,)?) => {{
++ const CONST_EVAL: $t = $e;
++ const fn const_eval() -> $t { $e }
++ static CONST_EVAL2: $t = const_eval();
++ let runtime_eval = $e;
++ assert_eq!(CONST_EVAL, runtime_eval);
++ assert_eq!(CONST_EVAL2, runtime_eval);
++ }}
++}
++
++// Repeat `$test`, substituting the given macro variables with the given
++// identifiers.
++//
++// For example:
++//
++// repeat! {
++// ($name); X; Y:
++// struct $name;
++// }
++//
++// Expands to:
++//
++// struct X; struct Y;
++//
++// This is used to repeat the tests using both the `N` and `Z`
++// types.
++macro_rules! repeat {
++ (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
++ macro_rules! single {
++ ($($dollar $placeholder:ident),*) => { $($test)* }
++ }
++ $(single!($($values),+);)*
++ }
++}
++
++fn main() {
++ repeat! {
++ ($arr $Ty); n, N; z, Z:
++ compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
++ compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
++ compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
++
++ compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
++ compare_evaluation!(
++ { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
++ &'static [$Ty; 0],
++ );
++ compare_evaluation!(
++ { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
++ &'static [$Ty; 0],
++ );
++
++ compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
++ compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
++ compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
++ }
++
++ compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
++ compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
++ compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
++
++ compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
++ compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
++ compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
++}
--- /dev/null
--- /dev/null
++// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
++
++// run-pass
++
++use std::panic::Location;
++
++#[track_caller]
++fn tracked() -> &'static Location<'static> {
++ Location::caller()
++}
++
++fn nested_intrinsic() -> &'static Location<'static> {
++ Location::caller()
++}
++
++fn nested_tracked() -> &'static Location<'static> {
++ tracked()
++}
++
++fn main() {
++ let location = Location::caller();
++ assert_eq!(location.file(), file!());
++ assert_eq!(location.line(), 21);
++ assert_eq!(location.column(), 20);
++
++ let tracked = tracked();
++ assert_eq!(tracked.file(), file!());
++ assert_eq!(tracked.line(), 26);
++ assert_eq!(tracked.column(), 19);
++
++ let nested = nested_intrinsic();
++ assert_eq!(nested.file(), file!());
++ assert_eq!(nested.line(), 13);
++ assert_eq!(nested.column(), 5);
++
++ let contained = nested_tracked();
++ assert_eq!(contained.file(), file!());
++ assert_eq!(contained.line(), 17);
++ assert_eq!(contained.column(), 5);
++}
--- /dev/null
--- /dev/null
--- /dev/null
--- /dev/null
++From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Sun, 24 Nov 2019 15:10:23 +0100
++Subject: [PATCH] [core] Disable not compiling tests
++
++---
++ library/core/tests/Cargo.toml | 8 ++++++++
++ library/core/tests/num/flt2dec/mod.rs | 1 -
++ library/core/tests/num/int_macros.rs | 2 ++
++ library/core/tests/num/uint_macros.rs | 2 ++
++ library/core/tests/ptr.rs | 2 ++
++ library/core/tests/slice.rs | 2 ++
++ 6 files changed, 16 insertions(+), 1 deletion(-)
++ create mode 100644 library/core/tests/Cargo.toml
++
++diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
++new file mode 100644
++index 0000000..46fd999
++--- /dev/null
+++++ b/library/core/tests/Cargo.toml
++@@ -0,0 +1,8 @@
+++[package]
+++name = "core"
+++version = "0.0.0"
+++edition = "2018"
+++
+++[lib]
+++name = "coretests"
+++path = "lib.rs"
++diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
++index a35897e..f0bf645 100644
++--- a/library/core/tests/num/flt2dec/mod.rs
+++++ b/library/core/tests/num/flt2dec/mod.rs
++@@ -13,7 +13,6 @@ mod strategy {
++ mod dragon;
++ mod grisu;
++ }
++-mod random;
++
++ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
++ match decode(v).1 {
++diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
++index 6609bc3..241b497 100644
++--- a/library/core/tests/slice.rs
+++++ b/library/core/tests/slice.rs
++@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
++ }
++ }
++
+++/*
++ #[test]
++ #[cfg(not(target_arch = "wasm32"))]
++ fn sort_unstable() {
++@@ -1394,6 +1395,7 @@ fn partition_at_index() {
++ v.select_nth_unstable(0);
++ assert!(v == [0xDEADBEEF]);
++ }
+++*/
++
++ #[test]
++ #[should_panic(expected = "index 0 greater than length of slice")]
++--
++2.21.0 (Apple Git-122)
--- /dev/null
--- /dev/null
++From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
++From: bjorn3 <bjorn3@users.noreply.github.com>
++Date: Sun, 24 Nov 2019 15:34:06 +0100
++Subject: [PATCH] [core] Ignore failing tests
++
++---
++ library/core/tests/iter.rs | 4 ++++
++ library/core/tests/num/bignum.rs | 10 ++++++++++
++ library/core/tests/num/mod.rs | 5 +++--
++ library/core/tests/time.rs | 1 +
++ 4 files changed, 18 insertions(+), 2 deletions(-)
++
++diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
++index 4bc44e9..8e3c7a4 100644
++--- a/library/core/tests/array.rs
+++++ b/library/core/tests/array.rs
++@@ -242,6 +242,7 @@ fn iterator_drops() {
++ assert_eq!(i.get(), 5);
++ }
++
+++/*
++ // This test does not work on targets without panic=unwind support.
++ // To work around this problem, test is marked is should_panic, so it will
++ // be automagically skipped on unsuitable targets, such as
++@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
++ assert_eq!(COUNTER.load(Relaxed), 0);
++ panic!("test succeeded")
++ }
+++*/
++
++ #[test]
++ fn empty_array_is_always_default() {
++@@ -304,6 +304,7 @@ fn array_map() {
++ assert_eq!(b, [1, 2, 3]);
++ }
++
+++/*
++ // See note on above test for why `should_panic` is used.
++ #[test]
++ #[should_panic(expected = "test succeeded")]
++@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
++ assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
++ panic!("test succeeded")
++ }
+++*/
++
++ #[test]
++ fn cell_allows_array_cycle() {
++--
++2.21.0 (Apple Git-122)
--- /dev/null
--- /dev/null
++#!/bin/bash --verbose
++set -e
++
++source prepare_build.sh
++
++cargo install hyperfine || echo "Skipping hyperfine install"
++
++git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
++pushd regex
++git checkout -- .
++git checkout 341f207c1071f7290e3f228c710817c280c8dca1
++popd
++
++git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
++pushd simple-raytracer
++git checkout -- .
++git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
++
++# build with cg_llvm for perf comparison
++cargo build
++mv target/debug/main raytracer_cg_llvm
++popd
--- /dev/null
--- /dev/null
++#!/bin/bash --verbose
++set -e
++
++rustup component add rust-src rustc-dev llvm-tools-preview
++./build_sysroot/prepare_sysroot_src.sh
--- /dev/null
--- /dev/null
++nightly-2021-07-21
--- /dev/null
--- /dev/null
++#!/bin/bash
++
++set -e
++
++case $1 in
++ "prepare")
++ TOOLCHAIN=$(date +%Y-%m-%d)
++
++ echo "=> Installing new nightly"
++ rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
++ echo nightly-${TOOLCHAIN} > rust-toolchain
++
++ echo "=> Uninstalling all old nightlies"
++ for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
++ rustup toolchain uninstall $nightly
++ done
++
++ ./clean_all.sh
++ ./prepare.sh
++ ;;
++ "commit")
++ git add rust-toolchain
++ git commit -m "Rustup to $(rustc -V)"
++ ;;
++ *)
++ echo "Unknown command '$1'"
++ echo "Usage: ./rustup.sh prepare|commit"
++ ;;
++esac
--- /dev/null
--- /dev/null
++use gccjit::{ToRValue, Type};
++use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
++use rustc_middle::bug;
++use rustc_middle::ty::Ty;
++use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
++
++use crate::builder::Builder;
++use crate::context::CodegenCx;
++use crate::intrinsic::ArgAbiExt;
++use crate::type_of::LayoutGccExt;
++
++impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
++ // TODO
++ //fn_abi.apply_attrs_callsite(self, callsite)
++ }
++
++ fn get_param(&self, index: usize) -> Self::Value {
++ self.cx.current_func.borrow().expect("current func")
++ .get_param(index as i32)
++ .to_rvalue()
++ }
++}
++
++impl GccType for CastTarget {
++ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
++ let rest_gcc_unit = self.rest.unit.gcc_type(cx);
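++ // Split `rest` into as many whole units as fit, plus the number of
++ // trailing bytes that do not fill a complete unit.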
++ let (rest_count, rem_bytes) =
++ if self.rest.unit.size.bytes() == 0 {
++ (0, 0)
++ }
++ else {
++ (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
++ };
++
++ if self.prefix.iter().all(|x| x.is_none()) {
++ // Simplify to a single unit when there is no prefix and size <= unit size
++ if self.rest.total <= self.rest.unit.size {
++ return rest_gcc_unit;
++ }
++
++ // Simplify to array when all chunks are the same size and type
++ if rem_bytes == 0 {
++ return cx.type_array(rest_gcc_unit, rest_count);
++ }
++ }
++
++ // Create list of fields in the main structure
++ let mut args: Vec<_> = self
++ .prefix
++ .iter()
++ .flat_map(|option_kind| {
++ option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.gcc_type(cx))
++ })
++ .chain((0..rest_count).map(|_| rest_gcc_unit))
++ .collect();
++
++ // Append final integer
++ if rem_bytes != 0 {
++ // Only integers can be really split further.
++ assert_eq!(self.rest.unit.kind, RegKind::Integer);
++ args.push(cx.type_ix(rem_bytes * 8));
++ }
++
++ cx.type_struct(&args, false)
++ }
++}
++
++pub trait GccType {
++ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
++}
++
++impl GccType for Reg {
++ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
++ match self.kind {
++ RegKind::Integer => cx.type_ix(self.size.bits()),
++ RegKind::Float => {
++ match self.size.bits() {
++ 32 => cx.type_f32(),
++ 64 => cx.type_f64(),
++ _ => bug!("unsupported float: {:?}", self),
++ }
++ },
++ RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
++ }
++ }
++}
++
++pub trait FnAbiGccExt<'gcc, 'tcx> {
++ // TODO: return a function pointer type instead?
++ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
++ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
++ /*fn llvm_cconv(&self) -> llvm::CallConv;
++ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
++ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);*/
++}
++
++impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
++ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) {
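++ // Estimate the number of GCC parameters: one extra per padding type,
++ // two per scalar pair, one otherwise.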
++ let args_capacity: usize = self.args.iter().map(|arg|
++ if arg.pad.is_some() {
++ 1
++ }
++ else {
++ 0
++ } +
++ if let PassMode::Pair(_, _) = arg.mode {
++ 2
++ } else {
++ 1
++ }
++ ).sum();
++ let mut argument_tys = Vec::with_capacity(
++ if let PassMode::Indirect { .. } = self.ret.mode {
++ 1
++ }
++ else {
++ 0
++ } + args_capacity,
++ );
++
++ let return_ty =
++ match self.ret.mode {
++ PassMode::Ignore => cx.type_void(),
++ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
++ PassMode::Cast(cast) => cast.gcc_type(cx),
++ PassMode::Indirect { .. } => {
++ argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
++ cx.type_void()
++ }
++ };
++
++ for arg in &self.args {
++ // add padding
++ if let Some(ty) = arg.pad {
++ argument_tys.push(ty.gcc_type(cx));
++ }
++
++ let arg_ty = match arg.mode {
++ PassMode::Ignore => continue,
++ PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
++ PassMode::Pair(..) => {
++ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
++ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
++ continue;
++ }
++ PassMode::Indirect { extra_attrs: Some(_), .. } => {
++ /*let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
++ let ptr_layout = cx.layout_of(ptr_ty);
++ argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 0, true));
++ argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 1, true));*/
++ unimplemented!();
++ //continue;
++ }
++ PassMode::Cast(cast) => cast.gcc_type(cx),
++ PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
++ };
++ argument_tys.push(arg_ty);
++ }
++
++ (return_ty, argument_tys, self.c_variadic)
++ }
++
++ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
++ let (return_type, params, variadic) = self.gcc_type(cx);
++ let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
++ pointer_type
++ }
++
++ /*fn llvm_cconv(&self) -> llvm::CallConv {
++ match self.conv {
++ Conv::C | Conv::Rust => llvm::CCallConv,
++ Conv::AmdGpuKernel => llvm::AmdGpuKernel,
++ Conv::ArmAapcs => llvm::ArmAapcsCallConv,
++ Conv::Msp430Intr => llvm::Msp430Intr,
++ Conv::PtxKernel => llvm::PtxKernel,
++ Conv::X86Fastcall => llvm::X86FastcallCallConv,
++ Conv::X86Intr => llvm::X86_Intr,
++ Conv::X86Stdcall => llvm::X86StdcallCallConv,
++ Conv::X86ThisCall => llvm::X86_ThisCall,
++ Conv::X86VectorCall => llvm::X86_VectorCall,
++ Conv::X86_64SysV => llvm::X86_64_SysV,
++ Conv::X86_64Win64 => llvm::X86_64_Win64,
++ }
++ }
++
++ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
++ // FIXME(eddyb) can this also be applied to callsites?
++ if self.ret.layout.abi.is_uninhabited() {
++ llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
++ }
++
++ // FIXME(eddyb, wesleywiser): apply this to callsites as well?
++ if !self.can_unwind {
++ llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
++ }
++
++ let mut i = 0;
++ let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
++ attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
++ i += 1;
++ };
++ match self.ret.mode {
++ PassMode::Direct(ref attrs) => {
++ attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
++ }
++ PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(cx))),
++ _ => {}
++ }
++ for arg in &self.args {
++ if arg.pad.is_some() {
++ apply(&ArgAttributes::new(), None);
++ }
++ match arg.mode {
++ PassMode::Ignore => {}
++ PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
++ apply(attrs, Some(arg.layout.gcc_type(cx)))
++ }
++ PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
++ apply(attrs, None);
++ apply(extra_attrs, None);
++ }
++ PassMode::Pair(ref a, ref b) => {
++ apply(a, None);
++ apply(b, None);
++ }
++ PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
++ }
++ }
++ }
++
++ fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
++ // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
++
++ let mut i = 0;
++ let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
++ attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
++ i += 1;
++ };
++ match self.ret.mode {
++ PassMode::Direct(ref attrs) => {
++ attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
++ }
++ PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(bx))),
++ _ => {}
++ }
++ if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
++ // If the value is a boolean, the range is 0..2 and that ultimately
++ // become 0..0 when the type becomes i1, which would be rejected
++ // by the LLVM verifier.
++ if let Int(..) = scalar.value {
++ if !scalar.is_bool() {
++ let range = scalar.valid_range_exclusive(bx);
++ if range.start != range.end {
++ bx.range_metadata(callsite, range);
++ }
++ }
++ }
++ }
++ for arg in &self.args {
++ if arg.pad.is_some() {
++ apply(&ArgAttributes::new(), None);
++ }
++ match arg.mode {
++ PassMode::Ignore => {}
++ PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
++ apply(attrs, Some(arg.layout.gcc_type(bx)))
++ }
++ PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
++ apply(attrs, None);
++ apply(extra_attrs, None);
++ }
++ PassMode::Pair(ref a, ref b) => {
++ apply(a, None);
++ apply(b, None);
++ }
++ PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
++ }
++ }
++
++ let cconv = self.llvm_cconv();
++ if cconv != llvm::CCallConv {
++ llvm::SetInstructionCallConv(callsite, cconv);
++ }
++ }*/
++}
--- /dev/null
--- /dev/null
++//use crate::attributes;
++use gccjit::{FunctionType, ToRValue};
++use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
++use rustc_middle::bug;
++use rustc_middle::ty::TyCtxt;
++use rustc_span::symbol::sym;
++
++use crate::GccContext;
++
++pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: AllocatorKind, has_alloc_error_handler: bool) {
++ let context = &mods.context;
++ let usize =
++ match tcx.sess.target.pointer_width {
++ 16 => context.new_type::<u16>(),
++ 32 => context.new_type::<u32>(),
++ 64 => context.new_type::<u64>(),
++ tws => bug!("Unsupported target word size for int: {}", tws),
++ };
++ let i8 = context.new_type::<i8>();
++ let i8p = i8.make_pointer();
++ let void = context.new_type::<()>();
++
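++ // For every allocator method (e.g. __rust_alloc), emit an exported shim
++ // that forwards its arguments to the implementation selected by the
++ // allocator kind.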
++ for method in ALLOCATOR_METHODS {
++ let mut types = Vec::with_capacity(method.inputs.len());
++ for ty in method.inputs.iter() {
++ match *ty {
++ AllocatorTy::Layout => {
++ types.push(usize);
++ types.push(usize);
++ }
++ AllocatorTy::Ptr => types.push(i8p),
++ AllocatorTy::Usize => types.push(usize),
++
++ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
++ }
++ }
++ let output = match method.output {
++ AllocatorTy::ResultPtr => Some(i8p),
++ AllocatorTy::Unit => None,
++
++ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
++ panic!("invalid allocator output")
++ }
++ };
++ let name = format!("__rust_{}", method.name);
++
++ let args: Vec<_> = types.iter().enumerate()
++ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
++ .collect();
++ let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
++
++ if tcx.sess.target.options.default_hidden_visibility {
++ //llvm::LLVMRustSetVisibility(func, llvm::Visibility::Hidden);
++ }
++ if tcx.sess.must_emit_unwind_tables() {
++ // TODO
++ //attributes::emit_uwtable(func, true);
++ }
++
++ let callee = kind.fn_name(method.name);
++ let args: Vec<_> = types.iter().enumerate()
++ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
++ .collect();
++ let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
++ //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
++
++ let block = func.new_block("entry");
++
++ let args = args
++ .iter()
++ .enumerate()
++ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
++ .collect::<Vec<_>>();
++ let ret = context.new_call(None, callee, &args);
++ //llvm::LLVMSetTailCall(ret, True);
++ if output.is_some() {
++ block.end_with_return(None, ret);
++ }
++ else {
++ block.end_with_void_return(None);
++ }
++ }
++
++ let types = [usize, usize];
++ let name = "__rust_alloc_error_handler".to_string();
++ let args: Vec<_> = types.iter().enumerate()
++ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
++ .collect();
++ let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
++
++ let kind =
++ if has_alloc_error_handler {
++ AllocatorKind::Global
++ }
++ else {
++ AllocatorKind::Default
++ };
++ let callee = kind.fn_name(sym::oom);
++ let args: Vec<_> = types.iter().enumerate()
++ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
++ .collect();
++ let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
++ //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
++
++ let block = func.new_block("entry");
++
++ let args = args
++ .iter()
++ .enumerate()
++ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
++ .collect::<Vec<_>>();
++ let _ret = context.new_call(None, callee, &args);
++ //llvm::LLVMSetTailCall(ret, True);
++ block.end_with_void_return(None);
++}
--- /dev/null
--- /dev/null
++use std::fs::File;
++use std::path::{Path, PathBuf};
++
++use rustc_session::Session;
++use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
++use rustc_codegen_ssa::METADATA_FILENAME;
++use rustc_data_structures::temp_dir::MaybeTempDir;
++use rustc_middle::middle::cstore::DllImport;
++use rustc_span::symbol::Symbol;
++
++struct ArchiveConfig<'a> {
++ sess: &'a Session,
++ dst: PathBuf,
++ lib_search_paths: Vec<PathBuf>,
++ use_native_ar: bool,
++ use_gnu_style_archive: bool,
++}
++
++#[derive(Debug)]
++enum ArchiveEntry {
++ FromArchive {
++ archive_index: usize,
++ entry_index: usize,
++ },
++ File(PathBuf),
++}
++
++pub struct ArArchiveBuilder<'a> {
++ config: ArchiveConfig<'a>,
++ src_archives: Vec<(PathBuf, ar::Archive<File>)>,
++ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
++ // the end of an archive for linkers to not get confused.
++ entries: Vec<(String, ArchiveEntry)>,
++}
++
++impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
++ fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
++ use rustc_codegen_ssa::back::link::archive_search_paths;
++ let config = ArchiveConfig {
++ sess,
++ dst: output.to_path_buf(),
++ lib_search_paths: archive_search_paths(sess),
++ use_native_ar: false,
++ // FIXME test for linux and System V derivatives instead
++ use_gnu_style_archive: sess.target.options.archive_format == "gnu",
++ };
++
++ let (src_archives, entries) = if let Some(input) = input {
++ let mut archive = ar::Archive::new(File::open(input).unwrap());
++ let mut entries = Vec::new();
++
++ let mut i = 0;
++ while let Some(entry) = archive.next_entry() {
++ let entry = entry.unwrap();
++ entries.push((
++ String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
++ ArchiveEntry::FromArchive {
++ archive_index: 0,
++ entry_index: i,
++ },
++ ));
++ i += 1;
++ }
++
++ (vec![(input.to_owned(), archive)], entries)
++ } else {
++ (vec![], Vec::new())
++ };
++
++ ArArchiveBuilder {
++ config,
++ src_archives,
++ entries,
++ }
++ }
++
++ fn src_files(&mut self) -> Vec<String> {
++ self.entries.iter().map(|(name, _)| name.clone()).collect()
++ }
++
++ fn remove_file(&mut self, name: &str) {
++ let index = self
++ .entries
++ .iter()
++ .position(|(entry_name, _)| entry_name == name)
++ .expect("Tried to remove file not existing in src archive");
++ self.entries.remove(index);
++ }
++
++ fn add_file(&mut self, file: &Path) {
++ self.entries.push((
++ file.file_name().unwrap().to_str().unwrap().to_string(),
++ ArchiveEntry::File(file.to_owned()),
++ ));
++ }
++
++ fn add_native_library(&mut self, name: Symbol, verbatim: bool) {
++ let location = find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess);
++ self.add_archive(location.clone(), |_| false)
++ .unwrap_or_else(|e| {
++ panic!(
++ "failed to add native library {}: {}",
++ location.to_string_lossy(),
++ e
++ );
++ });
++ }
++
++ fn add_rlib(
++ &mut self,
++ rlib: &Path,
++ name: &str,
++ lto: bool,
++ skip_objects: bool,
++ ) -> std::io::Result<()> {
++ let obj_start = name.to_owned();
++
++ self.add_archive(rlib.to_owned(), move |fname: &str| {
++ // Ignore metadata files, no matter the name.
++ if fname == METADATA_FILENAME {
++ return true;
++ }
++
++ // Don't include Rust objects if LTO is enabled
++ if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
++ return true;
++ }
++
++ // Otherwise if this is *not* a rust object and we're skipping
++ // objects then skip this file
++ if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
++ return true;
++ }
++
++ // ok, don't skip this
++ return false;
++ })
++ }
++
++ fn update_symbols(&mut self) {
++ }
++
++ fn build(mut self) {
++ use std::process::Command;
++
++ fn add_file_using_ar(archive: &Path, file: &Path) {
++ Command::new("ar")
++ .arg("r") // add or replace file
++ .arg("-c") // silence created file message
++ .arg(archive)
++ .arg(&file)
++ .status()
++ .unwrap();
++ }
++
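++ // The archive can be written three ways: with the `ar` crate's BSD or
++ // GNU writers, or by shelling out to the system `ar` binary.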
++ enum BuilderKind<'a> {
++ Bsd(ar::Builder<File>),
++ Gnu(ar::GnuBuilder<File>),
++ NativeAr(&'a Path),
++ }
++
++ let mut builder = if self.config.use_native_ar {
++ BuilderKind::NativeAr(&self.config.dst)
++ } else if self.config.use_gnu_style_archive {
++ BuilderKind::Gnu(ar::GnuBuilder::new(
++ File::create(&self.config.dst).unwrap(),
++ self.entries
++ .iter()
++ .map(|(name, _)| name.as_bytes().to_vec())
++ .collect(),
++ ))
++ } else {
++ BuilderKind::Bsd(ar::Builder::new(File::create(&self.config.dst).unwrap()))
++ };
++
++ // Add all files
++ for (entry_name, entry) in self.entries.into_iter() {
++ match entry {
++ ArchiveEntry::FromArchive {
++ archive_index,
++ entry_index,
++ } => {
++ let (ref src_archive_path, ref mut src_archive) =
++ self.src_archives[archive_index];
++ let entry = src_archive.jump_to_entry(entry_index).unwrap();
++ let header = entry.header().clone();
++
++ match builder {
++ BuilderKind::Bsd(ref mut builder) => {
++ builder.append(&header, entry).unwrap()
++ }
++ BuilderKind::Gnu(ref mut builder) => {
++ builder.append(&header, entry).unwrap()
++ }
++ BuilderKind::NativeAr(archive_file) => {
++ Command::new("ar")
++ .arg("x")
++ .arg(src_archive_path)
++ .arg(&entry_name)
++ .status()
++ .unwrap();
++ add_file_using_ar(archive_file, Path::new(&entry_name));
++ std::fs::remove_file(entry_name).unwrap();
++ }
++ }
++ }
++ ArchiveEntry::File(file) =>
++ match builder {
++ BuilderKind::Bsd(ref mut builder) => {
++ builder
++ .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder"))
++ .unwrap()
++ },
++ BuilderKind::Gnu(ref mut builder) => {
++ builder
++ .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file)))
++ .unwrap()
++ },
++ BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file),
++ },
++ }
++ }
++
++ // Finalize archive
++ std::mem::drop(builder);
++
++ // Run ranlib to be able to link the archive
++ let status = std::process::Command::new("ranlib")
++ .arg(self.config.dst)
++ .status()
++ .expect("Couldn't run ranlib");
++
++ if !status.success() {
++ self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
++ }
++ }
++
++ fn inject_dll_import_lib(&mut self, _lib_name: &str, _dll_imports: &[DllImport], _tmpdir: &MaybeTempDir) {
++ unimplemented!();
++ }
++}
++
++impl<'a> ArArchiveBuilder<'a> {
++ fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
++ where
++ F: FnMut(&str) -> bool + 'static,
++ {
++ let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
++ let archive_index = self.src_archives.len();
++
++ let mut i = 0;
++ while let Some(entry) = archive.next_entry() {
++ let entry = entry.unwrap();
++ let file_name = String::from_utf8(entry.header().identifier().to_vec()).unwrap();
++ if !skip(&file_name) {
++ self.entries.push((
++ file_name,
++ ArchiveEntry::FromArchive {
++ archive_index,
++ entry_index: i,
++ },
++ ));
++ }
++ i += 1;
++ }
++
++ self.src_archives.push((archive_path, archive));
++ Ok(())
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::{RValue, ToRValue, Type};
++use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
++use rustc_codegen_ssa::mir::operand::OperandValue;
++use rustc_codegen_ssa::mir::place::PlaceRef;
++use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
++use rustc_data_structures::fx::FxHashMap;
++use rustc_hir::LlvmInlineAsmInner;
++use rustc_middle::bug;
++use rustc_span::Span;
++use rustc_target::asm::*;
++
++use crate::builder::Builder;
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++
++impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
++ // TODO
++ return true;
++
++ /*let mut ext_constraints = vec![];
++ let mut output_types = vec![];
++
++ // Prepare the output operands
++ let mut indirect_outputs = vec![];
++ for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
++ if out.is_rw {
++ let operand = self.load_operand(place);
++ if let OperandValue::Immediate(_) = operand.val {
++ inputs.push(operand.immediate());
++ }
++ ext_constraints.push(i.to_string());
++ }
++ if out.is_indirect {
++ let operand = self.load_operand(place);
++ if let OperandValue::Immediate(_) = operand.val {
++ indirect_outputs.push(operand.immediate());
++ }
++ } else {
++ output_types.push(place.layout.gcc_type(self.cx()));
++ }
++ }
++ if !indirect_outputs.is_empty() {
++ indirect_outputs.extend_from_slice(&inputs);
++ inputs = indirect_outputs;
++ }
++
++ let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
++
++ // Default per-arch clobbers
++ // Basically what clang does
++ let arch_clobbers = match &self.sess().target.target.arch[..] {
++ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
++ "mips" | "mips64" => vec!["~{$1}"],
++ _ => Vec::new(),
++ };
++
++ let all_constraints = ia
++ .outputs
++ .iter()
++ .map(|out| out.constraint.to_string())
++ .chain(ia.inputs.iter().map(|s| s.to_string()))
++ .chain(ext_constraints)
++ .chain(clobbers)
++ .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
++ .collect::<Vec<String>>()
++ .join(",");
++
++ debug!("Asm Constraints: {}", &all_constraints);
++
++ // Depending on how many outputs we have, the return type is different
++ let num_outputs = output_types.len();
++ let output_type = match num_outputs {
++ 0 => self.type_void(),
++ 1 => output_types[0],
++ _ => self.type_struct(&output_types, false),
++ };
++
++ let asm = ia.asm.as_str();
++ let r = inline_asm_call(
++ self,
++ &asm,
++ &all_constraints,
++ &inputs,
++ output_type,
++ ia.volatile,
++ ia.alignstack,
++ ia.dialect,
++ );
++ if r.is_none() {
++ return false;
++ }
++ let r = r.unwrap();
++
++ // Again, based on how many outputs we have
++ let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
++ for (i, (_, &place)) in outputs.enumerate() {
++ let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
++ OperandValue::Immediate(v).store(self, place);
++ }
++
++ // Store mark in a metadata node so we can map LLVM errors
++ // back to source locations. See #17552.
++ unsafe {
++ let key = "srcloc";
++ let kind = llvm::LLVMGetMDKindIDInContext(
++ self.llcx,
++ key.as_ptr() as *const c_char,
++ key.len() as c_uint,
++ );
++
++ let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32);
++
++ llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
++ }
++
++ true*/
++ }
++
++ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
++ let asm_arch = self.tcx.sess.asm_arch.unwrap();
++
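++ // Rust's asm! defaults to Intel syntax on x86 unless ATT_SYNTAX is requested;
++ // remember which dialect is in effect so the template string can be wrapped
++ // with the matching syntax directives below.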
++ let intel_dialect =
++ match asm_arch {
++ InlineAsmArch::X86 | InlineAsmArch::X86_64 if !options.contains(InlineAsmOptions::ATT_SYNTAX) => true,
++ _ => false,
++ };
++
++ // Collect the types of output operands
++ // FIXME: we do this here instead of later because of a bug in libgccjit where creating the
++ // variable after the extended asm expression causes a segfault:
++ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380
++ let mut output_vars = FxHashMap::default();
++ let mut operand_numbers = FxHashMap::default();
++ let mut current_number = 0;
++ for (idx, op) in operands.iter().enumerate() {
++ match *op {
++ InlineAsmOperandRef::Out { place, .. } => {
++ let ty =
++ match place {
++ Some(place) => place.layout.gcc_type(self.cx, false),
++ None => {
++ // If the output is discarded, we don't really care what
++ // type is used. We're just using this to tell GCC to
++ // reserve the register.
++ //dummy_output_type(self.cx, reg.reg_class())
++
++ // NOTE: if no output value, we should not create one (it will be a
++ // clobber).
++ continue;
++ },
++ };
++ let var = self.current_func().new_local(None, ty, "output_register");
++ operand_numbers.insert(idx, current_number);
++ current_number += 1;
++ output_vars.insert(idx, var);
++ }
++ InlineAsmOperandRef::InOut { out_place, .. } => {
++ let ty =
++ match out_place {
++ Some(place) => place.layout.gcc_type(self.cx, false),
++ None => {
++ // If the output is discarded, we don't really care what
++ // type is used. We're just using this to tell GCC to
++ // reserve the register.
++ //dummy_output_type(self.cx, reg.reg_class())
++
++ // NOTE: if no output value, we should not create one.
++ continue;
++ },
++ };
++ operand_numbers.insert(idx, current_number);
++ current_number += 1;
++ let var = self.current_func().new_local(None, ty, "output_register");
++ output_vars.insert(idx, var);
++ }
++ _ => {}
++ }
++ }
++
++ // All output operands must come before the input operands, hence the two loops.
++ for (idx, op) in operands.iter().enumerate() {
++ match *op {
++ InlineAsmOperandRef::In { .. } | InlineAsmOperandRef::InOut { .. } => {
++ operand_numbers.insert(idx, current_number);
++ current_number += 1;
++ },
++ _ => (),
++ }
++ }
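++ // For example (illustrative only), `asm!("mov {0}, {1}", out(reg) x, in(reg) y)`
++ // numbers the output 0 and the input 1, so the template below becomes "mov %0, %1".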
++
++ // Build the template string
++ let mut template_str = String::new();
++ for piece in template {
++ match *piece {
++ InlineAsmTemplatePiece::String(ref string) => {
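++ // '%' introduces an operand in GCC extended asm templates, so literal
++ // percent signs must be escaped by doubling them.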
++ if string.contains('%') {
++ for c in string.chars() {
++ if c == '%' {
++ template_str.push_str("%%");
++ }
++ else {
++ template_str.push(c);
++ }
++ }
++ }
++ else {
++ template_str.push_str(string)
++ }
++ }
++ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
++ match operands[operand_idx] {
++ InlineAsmOperandRef::Out { reg, place: Some(_), .. } => {
++ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
++ if let Some(modifier) = modifier {
++ template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx]));
++ } else {
++ template_str.push_str(&format!("%{}", operand_numbers[&operand_idx]));
++ }
++ },
++ InlineAsmOperandRef::Out { place: None, .. } => {
++ unimplemented!("Out None");
++ },
++ InlineAsmOperandRef::In { reg, .. }
++ | InlineAsmOperandRef::InOut { reg, .. } => {
++ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
++ if let Some(modifier) = modifier {
++ template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx]));
++ } else {
++ template_str.push_str(&format!("%{}", operand_numbers[&operand_idx]));
++ }
++ }
++ InlineAsmOperandRef::Const { ref string } => {
++ // Const operands get injected directly into the template
++ template_str.push_str(string);
++ }
++ InlineAsmOperandRef::SymFn { .. }
++ | InlineAsmOperandRef::SymStatic { .. } => {
++ unimplemented!();
++ // Only emit the raw symbol name
++ //template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
++ }
++ }
++ }
++ }
++ }
++
++ let block = self.llbb();
++ let template_str =
++ if intel_dialect {
++ template_str
++ }
++ else {
++ // FIXME: this might break the "m" memory constraint:
++ // https://stackoverflow.com/a/9347957/389119
++ // TODO: only set on x86 platforms.
++ format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str)
++ };
++ let extended_asm = block.add_extended_asm(None, &template_str);
++
++ // Collect the types of output operands
++ let mut output_types = vec![];
++ for (idx, op) in operands.iter().enumerate() {
++ match *op {
++ InlineAsmOperandRef::Out { reg, late, place } => {
++ let ty =
++ match place {
++ Some(place) => place.layout.gcc_type(self.cx, false),
++ None => {
++ // If the output is discarded, we don't really care what
++ // type is used. We're just using this to tell GCC to
++ // reserve the register.
++ dummy_output_type(self.cx, reg.reg_class())
++ },
++ };
++ output_types.push(ty);
++ //op_idx.insert(idx, constraints.len());
++ let prefix = if late { "=" } else { "=&" };
++ let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
++
++ if place.is_some() {
++ let var = output_vars[&idx];
++ extended_asm.add_output_operand(None, &constraint, var);
++ }
++ else {
++ // NOTE: reg.to_string() returns the register name with quotes around it, so
++ // remove them.
++ extended_asm.add_clobber(reg.to_string().trim_matches('"'));
++ }
++ }
++ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
++ let ty =
++ match out_place {
++ Some(out_place) => out_place.layout.gcc_type(self.cx, false),
++ None => dummy_output_type(self.cx, reg.reg_class())
++ };
++ output_types.push(ty);
++ //op_idx.insert(idx, constraints.len());
++ // TODO: prefix of "+" for reading and writing?
++ let prefix = if late { "=" } else { "=&" };
++ let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
++
++ if out_place.is_some() {
++ let var = output_vars[&idx];
++ // TODO: also specify an output operand when out_place is none: that would
++ // be the clobber but clobbers do not support general constraint like reg;
++ // they only support named registers.
++ // Not sure how we can do this. And the LLVM backend does not seem to add a
++ // clobber.
++ extended_asm.add_output_operand(None, &constraint, var);
++ }
++
++ let constraint = reg_to_gcc(reg);
++ extended_asm.add_input_operand(None, &constraint, in_value.immediate());
++ }
++ InlineAsmOperandRef::In { reg, value } => {
++ let constraint = reg_to_gcc(reg);
++ extended_asm.add_input_operand(None, &constraint, value.immediate());
++ }
++ _ => {}
++ }
++ }
++
++ /*if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
++ match asm_arch {
++ InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
++ constraints.push("~{cc}".to_string());
++ }
++ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
++ constraints.extend_from_slice(&[
++ "~{dirflag}".to_string(),
++ "~{fpsr}".to_string(),
++ "~{flags}".to_string(),
++ ]);
++ }
++ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
++ }
++ }
++ if !options.contains(InlineAsmOptions::NOMEM) {
++ // This is actually ignored by LLVM, but it's probably best to keep
++ // it just in case. LLVM instead uses the ReadOnly/ReadNone
++ // attributes on the call instruction to optimize.
++ constraints.push("~{memory}".to_string());
++ }
++ let volatile = !options.contains(InlineAsmOptions::PURE);
++ let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
++ let output_type = match &output_types[..] {
++ [] => self.type_void(),
++ [ty] => ty,
++ tys => self.type_struct(&tys, false),
++ };*/
++
++ /*let result = inline_asm_call(
++ self,
++ &template_str,
++ &constraints.join(","),
++ &inputs,
++ output_type,
++ volatile,
++ alignstack,
++ dialect,
++ span,
++ )
++ .unwrap_or_else(|| span_bug!(span, "LLVM asm constraint validation failed"));
++
++ if options.contains(InlineAsmOptions::PURE) {
++ if options.contains(InlineAsmOptions::NOMEM) {
++ llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
++ } else if options.contains(InlineAsmOptions::READONLY) {
++ llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
++ }
++ } else {
++ if options.contains(InlineAsmOptions::NOMEM) {
++ llvm::Attribute::InaccessibleMemOnly
++ .apply_callsite(llvm::AttributePlace::Function, result);
++ } else {
++ // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
++ }
++ }*/
++
++ // Write results to outputs
++ for (idx, op) in operands.iter().enumerate() {
++ if let InlineAsmOperandRef::Out { place: Some(place), .. }
++ | InlineAsmOperandRef::InOut { out_place: Some(place), .. } = *op
++ {
++ OperandValue::Immediate(output_vars[&idx].to_rvalue()).store(self, place);
++ }
++ }
++ }
++}
++
++/// Converts a register class to a GCC constraint code.
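++/// For example, the named register `ax` maps to the GCC constraint `a` and the
++/// x86 general-purpose register class maps to `r`.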
++// TODO: return &'static str instead?
++fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
++ match reg {
++ // For vector registers LLVM wants the register name to match the type size.
++ InlineAsmRegOrRegClass::Reg(reg) => {
++ // TODO: add support for vector register.
++ let constraint =
++ match reg.name() {
++ "ax" => "a",
++ "bx" => "b",
++ "cx" => "c",
++ "dx" => "d",
++ "si" => "S",
++ "di" => "D",
++ // TODO: for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
++ // TODO: in this case though, it's a clobber, so it should work as r11.
++ // Recent nightlies support the clobber() syntax, so update to use it. It does
++ // not seem to be implemented yet, though.
++ name => name, // FIXME: probably wrong.
++ };
++ constraint.to_string()
++ },
++ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(),
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(),
++ InlineAsmRegClass::Bpf(_) => unimplemented!(),
++ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
++ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
++ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
++ bug!("GCC backend does not support SPIR-V")
++ }
++ InlineAsmRegClass::Err => unreachable!(),
++ }
++ .to_string(),
++ }
++}
++
++/// Type to use for outputs that are discarded. It doesn't really matter what
++/// the type is, as long as it is valid for the constraint code.
++fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
++ match reg {
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
++ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
++ unimplemented!()
++ }
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
++ unimplemented!()
++ }
++ InlineAsmRegClass::Bpf(_) => unimplemented!(),
++ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
++ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
++ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
++ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
++ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
++ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
++ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
++ bug!("LLVM backend does not support SPIR-V")
++ },
++ InlineAsmRegClass::Err => unreachable!(),
++ }
++}
++
++impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> {
++ fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef], options: InlineAsmOptions, _line_spans: &[Span]) {
++ let asm_arch = self.tcx.sess.asm_arch.unwrap();
++
++ // Default to Intel syntax on x86
++ let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
++ && !options.contains(InlineAsmOptions::ATT_SYNTAX);
++
++ // Build the template string
++ let mut template_str = String::new();
++ for piece in template {
++ match *piece {
++ InlineAsmTemplatePiece::String(ref string) => {
++ for line in string.lines() {
++ // NOTE: gcc does not allow inline comments, so remove them.
++ let line =
++ if let Some(index) = line.rfind("//") {
++ &line[..index]
++ }
++ else {
++ line
++ };
++ template_str.push_str(line);
++ template_str.push('\n');
++ }
++ },
++ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
++ match operands[operand_idx] {
++ GlobalAsmOperandRef::Const { ref string } => {
++ // Const operands get injected directly into the
++ // template. Note that we don't need to escape $
++ // here unlike normal inline assembly.
++ template_str.push_str(string);
++ }
++ }
++ }
++ }
++ }
++
++ let template_str =
++ if intel_syntax {
++ format!("{}\n\t.intel_syntax noprefix", template_str)
++ }
++ else {
++ format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
++ };
++ // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually.
++ let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
++ self.context.add_top_level_asm(None, &template_str);
++ }
++}
++
++fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
++ match reg {
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
++ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
++ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
++ unimplemented!()
++ //if modifier == Some('v') { None } else { modifier }
++ }
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
++ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
++ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
++ unimplemented!()
++ /*if modifier.is_none() {
++ Some('q')
++ } else {
++ modifier
++ }*/
++ }
++ InlineAsmRegClass::Bpf(_) => unimplemented!(),
++ InlineAsmRegClass::Hexagon(_) => unimplemented!(),
++ InlineAsmRegClass::Mips(_) => unimplemented!(),
++ InlineAsmRegClass::Nvptx(_) => unimplemented!(),
++ InlineAsmRegClass::PowerPC(_) => unimplemented!(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
++ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
++ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
++ None if arch == InlineAsmArch::X86_64 => Some('q'),
++ None => Some('k'),
++ Some('l') => Some('b'),
++ Some('h') => Some('h'),
++ Some('x') => Some('w'),
++ Some('e') => Some('k'),
++ Some('r') => Some('q'),
++ _ => unreachable!(),
++ },
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!() /*match (reg, modifier) {
++ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
++ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
++ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
++ (_, Some('x')) => Some('x'),
++ (_, Some('y')) => Some('t'),
++ (_, Some('z')) => Some('g'),
++ _ => unreachable!(),
++ }*/,
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
++ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
++ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
++ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
++ bug!("LLVM backend does not support SPIR-V")
++ },
++ InlineAsmRegClass::Err => unreachable!(),
++ }
++}
--- /dev/null
--- /dev/null
++pub mod write;
--- /dev/null
--- /dev/null
++use std::fs;
++
++use gccjit::OutputKind;
++use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
++use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
++use rustc_errors::Handler;
++use rustc_session::config::OutputType;
++use rustc_span::fatal_error::FatalError;
++use rustc_target::spec::SplitDebuginfo;
++
++use crate::{GccCodegenBackend, GccContext};
++
++pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
++ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
++ {
++ let context = &module.module_llvm.context;
++
++ //let llcx = &*module.module_llvm.llcx;
++ //let tm = &*module.module_llvm.tm;
++ let module_name = module.name.clone();
++ let module_name = Some(&module_name[..]);
++ //let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
++
++ /*if cgcx.msvc_imps_needed {
++ create_msvc_imps(cgcx, llcx, llmod);
++ }*/
++
++ // A codegen-specific pass manager is used to generate object
++ // files for a GCC module.
++ //
++ // Apparently each of these pass managers is a one-shot kind of
++ // thing, so we create a new one for each type of output. The
++ // pass manager passed to the closure should be ensured to not
++ // escape the closure itself, and the manager should only be
++ // used once.
++ /*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R
++ where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
++ {
++ let cpm = llvm::LLVMCreatePassManager();
++ llvm::LLVMAddAnalysisPasses(tm, cpm);
++ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
++ f(cpm)
++ }*/
++
++ // Two things to note:
++ // - If object files are just LLVM bitcode we write bitcode, copy it to
++ // the .o file, and delete the bitcode if it wasn't otherwise
++ // requested.
++ // - If we don't have the integrated assembler then we need to emit
++ // asm from LLVM and use `gcc` to create the object file.
++
++ let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
++ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
++
++ if config.bitcode_needed() {
++ // TODO
++ /*let _timer = cgcx
++ .prof
++ .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
++ let thin = ThinBuffer::new(llmod);
++ let data = thin.data();
++
++ if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
++ let _timer = cgcx.prof.generic_activity_with_arg(
++ "LLVM_module_codegen_emit_bitcode",
++ &module.name[..],
++ );
++ if let Err(e) = fs::write(&bc_out, data) {
++ let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
++ diag_handler.err(&msg);
++ }
++ }
++
++ if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
++ let _timer = cgcx.prof.generic_activity_with_arg(
++ "LLVM_module_codegen_embed_bitcode",
++ &module.name[..],
++ );
++ embed_bitcode(cgcx, llcx, llmod, Some(data));
++ }
++
++ if config.emit_bc_compressed {
++ let _timer = cgcx.prof.generic_activity_with_arg(
++ "LLVM_module_codegen_emit_compressed_bitcode",
++ &module.name[..],
++ );
++ let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
++ let data = bytecode::encode(&module.name, data);
++ if let Err(e) = fs::write(&dst, data) {
++ let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
++ diag_handler.err(&msg);
++ }
++ }*/
++ } /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) {
++ unimplemented!();
++ //embed_bitcode(cgcx, llcx, llmod, None);
++ }*/
++
++ if config.emit_ir {
++ unimplemented!();
++ /*let _timer = cgcx
++ .prof
++ .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
++ let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
++ let out_c = path_to_c_string(&out);
++
++ extern "C" fn demangle_callback(
++ input_ptr: *const c_char,
++ input_len: size_t,
++ output_ptr: *mut c_char,
++ output_len: size_t,
++ ) -> size_t {
++ let input =
++ unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
++
++ let input = match str::from_utf8(input) {
++ Ok(s) => s,
++ Err(_) => return 0,
++ };
++
++ let output = unsafe {
++ slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
++ };
++ let mut cursor = io::Cursor::new(output);
++
++ let demangled = match rustc_demangle::try_demangle(input) {
++ Ok(d) => d,
++ Err(_) => return 0,
++ };
++
++ if write!(cursor, "{:#}", demangled).is_err() {
++ // Possible only if provided buffer is not big enough
++ return 0;
++ }
++
++ cursor.position() as size_t
++ }
++
++ let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
++ result.into_result().map_err(|()| {
++ let msg = format!("failed to write LLVM IR to {}", out.display());
++ llvm_err(diag_handler, &msg)
++ })?;*/
++ }
++
++ if config.emit_asm {
++ let _timer = cgcx
++ .prof
++ .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
++ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
++ context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
++
++ /*with_codegen(tm, llmod, config.no_builtins, |cpm| {
++ write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
++ })?;*/
++ }
++
++ match config.emit_obj {
++ EmitObj::ObjectCode(_) => {
++ let _timer = cgcx
++ .prof
++ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
++ //with_codegen(tm, llmod, config.no_builtins, |cpm| {
++ //println!("1: {}", module.name);
++ match &*module.name {
++ "std_example.7rcbfp3g-cgu.15" => {
++ println!("Dumping reproducer {}", module.name);
++ let _ = fs::create_dir("/tmp/reproducers");
++ // FIXME: segfault in dump_reproducer_to_file() might be caused by
++ // transmuting an rvalue to an lvalue.
++ // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
++ context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
++ println!("Dumped reproducer {}", module.name);
++ },
++ _ => (),
++ }
++ /*let _ = fs::create_dir("/tmp/dumps");
++ context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true);
++ println!("Dumped {}", module.name);*/
++ //println!("Compile module {}", module.name);
++ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
++ //})?;
++ }
++
++ EmitObj::Bitcode => {
++ //unimplemented!();
++ /*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
++ if let Err(e) = link_or_copy(&bc_out, &obj_out) {
++ diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
++ }
++
++ if !config.emit_bc {
++ debug!("removing_bitcode {:?}", bc_out);
++ if let Err(e) = fs::remove_file(&bc_out) {
++ diag_handler.err(&format!("failed to remove bitcode: {}", e));
++ }
++ }*/
++ }
++
++ EmitObj::None => {}
++ }
++
++ //drop(handlers);
++ }
++
++ Ok(module.into_compiled_module(
++ config.emit_obj != EmitObj::None,
++ cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
++ config.emit_bc,
++ &cgcx.output_filenames,
++ ))
++}
++
++pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
++ unimplemented!();
++ /*use super::lto::{Linker, ModuleBuffer};
++ // Sort the modules by name to ensure deterministic behavior.
++ modules.sort_by(|a, b| a.name.cmp(&b.name));
++ let (first, elements) =
++ modules.split_first().expect("Bug! modules must contain at least one module.");
++
++ let mut linker = Linker::new(first.module_llvm.llmod());
++ for module in elements {
++ let _timer =
++ cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
++ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
++ linker.add(&buffer.data()).map_err(|()| {
++ let msg = format!("failed to serialize module {:?}", module.name);
++ llvm_err(&diag_handler, &msg)
++ })?;
++ }
++ drop(linker);
++ Ok(modules.remove(0))*/
++}
--- /dev/null
--- /dev/null
++use std::env;
++use std::sync::Once;
++use std::time::Instant;
++
++use gccjit::{
++ Context,
++ FunctionType,
++ GlobalKind,
++};
++use rustc_hir::def_id::LOCAL_CRATE;
++use rustc_middle::dep_graph;
++use rustc_middle::middle::cstore::EncodedMetadata;
++use rustc_middle::middle::exported_symbols;
++use rustc_middle::ty::TyCtxt;
++use rustc_middle::mir::mono::Linkage;
++use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
++use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
++use rustc_codegen_ssa::mono_item::MonoItemExt;
++use rustc_codegen_ssa::traits::DebugInfoMethods;
++use rustc_session::config::DebugInfo;
++use rustc_span::Symbol;
++
++use crate::{GccContext, create_function_calling_initializers};
++use crate::builder::Builder;
++use crate::context::CodegenCx;
++
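++/// Maps a Rust linkage to the closest libgccjit `GlobalKind`; several variants
++/// are not implemented yet and the weak linkages are only approximated.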
++pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
++ match linkage {
++ Linkage::External => GlobalKind::Imported,
++ Linkage::AvailableExternally => GlobalKind::Imported,
++ Linkage::LinkOnceAny => unimplemented!(),
++ Linkage::LinkOnceODR => unimplemented!(),
++ Linkage::WeakAny => unimplemented!(),
++ Linkage::WeakODR => unimplemented!(),
++ Linkage::Appending => unimplemented!(),
++ Linkage::Internal => GlobalKind::Internal,
++ Linkage::Private => GlobalKind::Internal,
++ Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage.
++ Linkage::Common => unimplemented!(),
++ }
++}
++
++pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
++ match linkage {
++ Linkage::External => FunctionType::Exported,
++ Linkage::AvailableExternally => FunctionType::Extern,
++ Linkage::LinkOnceAny => unimplemented!(),
++ Linkage::LinkOnceODR => unimplemented!(),
++ Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce.
++ Linkage::WeakODR => unimplemented!(),
++ Linkage::Appending => unimplemented!(),
++ Linkage::Internal => FunctionType::Internal,
++ Linkage::Private => FunctionType::Internal,
++ Linkage::ExternalWeak => unimplemented!(),
++ Linkage::Common => unimplemented!(),
++ }
++}
++
++pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
++ let prof_timer = tcx.prof.generic_activity("codegen_module");
++ let start_time = Instant::now();
++
++ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
++ let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
++ let time_to_codegen = start_time.elapsed();
++ drop(prof_timer);
++
++ // We assume that the cost to run GCC on a CGU is proportional to
++ // the time we needed for codegenning it.
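++ // (i.e. the elapsed codegen time, in nanoseconds)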
++ let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
++
++ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
++ let cgu = tcx.codegen_unit(cgu_name);
++ // Instantiate monomorphizations without filling out definitions yet...
++ //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
++ let context = Context::default();
++ // TODO: only set on x86 platforms.
++ context.add_command_line_option("-masm=intel");
++ for arg in &tcx.sess.opts.cg.llvm_args {
++ context.add_command_line_option(arg);
++ }
++ context.add_command_line_option("-fno-semantic-interposition");
++ //context.set_dump_code_on_compile(true);
++ if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
++ context.set_dump_initial_gimple(true);
++ }
++ context.set_debug_info(true);
++ //context.set_dump_everything(true);
++ //context.set_keep_intermediates(true);
++
++ {
++ let cx = CodegenCx::new(&context, cgu, tcx);
++
++ static START: Once = Once::new();
++ START.call_once(|| {
++ let initializer_name = format!("__gccGlobalCrateInit{}", tcx.crate_name(LOCAL_CRATE));
++ let func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[], initializer_name, false);
++ let block = func.new_block("initial");
++ create_function_calling_initializers(tcx, &context, block);
++ block.end_with_void_return(None);
++ });
++
++ //println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _);
++ let mono_items = cgu.items_in_deterministic_order(tcx);
++ for &(mono_item, (linkage, visibility)) in &mono_items {
++ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
++ }
++
++ // ... and now that we have everything pre-defined, fill out those definitions.
++ for &(mono_item, _) in &mono_items {
++ mono_item.define::<Builder<'_, '_, '_>>(&cx);
++ }
++
++ // If this codegen unit contains the main function, also create the
++ // wrapper here
++ maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
++
++ // Finalize debuginfo
++ if cx.sess().opts.debuginfo != DebugInfo::None {
++ cx.debuginfo_finalize();
++ }
++
++ cx.global_init_block.end_with_void_return(None);
++ }
++
++ ModuleCodegen {
++ name: cgu_name.to_string(),
++ module_llvm: GccContext {
++ context
++ },
++ kind: ModuleKind::Regular,
++ }
++ }
++
++ (module, cost)
++}
++
++pub fn write_compressed_metadata<'tcx>(tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut GccContext) {
++ use snap::write::FrameEncoder;
++ use std::io::Write;
++
++ // Historical note:
++ //
++ // When using link.exe it was seen that the section name `.note.rustc`
++ // was getting shortened to `.note.ru`, and according to the PE and COFF
++ // specification:
++ //
++ // > Executable images do not use a string table and do not support
++ // > section names longer than 8 characters
++ //
++ // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
++ //
++ // As a result, we choose a slightly shorter name! As to why
++ // `.note.rustc` works on MinGW, see
++ // https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
++ let section_name = if tcx.sess.target.is_like_osx { "__DATA,.rustc" } else { ".rustc" };
++
++ let context = &gcc_module.context;
++ let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
++ FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
++
++ let name = exported_symbols::metadata_symbol_name(tcx);
++ let typ = context.new_array_type(None, context.new_type::<u8>(), compressed.len() as i32);
++ let global = context.new_global(None, GlobalKind::Exported, typ, name);
++ global.global_set_initializer(&compressed);
++ global.set_link_section(section_name);
++
++ // Also generate a .section directive to force no
++ // flags, at least for ELF outputs, so that the
++ // metadata doesn't get loaded into memory.
++ let directive = format!(".section {}", section_name);
++ context.add_top_level_asm(None, &directive);
++}
--- /dev/null
--- /dev/null
++use std::borrow::Cow;
++use std::cell::Cell;
++use std::convert::TryFrom;
++use std::ops::{Deref, Range};
++
++use gccjit::FunctionType;
++use gccjit::{
++ BinaryOp,
++ Block,
++ ComparisonOp,
++ Function,
++ LValue,
++ RValue,
++ ToRValue,
++ Type,
++ UnaryOp,
++};
++use rustc_codegen_ssa::MemFlags;
++use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
++use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
++use rustc_codegen_ssa::mir::place::PlaceRef;
++use rustc_codegen_ssa::traits::{
++ BackendTypes,
++ BaseTypeMethods,
++ BuilderMethods,
++ ConstMethods,
++ DerivedTypeMethods,
++ HasCodegen,
++ OverflowOp,
++ StaticBuilderMethods,
++};
++use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
++use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout};
++use rustc_span::Span;
++use rustc_span::def_id::DefId;
++use rustc_target::abi::{
++ self,
++ Align,
++ HasDataLayout,
++ LayoutOf,
++ Size,
++ TargetDataLayout,
++};
++use rustc_target::spec::{HasTargetSpec, Target};
++
++use crate::common::{SignType, TypeReflection, type_is_pointer};
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++
++// TODO
++type Funclet = ();
++
++// TODO: remove this variable.
++static mut RETURN_VALUE_COUNT: usize = 0;
++
++enum ExtremumOperation {
++ Max,
++ Min,
++}
++
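++// NOTE: presumably needed because AtomicOrdering does not implement Clone in
++// this version of rustc (an assumption based on its usage below).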
++trait EnumClone {
++ fn clone(&self) -> Self;
++}
++
++impl EnumClone for AtomicOrdering {
++ fn clone(&self) -> Self {
++ match *self {
++ AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
++ AtomicOrdering::Unordered => AtomicOrdering::Unordered,
++ AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
++ AtomicOrdering::Acquire => AtomicOrdering::Acquire,
++ AtomicOrdering::Release => AtomicOrdering::Release,
++ AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
++ AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
++ }
++ }
++}
++
++pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
++ pub cx: &'a CodegenCx<'gcc, 'tcx>,
++ pub block: Option<Block<'gcc>>,
++ stack_var_count: Cell<usize>,
++}
++
++impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
++ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
++ Builder {
++ cx,
++ block: None,
++ stack_var_count: Cell::new(0),
++ }
++ }
++
++ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
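++ // NOTE: implemented as a compare-exchange loop: load the current value, then
++ // keep trying to store `src` while `src` is more extreme than the stored value
++ // and the exchange fails. Returns the value loaded before the loop.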
++ let size = self.cx.int_width(src.get_type()) / 8;
++
++ let func = self.current_func();
++
++ let load_ordering =
++ match order {
++ // TODO: does this make sense?
++ AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
++ _ => order.clone(),
++ };
++ let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
++ let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
++ let return_value = func.new_local(None, previous_value.get_type(), "return_value");
++ self.llbb().add_assignment(None, previous_var, previous_value);
++ self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
++
++ let while_block = func.new_block("while");
++ let after_block = func.new_block("after_while");
++ self.llbb().end_with_jump(None, while_block);
++
++ // NOTE: since jumps were added in a place compare_exchange does not expect,
++ // the current blocks in the state need to be updated.
++ self.block = Some(while_block);
++ *self.cx.current_block.borrow_mut() = Some(while_block);
++
++ let comparison_operator =
++ match operation {
++ ExtremumOperation::Max => ComparisonOp::LessThan,
++ ExtremumOperation::Min => ComparisonOp::GreaterThan,
++ };
++
++ let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
++ let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
++ let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
++ let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
++
++ while_block.end_with_conditional(None, cond, while_block, after_block);
++
++ // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
++ // state need to be updated.
++ self.block = Some(after_block);
++ *self.cx.current_block.borrow_mut() = Some(after_block);
++
++ return_value.to_rvalue()
++ }
++
++ fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
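++ // NOTE: calls GCC's __atomic_compare_exchange_<size> builtin: `cmp` is passed
++ // by address as the expected value (updated by the builtin on failure) and the
++ // call returns a bool indicating whether the exchange happened.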
++ let size = self.cx.int_width(src.get_type());
++ let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
++ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
++ let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
++ let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
++
++ let void_ptr_type = self.context.new_type::<*mut ()>();
++ let volatile_void_ptr_type = void_ptr_type.make_volatile();
++ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
++ let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
++
++ // NOTE: not sure why, but we have the wrong type here.
++ let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
++ let src = self.context.new_cast(None, src, int_type);
++ self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
++ }
++
++ pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
++ self.llbb().add_assignment(None, lvalue, value);
++ }
++
++ fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
++ //let mut fn_ty = self.cx.val_ty(func);
++ // Strip off pointers
++ /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
++ fn_ty = self.cx.element_type(fn_ty);
++ }*/
++
++ /*assert!(
++ self.cx.type_kind(fn_ty) == TypeKind::Function,
++ "builder::{} not passed a function, but {:?}",
++ typ,
++ fn_ty
++ );
++
++ let param_tys = self.cx.func_params_types(fn_ty);
++
++ let all_args_match = param_tys
++ .iter()
++ .zip(args.iter().map(|&v| self.val_ty(v)))
++ .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
++
++ let mut all_args_match = true;
++ let mut param_types = vec![];
++ let param_count = func.get_param_count();
++ for (index, arg) in args.iter().enumerate().take(param_count) {
++ let param = func.get_param(index as i32);
++ let param = param.to_rvalue().get_type();
++ if param != arg.get_type() {
++ all_args_match = false;
++ }
++ param_types.push(param);
++ }
++
++ if all_args_match {
++ return Cow::Borrowed(args);
++ }
++
++ let casted_args: Vec<_> = param_types
++ .into_iter()
++ .zip(args.iter())
++ .enumerate()
++ .map(|(_i, (expected_ty, &actual_val))| {
++ let actual_ty = actual_val.get_type();
++ if expected_ty != actual_ty {
++ /*debug!(
++ "type mismatch in function call of {:?}. \
++ Expected {:?} for param {}, got {:?}; injecting bitcast",
++ func, expected_ty, i, actual_ty
++ );*/
++ /*println!(
++ "type mismatch in function call of {:?}. \
++ Expected {:?} for param {}, got {:?}; injecting bitcast",
++ func, expected_ty, i, actual_ty
++ );*/
++ self.bitcast(actual_val, expected_ty)
++ }
++ else {
++ actual_val
++ }
++ })
++ .collect();
++
++ Cow::Owned(casted_args)
++ }
++
++ fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
++ //let mut fn_ty = self.cx.val_ty(func);
++ // Strip off pointers
++ /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
++ fn_ty = self.cx.element_type(fn_ty);
++ }*/
++
++ /*assert!(
++ self.cx.type_kind(fn_ty) == TypeKind::Function,
++ "builder::{} not passed a function, but {:?}",
++ typ,
++ fn_ty
++ );
++
++ let param_tys = self.cx.func_params_types(fn_ty);
++
++ let all_args_match = param_tys
++ .iter()
++ .zip(args.iter().map(|&v| self.val_ty(v)))
++ .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
++
++ let mut all_args_match = true;
++ let mut param_types = vec![];
++ let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
++ for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
++ let param = gcc_func.get_param_type(index);
++ if param != arg.get_type() {
++ all_args_match = false;
++ }
++ param_types.push(param);
++ }
++
++ if all_args_match {
++ return Cow::Borrowed(args);
++ }
++
++ let casted_args: Vec<_> = param_types
++ .into_iter()
++ .zip(args.iter())
++ .enumerate()
++ .map(|(_i, (expected_ty, &actual_val))| {
++ let actual_ty = actual_val.get_type();
++ if expected_ty != actual_ty {
++ /*debug!(
++ "type mismatch in function call of {:?}. \
++ Expected {:?} for param {}, got {:?}; injecting bitcast",
++ func, expected_ty, i, actual_ty
++ );*/
++ /*println!(
++ "type mismatch in function call of {:?}. \
++ Expected {:?} for param {}, got {:?}; injecting bitcast",
++ func, expected_ty, i, actual_ty
++ );*/
++ self.bitcast(actual_val, expected_ty)
++ }
++ else {
++ actual_val
++ }
++ })
++ .collect();
++
++ Cow::Owned(casted_args)
++ }
++
++ fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
++ let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
++ let stored_ty = self.cx.val_ty(val);
++ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
++
++ //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
++
++ if dest_ptr_ty == stored_ptr_ty {
++ ptr
++ }
++ else {
++ /*debug!(
++ "type mismatch in store. \
++ Expected {:?}, got {:?}; inserting bitcast",
++ dest_ptr_ty, stored_ptr_ty
++ );*/
++ /*println!(
++ "type mismatch in store. \
++ Expected {:?}, got {:?}; inserting bitcast",
++ dest_ptr_ty, stored_ptr_ty
++ );*/
++ //ptr
++ self.bitcast(ptr, stored_ptr_ty)
++ }
++ }
++
++ pub fn current_func(&self) -> Function<'gcc> {
++ self.block.expect("block").get_function()
++ }
++
++ fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
++ //debug!("call {:?} with args ({:?})", func, args);
++
++ // TODO: remove when the API supports a different type for functions.
++ let func: Function<'gcc> = self.cx.rvalue_as_function(func);
++ let args = self.check_call("call", func, args);
++ //let bundle = funclet.map(|funclet| funclet.bundle());
++ //let bundle = bundle.as_ref().map(|b| &*b.raw);
++
++ // gccjit requires the result of a function call to be used, even when it is
++ // not needed otherwise. That's why we assign the result to a local or call add_eval().
++ let return_type = func.get_return_type();
++ let current_block = self.current_block.borrow().expect("block");
++ let void_type = self.context.new_type::<()>();
++ let current_func = current_block.get_function();
++ if return_type != void_type {
++ unsafe { RETURN_VALUE_COUNT += 1 };
++ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
++ current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
++ result.to_rvalue()
++ }
++ else {
++ current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
++ // Return a dummy value when there is no return value.
++ self.context.new_rvalue_from_long(self.isize_type, 0)
++ }
++ }
++
++ fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
++ //debug!("func ptr call {:?} with args ({:?})", func, args);
++
++ let args = self.check_ptr_call("call", func_ptr, args);
++ //let bundle = funclet.map(|funclet| funclet.bundle());
++ //let bundle = bundle.as_ref().map(|b| &*b.raw);
++
++ // gccjit requires the result of a function call to be used, even when it is
++ // not needed otherwise. That's why we assign the result to a local or call add_eval().
++ let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
++ let mut return_type = gcc_func.get_return_type();
++ let current_block = self.current_block.borrow().expect("block");
++ let void_type = self.context.new_type::<()>();
++ let current_func = current_block.get_function();
++
++ // FIXME: this is a temporary workaround for unsupported LLVM intrinsics.
++ if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
++ return_type = self.int_type;
++ }
++
++ if return_type != void_type {
++ unsafe { RETURN_VALUE_COUNT += 1 };
++ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
++ current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
++ result.to_rvalue()
++ }
++ else {
++ if gcc_func.get_param_count() == 0 {
++ // FIXME: this is a temporary workaround for unsupported LLVM intrinsics.
++ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
++ }
++ else {
++ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
++ }
++ // Return a dummy value when there is no return value.
++ let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
++ current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
++ result.to_rvalue()
++ }
++ }
++
++ pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
++ //debug!("overflow_call {:?} with args ({:?})", func, args);
++
++ //let bundle = funclet.map(|funclet| funclet.bundle());
++ //let bundle = bundle.as_ref().map(|b| &*b.raw);
++
++ // gccjit requires the result of a function call to be used, even when it is
++ // not needed otherwise. That's why we assign the result to a local.
++ let return_type = self.context.new_type::<bool>();
++ let current_block = self.current_block.borrow().expect("block");
++ let current_func = current_block.get_function();
++ // TODO: return the new_call() directly, since the overflow function has no side-effects?
++ unsafe { RETURN_VALUE_COUNT += 1 };
++ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
++ current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
++ result.to_rvalue()
++ }
++}
++
++impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
++ type CodegenCx = CodegenCx<'gcc, 'tcx>;
++}
++
++impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
++ fn tcx(&self) -> TyCtxt<'tcx> {
++ self.cx.tcx()
++ }
++}
++
++impl HasDataLayout for Builder<'_, '_, '_> {
++ fn data_layout(&self) -> &TargetDataLayout {
++ self.cx.data_layout()
++ }
++}
++
++impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> {
++ type Ty = Ty<'tcx>;
++ type TyAndLayout = TyAndLayout<'tcx>;
++
++ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
++ self.cx.layout_of(ty)
++ }
++}
++
++impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
++ type Target = CodegenCx<'gcc, 'tcx>;
++
++ fn deref(&self) -> &Self::Target {
++ self.cx
++ }
++}
++
++impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
++ type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
++ type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
++ type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
++ type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
++ type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
++
++ type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
++ type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
++ type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
++}
++
++impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
++ let mut bx = Builder::with_cx(cx);
++ *cx.current_block.borrow_mut() = Some(block);
++ bx.block = Some(block);
++ bx
++ }
++
++ fn build_sibling_block(&mut self, name: &str) -> Self {
++ let block = self.append_sibling_block(name);
++ Self::build(self.cx, block)
++ }
++
++ fn llbb(&self) -> Block<'gcc> {
++ self.block.expect("block")
++ }
++
++ fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
++ let func = cx.rvalue_as_function(func);
++ func.new_block(name)
++ }
++
++ fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
++ let func = self.current_func();
++ func.new_block(name)
++ }
++
++ fn ret_void(&mut self) {
++ self.llbb().end_with_void_return(None)
++ }
++
++ fn ret(&mut self, value: RValue<'gcc>) {
++ let value =
++ if self.structs_as_pointer.borrow().contains(&value) {
++ // NOTE: hack to work around a limitation of the rustc API: see comment on
++ // CodegenCx.structs_as_pointer
++ value.dereference(None).to_rvalue()
++ }
++ else {
++ value
++ };
++ self.llbb().end_with_return(None, value);
++ }
++
++ fn br(&mut self, dest: Block<'gcc>) {
++ self.llbb().end_with_jump(None, dest)
++ }
++
++ fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
++ self.llbb().end_with_conditional(None, cond, then_block, else_block)
++ }
++
++ fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
++ let mut gcc_cases = vec![];
++ let typ = self.val_ty(value);
++ for (on_val, dest) in cases {
++ let on_val = self.const_uint_big(typ, on_val);
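++ // NOTE: gccjit switch cases are value ranges, hence the same value for both bounds.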
++ gcc_cases.push(self.context.new_case(on_val, on_val, dest));
++ }
++ self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
++ }
++
++ fn invoke(&mut self, _func: RValue<'gcc>, _args: &[RValue<'gcc>], _then: Block<'gcc>, _catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
++ unimplemented!();
++ /*debug!("invoke {:?} with args ({:?})", func, args);
++
++ let args = self.check_call("invoke", func, args);
++ let bundle = funclet.map(|funclet| funclet.bundle());
++ let bundle = bundle.as_ref().map(|b| &*b.raw);
++
++ unsafe {
++ llvm::LLVMRustBuildInvoke(
++ self.llbuilder,
++ func,
++ args.as_ptr(),
++ args.len() as c_uint,
++ then,
++ catch,
++ bundle,
++ UNNAMED,
++ )
++ }*/
++ }
++
++ fn unreachable(&mut self) {
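++ // NOTE: gccjit requires every block to be terminated, so after calling
++ // __builtin_unreachable, end the block with a (never executed) return.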
++ let func = self.context.get_builtin_function("__builtin_unreachable");
++ let block = self.block.expect("block");
++ block.add_eval(None, self.context.new_call(None, func, &[]));
++ let return_type = block.get_function().get_return_type();
++ let void_type = self.context.new_type::<()>();
++ if return_type == void_type {
++ block.end_with_void_return(None)
++ }
++ else {
++ let return_value = self.current_func()
++ .new_local(None, return_type, "unreachableReturn");
++ block.end_with_return(None, return_value)
++ }
++ }
++
++ fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
++ // FIXME: this should not be required.
++ if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
++ b = self.context.new_cast(None, b, a.get_type());
++ }
++ a + b
++ }
++
++ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a + b
++ }
++
++ fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
++ if a.get_type() != b.get_type() {
++ b = self.context.new_cast(None, b, a.get_type());
++ }
++ a - b
++ }
++
++ fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a - b
++ }
++
++ fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a * b
++ }
++
++ fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a * b
++ }
++
++ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: convert the arguments to unsigned?
++ a / b
++ }
++
++ fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: convert the arguments to unsigned?
++ // TODO: poison if not exact.
++ a / b
++ }
++
++ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: convert the arguments to signed?
++ a / b
++ }
++
++ fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++        // TODO: poison if not exact.
++ // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
++ // should be the same.
++ let typ = a.get_type().to_signed(self);
++ let a = self.context.new_cast(None, a, typ);
++ let b = self.context.new_cast(None, b, typ);
++ a / b
++ }
++
++ fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a / b
++ }
++
++ fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a % b
++ }
++
++ fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a % b
++ }
++
++ fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ if a.get_type() == self.cx.float_type {
++ let fmodf = self.context.get_builtin_function("fmodf");
++ // FIXME: this seems to produce the wrong result.
++ return self.context.new_call(None, fmodf, &[a, b]);
++ }
++ assert_eq!(a.get_type(), self.cx.double_type);
++
++ let fmod = self.context.get_builtin_function("fmod");
++ return self.context.new_call(None, fmod, &[a, b]);
++ }
++
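++    // NOTE: libgccjit requires both shift operands to have the same type, so the three
++    // shift methods below cast one operand to the other's type whenever the signedness
++    // differs, e.g. an unsigned `a` shifted by a signed `b` is built roughly as
++    // `cast(cast(a, b_type) << b, a_type)`.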
++ fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++        // FIXME: remove the casts when libgccjit can shift operands of differing signedness.
++ let a_type = a.get_type();
++ let b_type = b.get_type();
++ if a_type.is_unsigned(self) && b_type.is_signed(self) {
++ //println!("shl: {:?} -> {:?}", a, b_type);
++ let a = self.context.new_cast(None, a, b_type);
++ let result = a << b;
++ //println!("shl: {:?} -> {:?}", result, a_type);
++ self.context.new_cast(None, result, a_type)
++ }
++ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
++ //println!("shl: {:?} -> {:?}", b, a_type);
++ let b = self.context.new_cast(None, b, a_type);
++ a << b
++ }
++ else {
++ a << b
++ }
++ }
++
++ fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++        // FIXME: remove the casts when libgccjit can shift operands of differing signedness.
++        // TODO: cast to unsigned to force a logical shift if this does not work.
++ let a_type = a.get_type();
++ let b_type = b.get_type();
++ if a_type.is_unsigned(self) && b_type.is_signed(self) {
++ //println!("lshl: {:?} -> {:?}", a, b_type);
++ let a = self.context.new_cast(None, a, b_type);
++ let result = a >> b;
++ //println!("lshl: {:?} -> {:?}", result, a_type);
++ self.context.new_cast(None, result, a_type)
++ }
++ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
++ //println!("lshl: {:?} -> {:?}", b, a_type);
++ let b = self.context.new_cast(None, b, a_type);
++ a >> b
++ }
++ else {
++ a >> b
++ }
++ }
++
++ fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++        // TODO: check whether `>>` on a signed operand is an arithmetic shift.
++        // FIXME: remove the casts when libgccjit can shift operands of differing signedness.
++ let a_type = a.get_type();
++ let b_type = b.get_type();
++ if a_type.is_unsigned(self) && b_type.is_signed(self) {
++ //println!("ashl: {:?} -> {:?}", a, b_type);
++ let a = self.context.new_cast(None, a, b_type);
++ let result = a >> b;
++ //println!("ashl: {:?} -> {:?}", result, a_type);
++ self.context.new_cast(None, result, a_type)
++ }
++ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
++ //println!("ashl: {:?} -> {:?}", b, a_type);
++ let b = self.context.new_cast(None, b, a_type);
++ a >> b
++ }
++ else {
++ a >> b
++ }
++ }
++
++ fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
++        // FIXME: hack: put the result in a variable to work around this bug:
++        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
++ if a.get_type() != b.get_type() {
++ b = self.context.new_cast(None, b, a.get_type());
++ }
++ let res = self.current_func().new_local(None, b.get_type(), "andResult");
++ self.llbb().add_assignment(None, res, a & b);
++ res.to_rvalue()
++ }
++
++ fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++        // FIXME: hack: put the result in a variable to work around this bug:
++        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
++ let res = self.current_func().new_local(None, b.get_type(), "orResult");
++ self.llbb().add_assignment(None, res, a | b);
++ res.to_rvalue()
++ }
++
++ fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a ^ b
++ }
++
++ fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: use new_unary_op()?
++ self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
++ }
++
++ fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
++ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
++ }
++
++ fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
++ let operation =
++ if a.get_type().is_bool() {
++ UnaryOp::LogicalNegate
++ }
++ else {
++ UnaryOp::BitwiseNegate
++ };
++ self.cx.context.new_unary_op(None, operation, a.get_type(), a)
++ }
++
++ fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a + b
++ }
++
++ fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a + b
++ }
++
++ fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a - b
++ }
++
++ fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: should generate poison value?
++ a - b
++ }
++
++ fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a * b
++ }
++
++ fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
++ a * b
++ }
++
++ fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
++ llvm::LLVMRustSetHasUnsafeAlgebra(instr);
++ instr
++ }*/
++ }
++
++ fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
++ llvm::LLVMRustSetHasUnsafeAlgebra(instr);
++ instr
++ }*/
++ }
++
++ fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
++ llvm::LLVMRustSetHasUnsafeAlgebra(instr);
++ instr
++ }*/
++ }
++
++ fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
++ llvm::LLVMRustSetHasUnsafeAlgebra(instr);
++ instr
++ }*/
++ }
++
++ fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
++ llvm::LLVMRustSetHasUnsafeAlgebra(instr);
++ instr
++ }*/
++ }
++
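++    // checked_binop() maps rustc's overflow-checked arithmetic onto the GCC overflow
++    // builtins. GCC provides typed variants such as __builtin_sadd_overflow (int) and
++    // __builtin_saddll_overflow (long long) as well as the type-generic
++    // __builtin_add_overflow; they all store the result through a pointer and return a
++    // boolean overflow flag, roughly: bool __builtin_add_overflow(T a, T b, T *res).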
++ fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
++ use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
++
++ let new_kind =
++ match typ.kind() {
++ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
++ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
++ t @ (Uint(_) | Int(_)) => t.clone(),
++ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
++ };
++
++ // TODO: remove duplication with intrinsic?
++ let name =
++ match oop {
++ OverflowOp::Add =>
++ match new_kind {
++ Int(I8) => "__builtin_add_overflow",
++ Int(I16) => "__builtin_add_overflow",
++ Int(I32) => "__builtin_sadd_overflow",
++ Int(I64) => "__builtin_saddll_overflow",
++ Int(I128) => "__builtin_add_overflow",
++
++ Uint(U8) => "__builtin_add_overflow",
++ Uint(U16) => "__builtin_add_overflow",
++ Uint(U32) => "__builtin_uadd_overflow",
++ Uint(U64) => "__builtin_uaddll_overflow",
++ Uint(U128) => "__builtin_add_overflow",
++
++ _ => unreachable!(),
++ },
++ OverflowOp::Sub =>
++ match new_kind {
++ Int(I8) => "__builtin_sub_overflow",
++ Int(I16) => "__builtin_sub_overflow",
++ Int(I32) => "__builtin_ssub_overflow",
++ Int(I64) => "__builtin_ssubll_overflow",
++ Int(I128) => "__builtin_sub_overflow",
++
++ Uint(U8) => "__builtin_sub_overflow",
++ Uint(U16) => "__builtin_sub_overflow",
++ Uint(U32) => "__builtin_usub_overflow",
++ Uint(U64) => "__builtin_usubll_overflow",
++ Uint(U128) => "__builtin_sub_overflow",
++
++ _ => unreachable!(),
++ },
++ OverflowOp::Mul =>
++ match new_kind {
++ Int(I8) => "__builtin_mul_overflow",
++ Int(I16) => "__builtin_mul_overflow",
++ Int(I32) => "__builtin_smul_overflow",
++ Int(I64) => "__builtin_smulll_overflow",
++ Int(I128) => "__builtin_mul_overflow",
++
++ Uint(U8) => "__builtin_mul_overflow",
++ Uint(U16) => "__builtin_mul_overflow",
++ Uint(U32) => "__builtin_umul_overflow",
++ Uint(U64) => "__builtin_umulll_overflow",
++ Uint(U128) => "__builtin_mul_overflow",
++
++ _ => unreachable!(),
++ },
++ };
++
++ let intrinsic = self.context.get_builtin_function(&name);
++ let res = self.current_func()
++ // TODO: is it correct to use rhs type instead of the parameter typ?
++ .new_local(None, rhs.get_type(), "binopResult")
++ .get_address(None);
++ let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
++ (res.dereference(None).to_rvalue(), overflow)
++ }
++
++ fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
++        // FIXME: this checks that we don't call get_aligned() a second time on a type.
++        // Ideally, we shouldn't need to do this check.
++ let aligned_type =
++ if ty == self.cx.u128_type || ty == self.cx.i128_type {
++ ty
++ }
++ else {
++ ty.get_aligned(align.bytes())
++ };
++        // TODO: It might be better to return an LValue, but fixing the rustc API is non-trivial.
++ self.stack_var_count.set(self.stack_var_count.get() + 1);
++ self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
++ }
++
++ fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
++ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
++ alloca
++ }*/
++ }
++
++ fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
++ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
++ alloca
++ }*/
++ }
++
++ fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
++ // TODO: use ty.
++ let block = self.llbb();
++ let function = block.get_function();
++ // NOTE: instead of returning the dereference here, we have to assign it to a variable in
++ // the current basic block. Otherwise, it could be used in another basic block, causing a
++ // dereference after a drop, for instance.
++ // TODO: handle align.
++ let deref = ptr.dereference(None).to_rvalue();
++ let value_type = deref.get_type();
++ unsafe { RETURN_VALUE_COUNT += 1 };
++ let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
++ block.add_assignment(None, loaded_value, deref);
++ loaded_value.to_rvalue()
++ }
++
++ fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: use ty.
++ //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
++ let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
++ //println!("6");
++ ptr.dereference(None).to_rvalue()
++ }
++
++ fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
++ // TODO: use ty.
++ // TODO: handle alignment.
++ let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
++ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
++
++ let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
++ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
++ self.context.new_call(None, atomic_load, &[ptr, ordering])
++ }
++
++ fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
++ //debug!("PlaceRef::load: {:?}", place);
++
++ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
++
++ if place.layout.is_zst() {
++ return OperandRef::new_zst(self, place.layout);
++ }
++
++ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
++ let vr = scalar.valid_range.clone();
++ match scalar.value {
++ abi::Int(..) => {
++ let range = scalar.valid_range_exclusive(bx);
++ if range.start != range.end {
++ bx.range_metadata(load, range);
++ }
++ }
++ abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
++ bx.nonnull_metadata(load);
++ }
++ _ => {}
++ }
++ }
++
++ let val =
++ if let Some(llextra) = place.llextra {
++ OperandValue::Ref(place.llval, Some(llextra), place.align)
++ }
++ else if place.layout.is_gcc_immediate() {
++ let const_llval = None;
++ /*unsafe {
++ if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
++ if llvm::LLVMIsGlobalConstant(global) == llvm::True {
++ const_llval = llvm::LLVMGetInitializer(global);
++ }
++ }
++ }*/
++ let llval = const_llval.unwrap_or_else(|| {
++ let load = self.load(place.llval.get_type(), place.llval, place.align);
++ if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
++ scalar_load_metadata(self, load, scalar);
++ }
++ load
++ });
++ OperandValue::Immediate(self.to_immediate(llval, place.layout))
++ }
++ else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
++ let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
++
++ let mut load = |i, scalar: &abi::Scalar, align| {
++ let llptr = self.struct_gep(place.llval, i as u64);
++ let load = self.load(llptr.get_type(), llptr, align);
++ scalar_load_metadata(self, load, scalar);
++ if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
++ };
++
++ OperandValue::Pair(
++ load(0, a, place.align),
++ load(1, b, place.align.restrict_for_offset(b_offset)),
++ )
++ }
++ else {
++ OperandValue::Ref(place.llval, None, place.align)
++ };
++
++ OperandRef { val, layout: place.layout }
++ }
++
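++    // write_operand_repeatedly() emits a small pointer-bumping loop. The generated
++    // control flow is roughly:
++    //
++    //     header: if current != end { goto body } else { goto next }
++    //     body:   *current = elem; current += 1; goto header
++    //     next:   // codegen continues here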
++ fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
++ let zero = self.const_usize(0);
++ let count = self.const_usize(count);
++ let start = dest.project_index(&mut self, zero).llval;
++ let end = dest.project_index(&mut self, count).llval;
++
++ let mut header_bx = self.build_sibling_block("repeat_loop_header");
++ let mut body_bx = self.build_sibling_block("repeat_loop_body");
++ let next_bx = self.build_sibling_block("repeat_loop_next");
++
++ let ptr_type = start.get_type();
++ let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
++ let current_val = current.to_rvalue();
++ self.assign(current, start);
++
++ self.br(header_bx.llbb());
++
++ let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
++ header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
++
++ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
++ cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
++
++ let next = body_bx.inbounds_gep(current.to_rvalue(), &[self.const_usize(1)]);
++ body_bx.llbb().add_assignment(None, current, next);
++ body_bx.br(header_bx.llbb());
++
++ next_bx
++ }
++
++ fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
++ // TODO
++ /*if self.sess().target.target.arch == "amdgpu" {
++ // amdgpu/LLVM does something weird and thinks a i64 value is
++ // split into a v2i32, halving the bitwidth LLVM expects,
++ // tripping an assertion. So, for now, just disable this
++ // optimization.
++ return;
++ }
++
++ unsafe {
++ let llty = self.cx.val_ty(load);
++ let v = [
++ self.cx.const_uint_big(llty, range.start),
++ self.cx.const_uint_big(llty, range.end),
++ ];
++
++ llvm::LLVMSetMetadata(
++ load,
++ llvm::MD_range as c_uint,
++ llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
++ );
++ }*/
++ }
++
++ fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
++ // TODO
++ /*unsafe {
++ llvm::LLVMSetMetadata(
++ load,
++ llvm::MD_nonnull as c_uint,
++ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
++ );
++ }*/
++ }
++
++ fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
++ self.store_with_flags(val, ptr, align, MemFlags::empty())
++ }
++
++ fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
++ //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
++ let ptr = self.check_store(val, ptr);
++ self.llbb().add_assignment(None, ptr.dereference(None), val);
++ /*let align =
++ if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
++ llvm::LLVMSetAlignment(store, align);
++ if flags.contains(MemFlags::VOLATILE) {
++ llvm::LLVMSetVolatile(store, llvm::True);
++ }
++ if flags.contains(MemFlags::NONTEMPORAL) {
++ // According to LLVM [1] building a nontemporal store must
++ // *always* point to a metadata value of the integer 1.
++ //
++ // [1]: http://llvm.org/docs/LangRef.html#store-instruction
++ let one = self.cx.const_i32(1);
++ let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
++ llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
++ }*/
++ // NOTE: dummy value here since it's never used. FIXME: API should not return a value here?
++ self.cx.context.new_rvalue_zero(self.type_i32())
++ }
++
++ fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
++ // TODO: handle alignment.
++ let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
++ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
++ let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
++ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
++
++ // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because
++ // the following cast is required to avoid this error:
++ // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
++ let int_type = atomic_store.get_param(1).to_rvalue().get_type();
++ let value = self.context.new_cast(None, value, int_type);
++ self.llbb()
++ .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
++ }
++
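++    // NOTE: gccjit has no equivalent of LLVM's getelementptr, so indexing is emulated
++    // with repeated array accesses: each index projects one level and the address of
++    // the resulting element is taken, i.e. gep(p, [i, j]) becomes roughly &(&p[i])[j].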
++ fn gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
++ let mut result = ptr;
++ for index in indices {
++ result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
++ }
++ result
++ }
++
++ fn inbounds_gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
++        // FIXME: it would be safer to do the same thing as gep (a loop).
++ // TODO: specify inbounds somehow.
++ match indices.len() {
++ 1 => {
++ self.context.new_array_access(None, ptr, indices[0]).get_address(None)
++ },
++ 2 => {
++ let array = ptr.dereference(None); // TODO: assert that first index is 0?
++ self.context.new_array_access(None, array, indices[1]).get_address(None)
++ },
++ _ => unimplemented!(),
++ }
++ }
++
++ fn struct_gep(&mut self, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
++        // FIXME: it would be better if the API only called this on structs, not on arrays.
++ assert_eq!(idx as usize as u64, idx);
++ let value = ptr.dereference(None).to_rvalue();
++ let value_type = value.get_type();
++
++ if value_type.is_array().is_some() {
++ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
++ let element = self.context.new_array_access(None, value, index);
++ element.get_address(None)
++ }
++ else if let Some(vector_type) = value_type.is_vector() {
++ let array_type = vector_type.get_element_type().make_pointer();
++ let array = self.bitcast(ptr, array_type);
++ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
++ let element = self.context.new_array_access(None, array, index);
++ element.get_address(None)
++ }
++ else if let Some(struct_type) = value_type.is_struct() {
++ ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
++ }
++ else {
++ panic!("Unexpected type {:?}", value_type);
++ }
++ }
++
++ /* Casts */
++ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++        // TODO: check that it indeed truncates the value.
++ //println!("trunc: {:?} -> {:?}", value, dest_ty);
++ self.context.new_cast(None, value, dest_ty)
++ }
++
++ fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++        // TODO: check that it indeed sign-extends the value.
++ //println!("Sext {:?} to {:?}", value, dest_ty);
++ //if let Some(vector_type) = value.get_type().is_vector() {
++ if dest_ty.is_vector().is_some() {
++ // TODO: nothing to do as it is only for LLVM?
++ return value;
++ /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
++ println!("Casting {:?} to {:?}", value, dest_type);
++ return self.context.new_cast(None, value, dest_type);*/
++ }
++ self.context.new_cast(None, value, dest_ty)
++ }
++
++ fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ //println!("7: fptoui: {:?} to {:?}", value, dest_ty);
++ let ret = self.context.new_cast(None, value, dest_ty);
++ //println!("8");
++ ret
++ //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
++ }
++
++ fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ self.context.new_cast(None, value, dest_ty)
++ }
++
++ fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ //println!("1: uitofp: {:?} -> {:?}", value, dest_ty);
++ let ret = self.context.new_cast(None, value, dest_ty);
++ //println!("2");
++ ret
++ }
++
++ fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ //println!("3: sitofp: {:?} -> {:?}", value, dest_ty);
++ let ret = self.context.new_cast(None, value, dest_ty);
++ //println!("4");
++ ret
++ }
++
++ fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++        // TODO: make sure it truncates.
++ self.context.new_cast(None, value, dest_ty)
++ }
++
++ fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ self.context.new_cast(None, value, dest_ty)
++ }
++
++ fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
++ }
++
++ fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
++ }
++
++ fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ self.cx.const_bitcast(value, dest_ty)
++ }
++
++ fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
++ // NOTE: is_signed is for value, not dest_typ.
++ //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
++ self.cx.context.new_cast(None, value, dest_typ)
++ }
++
++ fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
++ let val_type = value.get_type();
++ match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
++ (false, true) => {
++                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
++                // a pointer, which is not supported by gccjit.
++ return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
++ },
++ (false, false) => {
++ // When they are not pointers, we want a transmute (or reinterpret_cast).
++ //self.cx.context.new_cast(None, value, dest_ty)
++ self.bitcast(value, dest_ty)
++ },
++ (true, true) => self.cx.context.new_cast(None, value, dest_ty),
++ (true, false) => unimplemented!(),
++ }
++ }
++
++ /* Comparisons */
++ fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
++ if lhs.get_type() != rhs.get_type() {
++            // NOTE: hack: the textual comparison avoids attempting to cast a vector type to
++            // the same vector type.
++ if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) {
++ rhs = self.context.new_cast(None, rhs, lhs.get_type());
++ }
++ }
++ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
++ }
++
++ fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
++ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
++ }
++
++ /* Miscellaneous instructions */
++ fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
++ if flags.contains(MemFlags::NONTEMPORAL) {
++ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
++ let val = self.load(src.get_type(), src, src_align);
++ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
++ self.store_with_flags(val, ptr, dst_align, flags);
++ return;
++ }
++ let size = self.intcast(size, self.type_size_t(), false);
++ let _is_volatile = flags.contains(MemFlags::VOLATILE);
++ let dst = self.pointercast(dst, self.type_i8p());
++ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
++ let memcpy = self.context.get_builtin_function("memcpy");
++ let block = self.block.expect("block");
++ // TODO: handle aligns and is_volatile.
++ block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
++ }
++
++ fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
++ if flags.contains(MemFlags::NONTEMPORAL) {
++ // HACK(nox): This is inefficient but there is no nontemporal memmove.
++ let val = self.load(src.get_type(), src, src_align);
++ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
++ self.store_with_flags(val, ptr, dst_align, flags);
++ return;
++ }
++ let size = self.intcast(size, self.type_size_t(), false);
++ let _is_volatile = flags.contains(MemFlags::VOLATILE);
++ let dst = self.pointercast(dst, self.type_i8p());
++ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
++
++ let memmove = self.context.get_builtin_function("memmove");
++ let block = self.block.expect("block");
++ // TODO: handle is_volatile.
++ block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
++ }
++
++ fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
++ let _is_volatile = flags.contains(MemFlags::VOLATILE);
++ let ptr = self.pointercast(ptr, self.type_i8p());
++ let memset = self.context.get_builtin_function("memset");
++ let block = self.block.expect("block");
++ // TODO: handle aligns and is_volatile.
++ //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
++ let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
++ let size = self.intcast(size, self.type_size_t(), false);
++ block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
++ }
++
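++    // NOTE: gccjit has no select/conditional-move instruction, so select() lowers to a
++    // diamond of blocks assigning into a fresh local:
++    //
++    //     if cond { goto then } else { goto else_ }
++    //     then:  var = then_val; goto after
++    //     else_: var = else_val; goto after
++    //     after: // result is var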
++ fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
++ let func = self.current_func();
++ let variable = func.new_local(None, then_val.get_type(), "selectVar");
++ let then_block = func.new_block("then");
++ let else_block = func.new_block("else");
++ let after_block = func.new_block("after");
++ self.llbb().end_with_conditional(None, cond, then_block, else_block);
++
++ then_block.add_assignment(None, variable, then_val);
++ then_block.end_with_jump(None, after_block);
++
++ if then_val.get_type() != else_val.get_type() {
++ else_val = self.context.new_cast(None, else_val, then_val.get_type());
++ }
++ else_block.add_assignment(None, variable, else_val);
++ else_block.end_with_jump(None, after_block);
++
++ // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
++ // state need to be updated.
++ self.block = Some(after_block);
++ *self.cx.current_block.borrow_mut() = Some(after_block);
++
++ variable.to_rvalue()
++ }
++
++ #[allow(dead_code)]
++ fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
++ }
++
++ fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
++ }
++
++ fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ let elt_ty = self.cx.val_ty(elt);
++ let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
++ let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
++ let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
++ self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
++ }*/
++ }
++
++ fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
++        // FIXME: it would be better if the API only called this on structs, not on arrays.
++ assert_eq!(idx as usize as u64, idx);
++ let value_type = aggregate_value.get_type();
++
++ if value_type.is_array().is_some() {
++ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
++ let element = self.context.new_array_access(None, aggregate_value, index);
++ element.get_address(None)
++ }
++ else if value_type.is_vector().is_some() {
++ panic!();
++ }
++ else if let Some(pointer_type) = value_type.get_pointee() {
++ if let Some(struct_type) = pointer_type.is_struct() {
++                // NOTE: hack to work around a limitation of the rustc API: see comment on
++                // CodegenCx.structs_as_pointer
++ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
++ }
++ else {
++ panic!("Unexpected type {:?}", value_type);
++ }
++ }
++ else if let Some(struct_type) = value_type.is_struct() {
++ aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
++ }
++ else {
++ panic!("Unexpected type {:?}", value_type);
++ }
++ /*assert_eq!(idx as c_uint as u64, idx);
++ unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
++ }
++
++ fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
++        // FIXME: it would be better if the API only called this on structs, not on arrays.
++ assert_eq!(idx as usize as u64, idx);
++ let value_type = aggregate_value.get_type();
++
++ let lvalue =
++ if value_type.is_array().is_some() {
++ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
++ self.context.new_array_access(None, aggregate_value, index)
++ }
++ else if value_type.is_vector().is_some() {
++ panic!();
++ }
++ else if let Some(pointer_type) = value_type.get_pointee() {
++ if let Some(struct_type) = pointer_type.is_struct() {
++                // NOTE: hack to work around a limitation of the rustc API: see comment on
++                // CodegenCx.structs_as_pointer
++ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
++ }
++ else {
++ panic!("Unexpected type {:?}", value_type);
++ }
++ }
++ else {
++ panic!("Unexpected type {:?}", value_type);
++ };
++ self.llbb().add_assignment(None, lvalue, value);
++
++ aggregate_value
++ }
++
++ fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
++ unimplemented!();
++ /*unsafe {
++ llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
++ }*/
++ }
++
++ fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
++ unimplemented!();
++ /*unsafe {
++ llvm::LLVMSetCleanup(landing_pad, llvm::True);
++ }*/
++ }
++
++ fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
++ }
++
++ fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
++ unimplemented!();
++ /*let name = const_cstr!("cleanuppad");
++ let ret = unsafe {
++ llvm::LLVMRustBuildCleanupPad(
++ self.llbuilder,
++ parent,
++ args.len() as c_uint,
++ args.as_ptr(),
++ name.as_ptr(),
++ )
++ };
++ Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
++ }
++
++ fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
++ unimplemented!();
++ /*let ret =
++ unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
++ ret.expect("LLVM does not have support for cleanupret")*/
++ }
++
++ fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
++ unimplemented!();
++ /*let name = const_cstr!("catchpad");
++ let ret = unsafe {
++ llvm::LLVMRustBuildCatchPad(
++ self.llbuilder,
++ parent,
++ args.len() as c_uint,
++ args.as_ptr(),
++ name.as_ptr(),
++ )
++ };
++ Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
++ }
++
++ fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
++ unimplemented!();
++ /*let name = const_cstr!("catchswitch");
++ let ret = unsafe {
++ llvm::LLVMRustBuildCatchSwitch(
++ self.llbuilder,
++ parent,
++ unwind,
++ num_handlers as c_uint,
++ name.as_ptr(),
++ )
++ };
++ ret.expect("LLVM does not have support for catchswitch")*/
++ }
++
++ fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
++ unimplemented!();
++ /*unsafe {
++ llvm::LLVMRustAddHandler(catch_switch, handler);
++ }*/
++ }
++
++ fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
++ unimplemented!();
++ /*unsafe {
++ llvm::LLVMSetPersonalityFn(self.llfn(), personality);
++ }*/
++ }
++
++ // Atomic Operations
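++    // NOTE: the rustc API expects cmpxchg to return a (value, success) pair, so the
++    // result of the underlying compare-exchange is packed into an ad-hoc two-field
++    // struct: field 0 holds the loaded value (written back through `expected`) and
++    // field 1 holds the success flag.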
++ fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
++ let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
++ self.llbb().add_assignment(None, expected, cmp);
++ let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
++
++ let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
++ let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
++ let align = Align::from_bits(64).expect("align"); // TODO: use good align.
++
++ let value_type = result.to_rvalue().get_type();
++ if let Some(struct_type) = value_type.is_struct() {
++ self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
++            // NOTE: since `success` contains the call to the intrinsic, it must be stored
++            // first so that `expected` is stored after the call.
++ self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
++ }
++ // TODO: handle when value is not a struct.
++
++ result.to_rvalue()
++ }
++
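++    // atomic_rmw() maps rustc's atomic read-modify-write operations onto the __atomic
++    // builtins, whose names encode the operand size in bytes, e.g. __atomic_fetch_add_4
++    // for a 32-bit add. Min/max have no fetch builtin, so they are presumably lowered
++    // by atomic_extremum() to a compare-exchange loop.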
++ fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
++ let size = self.cx.int_width(src.get_type()) / 8;
++ let name =
++ match op {
++ AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
++ AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
++ AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
++ AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
++ AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
++ AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
++ AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
++ AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
++ AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
++ AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
++ AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
++ };
++
++ let atomic_function = self.context.get_builtin_function(name);
++ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
++
++ let void_ptr_type = self.context.new_type::<*mut ()>();
++ let volatile_void_ptr_type = void_ptr_type.make_volatile();
++ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
++ // NOTE: not sure why, but we have the wrong type here.
++ let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
++ let src = self.context.new_cast(None, src, new_src_type);
++ let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
++ self.context.new_cast(None, res, src.get_type())
++ }
++
++ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
++ let name =
++ match scope {
++ SynchronizationScope::SingleThread => "__atomic_signal_fence",
++ SynchronizationScope::CrossThread => "__atomic_thread_fence",
++ };
++ let thread_fence = self.context.get_builtin_function(name);
++ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
++ self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
++ }
++
++ fn set_invariant_load(&mut self, load: RValue<'gcc>) {
++        // NOTE: hack to treat vtable function pointers as non-global-variable function pointers.
++ self.normal_function_addresses.borrow_mut().insert(load);
++ // TODO
++ /*unsafe {
++ llvm::LLVMSetMetadata(
++ load,
++ llvm::MD_invariant_load as c_uint,
++ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
++ );
++ }*/
++ }
++
++ fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
++ // TODO
++ //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
++ }
++
++ fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
++ // TODO
++ //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
++ }
++
++ fn call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
++        // FIXME: remove once there is a proper API.
++ let gcc_func = unsafe { std::mem::transmute(func) };
++ if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
++ self.function_call(func, args, funclet)
++ }
++ else {
++            // If it's not a function that was defined, it's a function pointer.
++ self.function_ptr_call(func, args, funclet)
++ }
++ }
++
++ fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
++ // FIXME: this does not zero-extend.
++ if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
++ // FIXME: hack because base::from_immediate converts i1 to i8.
++ // Fix the code in codegen_ssa::base::from_immediate.
++ return value;
++ }
++ //println!("zext: {:?} -> {:?}", value, dest_typ);
++ self.context.new_cast(None, value, dest_typ)
++ }
++
++ fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
++ self.cx
++ }
++
++ fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
++ unimplemented!();
++ //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
++ }
++
++ fn set_span(&mut self, _span: Span) {}
++
++ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
++ if self.cx().val_ty(val) == self.cx().type_i1() {
++ self.zext(val, self.cx().type_i8())
++ }
++ else {
++ val
++ }
++ }
++
++ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
++ if scalar.is_bool() {
++ return self.trunc(val, self.cx().type_i1());
++ }
++ val
++ }
++
++ fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
++ None
++ }
++
++ fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
++ None
++ }
++
++ fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
++ unimplemented!();
++ /*debug!(
++ "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
++ fn_name, hash, num_counters, index
++ );
++
++ let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
++ let args = &[fn_name, hash, num_counters, index];
++ let args = self.check_call("call", llfn, args);
++
++ unsafe {
++ let _ = llvm::LLVMRustBuildCall(
++ self.llbuilder,
++ llfn,
++ args.as_ptr() as *const &llvm::Value,
++ args.len() as c_uint,
++ None,
++ );
++ }*/
++ }
++}
++
++impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
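++    // NOTE: this declares `_mm_shuffle_epi8` as an extern function and calls it
++    // directly, so it presumably only covers the x86 128-bit byte-shuffle case rather
++    // than arbitrary shuffle masks.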
++ pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
++ let return_type = v1.get_type();
++ let params = [
++ self.context.new_parameter(None, return_type, "v1"),
++ self.context.new_parameter(None, return_type, "v2"),
++ self.context.new_parameter(None, mask.get_type(), "mask"),
++ ];
++        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
++ self.context.new_call(None, shuffle, &[v1, v2, mask])
++ }
++}
++
++impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
++ fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
++ // Forward to the `get_static` method of `CodegenCx`
++ self.cx().get_static(def_id)
++ }
++}
++
++impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
++ fn param_env(&self) -> ParamEnv<'tcx> {
++ self.cx.param_env()
++ }
++}
++
++impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
++ fn target_spec(&self) -> &Target {
++ &self.cx.target_spec()
++ }
++}
++
++trait ToGccComp {
++ fn to_gcc_comparison(&self) -> ComparisonOp;
++}
++
++impl ToGccComp for IntPredicate {
++ fn to_gcc_comparison(&self) -> ComparisonOp {
++ match *self {
++ IntPredicate::IntEQ => ComparisonOp::Equals,
++ IntPredicate::IntNE => ComparisonOp::NotEquals,
++ IntPredicate::IntUGT => ComparisonOp::GreaterThan,
++ IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
++ IntPredicate::IntULT => ComparisonOp::LessThan,
++ IntPredicate::IntULE => ComparisonOp::LessThanEquals,
++ IntPredicate::IntSGT => ComparisonOp::GreaterThan,
++ IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
++ IntPredicate::IntSLT => ComparisonOp::LessThan,
++ IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
++ }
++ }
++}
++
++impl ToGccComp for RealPredicate {
++ fn to_gcc_comparison(&self) -> ComparisonOp {
++ // TODO: check that ordered vs non-ordered is respected.
++ match *self {
++ RealPredicate::RealPredicateFalse => unreachable!(),
++ RealPredicate::RealOEQ => ComparisonOp::Equals,
++ RealPredicate::RealOGT => ComparisonOp::GreaterThan,
++ RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
++ RealPredicate::RealOLT => ComparisonOp::LessThan,
++ RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
++ RealPredicate::RealONE => ComparisonOp::NotEquals,
++ RealPredicate::RealORD => unreachable!(),
++ RealPredicate::RealUNO => unreachable!(),
++ RealPredicate::RealUEQ => ComparisonOp::Equals,
++ RealPredicate::RealUGT => ComparisonOp::GreaterThan,
++            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
++            RealPredicate::RealULT => ComparisonOp::LessThan,
++            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
++ RealPredicate::RealUNE => ComparisonOp::NotEquals,
++ RealPredicate::RealPredicateTrue => unreachable!(),
++ }
++ }
++}
++
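++// This mirrors the __ATOMIC_* memory-order constants of GCC's atomics API: keeping the
++// variants in this exact order makes the discriminants (0 for __ATOMIC_RELAXED through
++// 5 for __ATOMIC_SEQ_CST) line up with the integer argument the __atomic_* builtins
++// expect.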
++#[repr(C)]
++#[allow(non_camel_case_types)]
++enum MemOrdering {
++ __ATOMIC_RELAXED,
++ __ATOMIC_CONSUME,
++ __ATOMIC_ACQUIRE,
++ __ATOMIC_RELEASE,
++ __ATOMIC_ACQ_REL,
++ __ATOMIC_SEQ_CST,
++}
++
++trait ToGccOrdering {
++ fn to_gcc(self) -> i32;
++}
++
++impl ToGccOrdering for AtomicOrdering {
++ fn to_gcc(self) -> i32 {
++ use MemOrdering::*;
++
++ let ordering =
++ match self {
++ AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same.
++ AtomicOrdering::Unordered => __ATOMIC_RELAXED,
++ AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same.
++ AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
++ AtomicOrdering::Release => __ATOMIC_RELEASE,
++ AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
++ AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
++ };
++ ordering as i32
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::{FunctionType, RValue};
++use rustc_codegen_ssa::traits::BaseTypeMethods;
++use rustc_middle::ty::{Instance, TypeFoldable};
++use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
++use rustc_target::abi::call::FnAbi;
++
++use crate::abi::FnAbiGccExt;
++use crate::context::CodegenCx;
++
++/// Codegens a reference to a fn/method item, monomorphizing and
++/// inlining as it goes.
++///
++/// # Parameters
++///
++/// - `cx`: the crate context
++/// - `instance`: the instance to be instantiated
++pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
++ let tcx = cx.tcx();
++
++ //debug!("get_fn(instance={:?})", instance);
++
++ assert!(!instance.substs.needs_infer());
++ assert!(!instance.substs.has_escaping_bound_vars());
++ assert!(!instance.substs.has_param_types_or_consts());
++
++ if let Some(&func) = cx.instances.borrow().get(&instance) {
++ return func;
++ }
++
++ let sym = tcx.symbol_name(instance).name;
++ //debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym);
++
++ let fn_abi = FnAbi::of_instance(cx, instance, &[]);
++
++ // TODO
++ let func =
++ if let Some(func) = cx.get_declared_value(&sym) {
++ // Create a fn pointer with the new signature.
++ let ptrty = fn_abi.ptr_to_gcc_type(cx);
++
++ // This is subtle and surprising, but sometimes we have to bitcast
++ // the resulting fn pointer. The reason has to do with external
++ // functions. If you have two crates that both bind the same C
++ // library, they may not use precisely the same types: for
++ // example, they will probably each declare their own structs,
++ // which are distinct types from LLVM's point of view (nominal
++ // types).
++ //
++ // Now, if those two crates are linked into an application, and
++ // they contain inlined code, you can wind up with a situation
++ // where both of those functions wind up being loaded into this
++ // application simultaneously. In that case, the same function
++ // (from LLVM's point of view) requires two types. But of course
++ // LLVM won't allow one function to have two types.
++ //
++ // What we currently do, therefore, is declare the function with
++ // one of the two types (whichever happens to come first) and then
++ // bitcast as needed when the function is referenced to make sure
++ // it has the type we expect.
++ //
++ // This can occur on either a crate-local or crate-external
++ // reference. It also occurs when testing libcore and in some
++ // other weird situations. Annoying.
++ if cx.val_ty(func) != ptrty {
++ //debug!("get_fn: casting {:?} to {:?}", func, ptrty);
++ // TODO
++ //cx.const_ptrcast(func, ptrty)
++ func
++ }
++ else {
++ //debug!("get_fn: not casting pointer!");
++ func
++ }
++ }
++ else {
++ cx.linkage.set(FunctionType::Extern);
++ let func = cx.declare_fn(&sym, &fn_abi);
++ //cx.linkage.set(FunctionType::Internal);
++ //debug!("get_fn: not casting pointer!");
++
++ // TODO
++ //attributes::from_fn_attrs(cx, func, instance);
++
++ //let instance_def_id = instance.def_id();
++
++ // TODO
++ /*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) {
++ unsafe {
++ llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport);
++ }
++ }*/
++
++ func
++ };
++
++ cx.instances.borrow_mut().insert(instance, func);
++
++ func
++}
--- /dev/null
--- /dev/null
++use std::convert::TryFrom;
++use std::convert::TryInto;
++
++use gccjit::{Block, CType, RValue, Type, ToRValue};
++use rustc_codegen_ssa::mir::place::PlaceRef;
++use rustc_codegen_ssa::traits::{
++ BaseTypeMethods,
++ ConstMethods,
++ DerivedTypeMethods,
++ MiscMethods,
++ StaticMethods,
++};
++use rustc_middle::bug;
++use rustc_middle::mir::Mutability;
++use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
++use rustc_mir::interpret::{Allocation, GlobalAlloc, Scalar};
++use rustc_span::Symbol;
++use rustc_target::abi::{self, HasDataLayout, LayoutOf, Pointer, Size};
++
++use crate::consts::const_alloc_to_gcc;
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
++ bytes_in_context(self, bytes)
++ }
++
++ fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> {
++ // TODO: handle null_terminated.
++ if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) {
++ return value.to_rvalue();
++ }
++
++ let global = self.global_string(&*symbol.as_str());
++
++ self.const_cstr_cache.borrow_mut().insert(symbol, global.dereference(None));
++ global
++ }
++
++ fn global_string(&self, string: &str) -> RValue<'gcc> {
++ // TODO: handle non-null-terminated strings.
++ let string = self.context.new_string_literal(&*string);
++ let sym = self.generate_local_symbol_name("str");
++        // NOTE: TLS is always off for a string literal.
++        // NOTE: string literals do not have a link section.
++ let global = self.define_global(&sym, self.val_ty(string), false, None)
++ .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym));
++ self.global_init_block.add_assignment(None, global.dereference(None), string);
++ global.to_rvalue()
++ //llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage);
++ }
++
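++    // NOTE: libgccjit does not support direct casts between pointers and integers, so
++    // inttoptr()/ptrtoint() below spill the value to a stack local and reinterpret the
++    // local's address as a pointer to the destination type, roughly *(T *)&local.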
++ pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++ let func = block.get_function();
++ let local = func.new_local(None, value.get_type(), "intLocal");
++ block.add_assignment(None, local, value);
++ let value_address = local.get_address(None);
++
++ let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
++ ptr.dereference(None).to_rvalue()
++ }
++
++ pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
++        // TODO: when libgccjit allows casting from pointer to int, remove this.
++ let func = block.get_function();
++ let local = func.new_local(None, value.get_type(), "ptrLocal");
++ block.add_assignment(None, local, value);
++ let ptr_address = local.get_address(None);
++
++ let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
++ ptr.dereference(None).to_rvalue()
++ }
++
++ /*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> {
++ self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements)
++ }*/
++}
++
++pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
++ let context = &cx.context;
++ let typ = context.new_array_type(None, context.new_type::<u8>(), bytes.len() as i32);
++ let global = cx.declare_unnamed_global(typ);
++ global.global_set_initializer(bytes);
++ global.to_rvalue()
++}
++
++pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
++ typ.get_pointee().is_some()
++}
++
++impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
++ if type_is_pointer(typ) {
++ self.context.new_null(typ)
++ }
++ else {
++ self.const_int(typ, 0)
++ }
++ }
++
++ fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
++ let local = self.current_func.borrow().expect("func")
++ .new_local(None, typ, "undefined");
++ if typ.is_struct().is_some() {
++            // NOTE: hack to work around a limitation of the rustc API: see comment on
++            // CodegenCx.structs_as_pointer
++ let pointer = local.get_address(None);
++ self.structs_as_pointer.borrow_mut().insert(pointer);
++ pointer
++ }
++ else {
++ local.to_rvalue()
++ }
++ }
++
++ fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
++ self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
++ }
++
++ fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
++ self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
++ }
++
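++    // const_uint_big() builds a 128-bit constant from two 64-bit halves when the value
++    // does not fit in an i64, computing roughly (high << 64) | cast(low, typ), with
++    // each half emitted via new_rvalue_from_long().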
++ fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
++ let num64: Result<i64, _> = num.try_into();
++ if let Ok(num) = num64 {
++            // FIXME: workaround for a bug where libgccjit expects a constant:
++            // the `>> 64` and `| low` operations make the normal case non-constant.
++ return self.context.new_rvalue_from_long(typ, num as i64);
++ }
++
++ if num >> 64 != 0 {
++ // FIXME: use a new function new_rvalue_from_unsigned_long()?
++ let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
++ let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
++
++ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
++ (high << sixty_four) | self.context.new_cast(None, low, typ)
++ }
++ else if typ.is_i128(self) {
++ let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
++ self.context.new_cast(None, num, typ)
++ }
++ else {
++ self.context.new_rvalue_from_long(typ, num as u64 as i64)
++ }
++ }
++
++ fn const_bool(&self, val: bool) -> RValue<'gcc> {
++ self.const_uint(self.type_i1(), val as u64)
++ }
++
++ fn const_i32(&self, i: i32) -> RValue<'gcc> {
++ self.const_int(self.type_i32(), i as i64)
++ }
++
++ fn const_u32(&self, i: u32) -> RValue<'gcc> {
++ self.const_uint(self.type_u32(), i as u64)
++ }
++
++ fn const_u64(&self, i: u64) -> RValue<'gcc> {
++ self.const_uint(self.type_u64(), i)
++ }
++
++ fn const_usize(&self, i: u64) -> RValue<'gcc> {
++ let bit_size = self.data_layout().pointer_size.bits();
++ if bit_size < 64 {
++ // make sure it doesn't overflow
++ assert!(i < (1 << bit_size));
++ }
++
++ self.const_uint(self.usize_type, i)
++ }
++
++ fn const_u8(&self, _i: u8) -> RValue<'gcc> {
++ unimplemented!();
++ //self.const_uint(self.type_i8(), i as u64)
++ }
++
++ fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
++ unimplemented!();
++ //unsafe { llvm::LLVMConstReal(t, val) }
++ }
++
++ fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
++ let len = s.as_str().len();
++ let cs = self.const_ptrcast(self.const_cstr(s, false),
++ self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
++ );
++ (cs, self.const_usize(len as u64))
++ }
++
++ fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
++ let fields: Vec<_> = values.iter()
++ .map(|value| value.get_type())
++ .collect();
++ // TODO: cache the type? It's anonymous, so probably not.
++ let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
++ let typ = self.type_struct(&fields, packed);
++ let structure = self.global_init_func.new_local(None, typ, &name);
++ let struct_type = typ.is_struct().expect("struct type");
++ for (index, value) in values.iter().enumerate() {
++ let field = struct_type.get_field(index as i32);
++ let field_lvalue = structure.access_field(None, field);
++ self.global_init_block.add_assignment(None, field_lvalue, *value);
++ }
++ self.lvalue_to_rvalue(structure)
++ }
++
++ fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
++ // TODO
++ None
++ //try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
++ }
++
++ fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
++ // TODO
++ None
++ /*try_as_const_integral(v).and_then(|v| unsafe {
++ let (mut lo, mut hi) = (0u64, 0u64);
++ let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
++ success.then_some(hi_lo_to_u128(lo, hi))
++ })*/
++ }
++
++ fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
++ let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
++ match cv {
++ Scalar::Int(ScalarInt::ZST) => {
++ assert_eq!(0, layout.value.size(self).bytes());
++ self.const_undef(self.type_ix(0))
++ }
++ Scalar::Int(int) => {
++ let data = int.assert_bits(layout.value.size(self));
++
++ // FIXME: there are some issues with using the u128 code that follows, so hard-code
++ // the paths for floating-point values.
++ if ty == self.float_type {
++ return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
++ }
++ else if ty == self.double_type {
++ return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
++ }
++
++ let value = self.const_uint_big(self.type_ix(bitsize), data);
++ if layout.value == Pointer {
++ self.inttoptr(self.current_block.borrow().expect("block"), value, ty)
++ } else {
++ self.const_bitcast(value, ty)
++ }
++ }
++ Scalar::Ptr(ptr, _size) => {
++ let (alloc_id, offset) = ptr.into_parts();
++ let base_addr =
++ match self.tcx.global_alloc(alloc_id) {
++ GlobalAlloc::Memory(alloc) => {
++ let init = const_alloc_to_gcc(self, alloc);
++ let value =
++ match alloc.mutability {
++ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
++ _ => self.static_addr_of(init, alloc.align, None),
++ };
++ if !self.sess().fewer_names() {
++ // TODO
++ //llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
++ }
++ value
++ },
++ GlobalAlloc::Function(fn_instance) => {
++ self.get_fn_addr(fn_instance)
++ },
++ GlobalAlloc::Static(def_id) => {
++ assert!(self.tcx.is_static(def_id));
++ self.get_static(def_id)
++ },
++ };
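++ // Do the pointer arithmetic in the integer domain: cast the base address to
++ // usize, add the byte offset, then cast the result back to the pointer type.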
++ let ptr_type = base_addr.get_type();
++ let base_addr = self.const_bitcast(base_addr, self.usize_type);
++ let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
++ let ptr = self.const_bitcast(base_addr + offset, ptr_type);
++ let value = ptr.dereference(None);
++ if layout.value != Pointer {
++ self.const_bitcast(value.to_rvalue(), ty)
++ }
++ else {
++ self.const_bitcast(value.get_address(None), ty)
++ }
++ }
++ }
++ }
++
++ fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value {
++ const_alloc_to_gcc(self, alloc)
++ }
++
++ fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: &Allocation, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> {
++ assert_eq!(alloc.align, layout.align.abi);
++ let ty = self.type_ptr_to(layout.gcc_type(self, true));
++ let value =
++ if layout.size == Size::ZERO {
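++ // For a zero-sized type, produce a dangling but well-aligned pointer: the
++ // alignment itself is used as the address, matching Rust's convention for
++ // references to ZSTs.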
++ let value = self.const_usize(alloc.align.bytes());
++ self.context.new_cast(None, value, ty)
++ }
++ else {
++ let init = const_alloc_to_gcc(self, alloc);
++ let base_addr = self.static_addr_of(init, alloc.align, None);
++
++ let array = self.const_bitcast(base_addr, self.type_i8p());
++ let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
++ self.const_bitcast(value, ty)
++ };
++ PlaceRef::new_sized(value, layout)
++ }
++
++ fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
++ self.context.new_cast(None, val, ty)
++ }
++}
++
++pub trait SignType<'gcc, 'tcx> {
++ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
++}
++
++impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
++ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
++ }
++
++ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
++ }
++
++ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
++ if self.is_u8(cx) {
++ cx.i8_type
++ }
++ else if self.is_u16(cx) {
++ cx.i16_type
++ }
++ else if self.is_u32(cx) {
++ cx.i32_type
++ }
++ else if self.is_u64(cx) {
++ cx.i64_type
++ }
++ else if self.is_u128(cx) {
++ cx.i128_type
++ }
++ else {
++ self.clone()
++ }
++ }
++}
++
++pub trait TypeReflection<'gcc, 'tcx> {
++ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++
++ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++
++ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
++}
++
++impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
++ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u8_type
++ }
++
++ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u16_type
++ }
++
++ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.uint_type
++ }
++
++ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.ulong_type
++ }
++
++ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.ulonglong_type
++ }
++
++ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.i8_type
++ }
++
++ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u8_type
++ }
++
++ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.i16_type
++ }
++
++ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u16_type
++ }
++
++ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.i32_type
++ }
++
++ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u32_type
++ }
++
++ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.i64_type
++ }
++
++ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.u64_type
++ }
++
++ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.context.new_c_type(CType::Int128t)
++ }
++
++ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.context.new_c_type(CType::UInt128t)
++ }
++
++ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.context.new_type::<f32>()
++ }
++
++ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
++ self.unqualified() == cx.context.new_type::<f64>()
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::{RValue, Type};
++use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
++use rustc_hir as hir;
++use rustc_hir::Node;
++use rustc_middle::{bug, span_bug};
++use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
++use rustc_middle::mir::mono::MonoItem;
++use rustc_middle::ty::{self, Instance, Ty};
++use rustc_mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
++use rustc_span::Span;
++use rustc_span::def_id::DefId;
++use rustc_target::abi::{self, Align, HasDataLayout, LayoutOf, Primitive, Size};
++
++use crate::base;
++use crate::context::CodegenCx;
++use crate::mangled_std_symbols::{ARGC, ARGV, ARGV_INIT_ARRAY};
++use crate::type_of::LayoutGccExt;
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
++ if value.get_type() == self.bool_type.make_pointer() {
++ if let Some(pointee) = typ.get_pointee() {
++ if pointee.is_vector().is_some() {
++ panic!()
++ }
++ }
++ }
++ self.context.new_bitcast(None, value, typ)
++ }
++}
++
++impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
++ fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
++ if let Some(global_value) = self.const_globals.borrow().get(&cv) {
++ // TODO
++ /*unsafe {
++ // Upgrade the alignment in cases where the same constant is used with different
++ // alignment requirements
++ let llalign = align.bytes() as u32;
++ if llalign > llvm::LLVMGetAlignment(gv) {
++ llvm::LLVMSetAlignment(gv, llalign);
++ }
++ }*/
++ return *global_value;
++ }
++ let global_value = self.static_addr_of_mut(cv, align, kind);
++ // TODO
++ /*unsafe {
++ llvm::LLVMSetGlobalConstant(global_value, True);
++ }*/
++ self.const_globals.borrow_mut().insert(cv, global_value);
++ global_value
++ }
++
++ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
++ let attrs = self.tcx.codegen_fn_attrs(def_id);
++
++ let instance = Instance::mono(self.tcx, def_id);
++ let name = &*self.tcx.symbol_name(instance).name;
++
++ let (value, alloc) =
++ match codegen_static_initializer(&self, def_id) {
++ Ok(value) => value,
++ // Error has already been reported
++ Err(_) => return,
++ };
++
++ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
++ let global = self.get_static(def_id);
++
++ // Boolean SSA values are i1, but they have to be stored in i8 slots,
++ // otherwise some LLVM optimization passes don't work as expected.
++ let val_llty = self.val_ty(value);
++ let value =
++ if val_llty == self.type_i1() {
++ //val_llty = self.type_i8();
++ unimplemented!();
++ //llvm::LLVMConstZExt(value, val_llty)
++ }
++ else {
++ value
++ };
++
++ let instance = Instance::mono(self.tcx, def_id);
++ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
++ let gcc_type = self.layout_of(ty).gcc_type(self, true);
++
++ let global =
++ if val_llty == gcc_type {
++ global
++ }
++ else {
++ // If we created the global with the wrong type,
++ // correct the type.
++ /*let name = llvm::get_value_name(global).to_vec();
++ llvm::set_value_name(global, b"");
++
++ let linkage = llvm::LLVMRustGetLinkage(global);
++ let visibility = llvm::LLVMRustGetVisibility(global);*/
++
++ let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section);
++
++ /*llvm::LLVMRustSetLinkage(new_global, linkage);
++ llvm::LLVMRustSetVisibility(new_global, visibility);*/
++
++ // To avoid breaking any invariants, we leave around the old
++ // global for the moment; we'll replace all references to it
++ // with the new global later. (See base::codegen_backend.)
++ //self.statics_to_rauw.borrow_mut().push((global, new_global));
++ new_global
++ };
++ // TODO
++ //set_global_alignment(&self, global, self.align_of(ty));
++ //llvm::LLVMSetInitializer(global, value);
++ let value = self.rvalue_as_lvalue(value);
++ let value = value.get_address(None);
++ let dest_typ = global.get_type();
++ let value = self.context.new_cast(None, value, dest_typ);
++
++ // NOTE: do not init the variables related to argc/argv because it seems we cannot
++ // overwrite those variables.
++ // FIXME: correctly support global variable initialization.
++ let skip_init = [
++ ARGV_INIT_ARRAY,
++ ARGC,
++ ARGV,
++ ];
++ if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) {
++ // TODO: switch to set_initializer when libgccjit supports that.
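++ // Meanwhile, emit a memcpy from the constant into the global inside the
++ // global init function, so the bytes are copied in at program startup.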
++ let memcpy = self.context.get_builtin_function("memcpy");
++ let dst = self.context.new_cast(None, global, self.type_i8p());
++ let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void()));
++ let size = self.context.new_rvalue_from_long(self.sizet_type, alloc.size().bytes() as i64);
++ self.global_init_block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
++ }
++
++ // As an optimization, all shared statics which do not have interior
++ // mutability are placed into read-only memory.
++ if !is_mutable {
++ if self.type_is_freeze(ty) {
++ // TODO
++ //llvm::LLVMSetGlobalConstant(global, llvm::True);
++ }
++ }
++
++ //debuginfo::create_global_var_metadata(&self, def_id, global);
++
++ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
++ // Do not allow LLVM to change the alignment of a TLS on macOS.
++ //
++ // By default a global's alignment can be freely increased.
++ // This allows LLVM to generate more performant instructions
++ // e.g., using load-aligned into a SIMD register.
++ //
++ // However, on macOS 10.10 or below, the dynamic linker does not
++ // respect any alignment given on the TLS (radar 24221680).
++ // This violates the alignment assumption, causing segfaults at runtime.
++ //
++ // This bug is very easy to trigger. In `println!` and `panic!`,
++ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
++ // whose values are `mem::replace`d on initialization.
++ // The implementation of `mem::replace` will use SIMD
++ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
++ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
++ // which macOS's dyld disregards, causing crashes
++ // (see issues #51794, #51758, #50867, #48866 and #44056).
++ //
++ // To work around the bug, we trick LLVM into not increasing
++ // the global's alignment by explicitly assigning a section to it
++ // (equivalent to automatically generating a `#[link_section]` attribute).
++ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
++ // of `lib/IR/Globals.cpp` for why this works.
++ //
++ // When the alignment is not increased, the optimized `mem::replace`
++ // will use load-unaligned instructions instead, thus avoiding the crash.
++ //
++ // We could remove this hack whenever we decide to drop macOS 10.10 support.
++ if self.tcx.sess.target.options.is_like_osx {
++ // The `inspect` method is okay here because we checked relocations, and
++ // because we are doing this access to inspect the final interpreter state
++ // (not as part of the interpreter execution).
++ //
++ // FIXME: This check requires that the (arbitrary) value of undefined bytes
++ // happens to be zero. Instead, we should only check the value of defined bytes
++ // and set all undefined bytes to zero if this allocation is headed for the
++ // BSS.
++ /*let all_bytes_are_zero = alloc.relocations().is_empty()
++ && alloc
++ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
++ .iter()
++ .all(|&byte| byte == 0);
++
++ let sect_name = if all_bytes_are_zero {
++ CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
++ } else {
++ CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
++ };*/
++ unimplemented!();
++ //llvm::LLVMSetSection(global, sect_name.as_ptr());
++ }
++ }
++
++ // Wasm statics with custom link sections get special treatment as they
++ // go into custom sections of the wasm executable.
++ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
++ if let Some(_section) = attrs.link_section {
++ unimplemented!();
++ /*let section = llvm::LLVMMDStringInContext(
++ self.llcx,
++ section.as_str().as_ptr().cast(),
++ section.as_str().len() as c_uint,
++ );
++ assert!(alloc.relocations().is_empty());
++
++ // The `inspect` method is okay here because we checked relocations, and
++ // because we are doing this access to inspect the final interpreter state (not
++ // as part of the interpreter execution).
++ let bytes =
++ alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
++ let alloc = llvm::LLVMMDStringInContext(
++ self.llcx,
++ bytes.as_ptr().cast(),
++ bytes.len() as c_uint,
++ );
++ let data = [section, alloc];
++ let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
++ llvm::LLVMAddNamedMetadataOperand(
++ self.llmod,
++ "wasm.custom_sections\0".as_ptr().cast(),
++ meta,
++ );*/
++ }
++ } else {
++ // TODO
++ //base::set_link_section(global, &attrs);
++ }
++
++ if attrs.flags.contains(CodegenFnAttrFlags::USED) {
++ self.add_used_global(global);
++ }
++ }
++
++ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
++ fn add_used_global(&self, _global: RValue<'gcc>) {
++ // TODO
++ //let cast = self.context.new_cast(None, global, self.type_i8p());
++ //self.used_statics.borrow_mut().push(cast);
++ }
++}
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
++ let (name, gv) =
++ match kind {
++ Some(kind) if !self.tcx.sess.fewer_names() => {
++ let name = self.generate_local_symbol_name(kind);
++ // TODO: check if it's okay that TLS is off here.
++ // TODO: check if it's okay that link_section is None here.
++ // TODO: set alignment here as well.
++ let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| {
++ bug!("symbol `{}` is already defined", name);
++ });
++ //llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
++ (name, gv)
++ }
++ _ => {
++ let index = self.global_gen_sym_counter.get();
++ let name = format!("global_{}_{}", index, self.codegen_unit.name());
++ let typ = self.val_ty(cv).get_aligned(align.bytes());
++ let global = self.define_private_global(typ);
++ (name, global)
++ },
++ };
++ // FIXME: the name coming from generate_local_symbol_name() above probably cannot be used
++ // globally.
++ // NOTE: a global only seems to be visible within its module, so save the name instead of
++ // the value in order to import it later.
++ self.global_names.borrow_mut().insert(cv, name);
++ self.global_init_block.add_assignment(None, gv.dereference(None), cv);
++ //llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
++ gv
++ }
++
++ pub fn get_static(&self, def_id: DefId) -> RValue<'gcc> {
++ let instance = Instance::mono(self.tcx, def_id);
++ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
++ if let Some(&global) = self.instances.borrow().get(&instance) {
++ /*let attrs = self.tcx.codegen_fn_attrs(def_id);
++ let name = &*self.tcx.symbol_name(instance).name;
++ let name =
++ if let Some(linkage) = attrs.linkage {
++ // This is to match what happens in check_and_apply_linkage.
++ Cow::from(format!("_rust_extern_with_linkage_{}", name))
++ }
++ else {
++ Cow::from(name)
++ };
++ let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name)
++ .get_address(None);
++ self.global_names.borrow_mut().insert(global, name.to_string());*/
++ return global;
++ }
++
++ let defined_in_current_codegen_unit =
++ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
++ assert!(
++ !defined_in_current_codegen_unit,
++ "consts::get_static() should always hit the cache for \
++ statics defined in the same CGU, but did not for `{:?}`",
++ def_id
++ );
++
++ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
++ let sym = self.tcx.symbol_name(instance).name;
++
++ //debug!("get_static: sym={} instance={:?}", sym, instance);
++
++ let global =
++ if let Some(def_id) = def_id.as_local() {
++ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
++ let llty = self.layout_of(ty).gcc_type(self, true);
++ // FIXME: refactor this to work without accessing the HIR
++ let global = match self.tcx.hir().get(id) {
++ Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => {
++ if let Some(global) = self.get_declared_value(&sym) {
++ if self.val_ty(global) != self.type_ptr_to(llty) {
++ span_bug!(span, "Conflicting types for static");
++ }
++ }
++
++ let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
++ let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
++
++ if !self.tcx.is_reachable_non_generic(def_id) {
++ /*unsafe {
++ llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden);
++ }*/
++ }
++
++ global
++ }
++
++ Node::ForeignItem(&hir::ForeignItem {
++ span,
++ kind: hir::ForeignItemKind::Static(..),
++ ..
++ }) => {
++ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
++ check_and_apply_linkage(&self, &fn_attrs, ty, sym, span)
++ }
++
++ item => bug!("get_static: expected static, found {:?}", item),
++ };
++
++ //debug!("get_static: sym={} attrs={:?}", sym, attrs);
++
++ global
++ }
++ else {
++ // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
++ //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
++
++ let attrs = self.tcx.codegen_fn_attrs(def_id);
++ let span = self.tcx.def_span(def_id);
++ let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
++
++ let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
++ // ThinLTO can't handle this workaround in all cases, so we don't
++ // emit the attrs. Instead we make them unnecessary by disallowing
++ // dynamic linking when linker plugin based LTO is enabled.
++ !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/
++
++ // If this assertion triggers, there's something wrong with command-line
++ // argument validation.
++ debug_assert!(
++ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
++ && self.tcx.sess.target.options.is_like_msvc
++ && self.tcx.sess.opts.cg.prefer_dynamic)
++ );
++
++ if needs_dll_storage_attr {
++ // This item is external but not foreign, i.e., it originates from an external Rust
++ // crate. Since we don't know whether this crate will be linked dynamically or
++ // statically in the final application, we always mark such symbols as 'dllimport'.
++ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
++ // to make things work.
++ //
++ // However, in some scenarios we defer emission of statics to downstream
++ // crates, so there are cases where a static with an upstream DefId
++ // is actually present in the current crate. We can find out via the
++ // is_codegened_item query.
++ if !self.tcx.is_codegened_item(def_id) {
++ unimplemented!();
++ /*unsafe {
++ llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
++ }*/
++ }
++ }
++ global
++ };
++
++ /*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
++ // For foreign (native) libs we know the exact storage type to use.
++ unsafe {
++ llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
++ }
++ }*/
++
++ self.instances.borrow_mut().insert(instance, global);
++ global
++ }
++}
++
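++ /// Lower a constant `Allocation` to a gccjit rvalue: walk the relocations in offset
++ /// order, emit the plain byte runs between them as byte arrays and each relocation as a
++ /// pointer-sized scalar, then pack all the pieces into a single packed struct.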
++pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: &Allocation) -> RValue<'gcc> {
++ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
++ let dl = cx.data_layout();
++ let pointer_size = dl.pointer_size.bytes() as usize;
++
++ let mut next_offset = 0;
++ for &(offset, alloc_id) in alloc.relocations().iter() {
++ let offset = offset.bytes();
++ assert_eq!(offset as usize as u64, offset);
++ let offset = offset as usize;
++ if offset > next_offset {
++ // This `inspect` is okay since we have checked that it is not within a relocation, it
++ // is within the bounds of the allocation, and it doesn't affect interpreter execution
++ // (we inspect the result after interpreter execution). Any undef byte is replaced with
++ // some arbitrary byte value.
++ //
++ // FIXME: relay undef bytes to codegen as undef const bytes
++ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
++ llvals.push(cx.const_bytes(bytes));
++ }
++ let ptr_offset =
++ read_target_uint(dl.endian,
++ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
++ // affect interpreter execution (we inspect the result after interpreter execution),
++ // and we properly interpret the relocation as a relocation pointer offset.
++ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
++ )
++ .expect("const_alloc_to_gcc: could not read relocation pointer")
++ as u64;
++ llvals.push(cx.scalar_to_backend(
++ InterpScalar::from_pointer(
++ interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
++ &cx.tcx,
++ ),
++ &abi::Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
++ cx.type_i8p(),
++ ));
++ next_offset = offset + pointer_size;
++ }
++ if alloc.len() >= next_offset {
++ let range = next_offset..alloc.len();
++ // This `inspect` is okay since we have checked that it is after all relocations, it is
++ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
++ // inspect the result after interpreter execution). Any undef byte is replaced with some
++ // arbitrary byte value.
++ //
++ // FIXME: relay undef bytes to codegen as undef const bytes
++ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
++ llvals.push(cx.const_bytes(bytes));
++ }
++
++ cx.const_struct(&llvals, true)
++}
++
++pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, &'tcx Allocation), ErrorHandled> {
++ let alloc = cx.tcx.eval_static_initializer(def_id)?;
++ Ok((const_alloc_to_gcc(cx, alloc), alloc))
++}
++
++fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> RValue<'gcc> {
++ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
++ let llty = cx.layout_of(ty).gcc_type(cx, true);
++ if let Some(linkage) = attrs.linkage {
++ //debug!("get_static: sym={} linkage={:?}", sym, linkage);
++
++ // If this is a static with a linkage specified, then we need to handle
++ // it a little specially. The type system prevents things like &T and
++ // extern "C" fn() from being null, so we can't just declare a
++ // static and call it a day. Some linkages (like weak) will make it such
++ // that the static actually has a null value.
++ let llty2 =
++ if let ty::RawPtr(ref mt) = ty.kind() {
++ cx.layout_of(mt.ty).gcc_type(cx, true)
++ }
++ else {
++ cx.sess().span_fatal(
++ span,
++ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
++ )
++ };
++ // Declare a symbol `foo` with the desired linkage.
++ let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
++
++ // Declare an internal global `extern_with_linkage_foo` which
++ // is initialized with the address of `foo`. If `foo` is
++ // discarded during linking (for example, if `foo` has weak
++ // linkage and there are no definitions), then
++ // `extern_with_linkage_foo` will instead be initialized to
++ // zero.
++ let mut real_name = "_rust_extern_with_linkage_".to_string();
++ real_name.push_str(&sym);
++ let global2 =
++ cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| {
++ cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
++ });
++ //llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage);
++ let lvalue = global2.dereference(None);
++ cx.global_init_block.add_assignment(None, lvalue, global1);
++ //llvm::LLVMSetInitializer(global2, global1);
++ global2
++ }
++ else {
++ // Generate an external declaration.
++ // FIXME(nagisa): investigate whether it can be changed into define_global
++
++ // Thread-local statics in some other crate need to *always* be linked
++ // against in a thread-local fashion, so we need to be sure to apply the
++ // thread-local attribute locally if it was present remotely. If we
++ // don't do this then linker errors can be generated where the linker
++ // complains that one object file has a thread-local version of the
++ // symbol and another one doesn't.
++ cx.declare_global(&sym, llty, is_tls, attrs.link_section)
++ }
++}
--- /dev/null
--- /dev/null
++use std::cell::{Cell, RefCell};
++
++use gccjit::{
++ Block,
++ Context,
++ CType,
++ Function,
++ FunctionType,
++ LValue,
++ RValue,
++ Struct,
++ Type,
++};
++use rustc_codegen_ssa::base::wants_msvc_seh;
++use rustc_codegen_ssa::traits::{
++ BackendTypes,
++ BaseTypeMethods,
++ MiscMethods,
++};
++use rustc_data_structures::base_n;
++use rustc_data_structures::fx::{FxHashMap, FxHashSet};
++use rustc_middle::bug;
++use rustc_middle::mir::mono::CodegenUnit;
++use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
++use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout};
++use rustc_session::Session;
++use rustc_span::{Span, Symbol, DUMMY_SP};
++use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
++use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
++
++use crate::callee::get_fn;
++use crate::declare::mangle_name;
++
++#[derive(Clone)]
++pub struct FuncSig<'gcc> {
++ pub params: Vec<Type<'gcc>>,
++ pub return_type: Type<'gcc>,
++}
++
++pub struct CodegenCx<'gcc, 'tcx> {
++ pub check_overflow: bool,
++ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
++ pub context: &'gcc Context<'gcc>,
++
++ // TODO: set this to a dummy block initially to avoid using an Option?
++ pub current_block: RefCell<Option<Block<'gcc>>>,
++ pub current_func: RefCell<Option<Function<'gcc>>>,
++ pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
++
++ /// The function where globals are initialized.
++ pub global_init_func: Function<'gcc>,
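++ /// The block of `global_init_func` into which the global-initialization code is emitted.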
++ pub global_init_block: Block<'gcc>,
++
++ pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
++
++ pub tls_model: gccjit::TlsModel,
++
++ pub bool_type: Type<'gcc>,
++ pub i8_type: Type<'gcc>,
++ pub i16_type: Type<'gcc>,
++ pub i32_type: Type<'gcc>,
++ pub i64_type: Type<'gcc>,
++ pub i128_type: Type<'gcc>,
++ pub isize_type: Type<'gcc>,
++
++ pub u8_type: Type<'gcc>,
++ pub u16_type: Type<'gcc>,
++ pub u32_type: Type<'gcc>,
++ pub u64_type: Type<'gcc>,
++ pub u128_type: Type<'gcc>,
++ pub usize_type: Type<'gcc>,
++
++ pub int_type: Type<'gcc>,
++ pub uint_type: Type<'gcc>,
++ pub long_type: Type<'gcc>,
++ pub ulong_type: Type<'gcc>,
++ pub ulonglong_type: Type<'gcc>,
++ pub sizet_type: Type<'gcc>,
++
++ pub float_type: Type<'gcc>,
++ pub double_type: Type<'gcc>,
++
++ pub linkage: Cell<FunctionType>,
++ pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
++ pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
++ pub tcx: TyCtxt<'tcx>,
++
++ pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
++
++ pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
++
++ /// Cache instances of monomorphic and polymorphic items
++ pub instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
++ /// Cache generated vtables
++ pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
++
++ /// Cache of emitted const globals (value -> global)
++ pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
++
++ pub init_argv_var: RefCell<String>,
++ pub argv_initialized: Cell<bool>,
++
++ /// Cache of constant strings.
++ pub const_cstr_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
++
++ /// Cache of globals.
++ pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
++ // TODO: remove global_names.
++ pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
++
++ /// A counter that is used for generating local symbol names
++ local_gen_sym_counter: Cell<usize>,
++ pub global_gen_sym_counter: Cell<usize>,
++
++ eh_personality: Cell<Option<RValue<'gcc>>>,
++
++ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
++
++ /// NOTE: because the rustc API is not well suited to libgccjit, `const_undef()` returns
++ /// structs as pointers so that they can later be assigned a value.
++ /// This set remembers which of these pointers were returned by that function so that
++ /// they can be dereferenced later.
++ /// FIXME: fix the rustc API to avoid having this hack.
++ pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
++
++ /// Store pointers of different types for safety:
++ /// when casting the values back to their original types, check with this set that they
++ /// are indeed of that type.
++ /// FIXME: remove when the API supports more types.
++ #[cfg(debug_assertions)]
++ lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
++}
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
++ let check_overflow = tcx.sess.overflow_checks();
++ // TODO: fix this mess. libgccjit seems to return a random type when using new_int_type().
++ //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
++ let isize_type = context.new_c_type(CType::LongLong);
++ //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
++ let usize_type = context.new_c_type(CType::ULongLong);
++ let bool_type = context.new_type::<bool>();
++ let i8_type = context.new_type::<i8>();
++ let i16_type = context.new_type::<i16>();
++ let i32_type = context.new_type::<i32>();
++ let i64_type = context.new_c_type(CType::LongLong);
++ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded?
++ let u8_type = context.new_type::<u8>();
++ let u16_type = context.new_type::<u16>();
++ let u32_type = context.new_type::<u32>();
++ let u64_type = context.new_c_type(CType::ULongLong);
++ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded?
++
++ let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
++
++ let float_type = context.new_type::<f32>();
++ let double_type = context.new_type::<f64>();
++
++ let int_type = context.new_c_type(CType::Int);
++ let uint_type = context.new_c_type(CType::UInt);
++ let long_type = context.new_c_type(CType::Long);
++ let ulong_type = context.new_c_type(CType::ULong);
++ let ulonglong_type = context.new_c_type(CType::ULongLong);
++ let sizet_type = context.new_c_type(CType::SizeT);
++
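++ // NOTE: the pointer-sized types above are hard-coded for a 64-bit target (see the TODO
++ // about new_int_type()); the asserts below check that assumption.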
++ assert_eq!(isize_type, i64_type);
++ assert_eq!(usize_type, u64_type);
++
++ let mut functions = FxHashMap::default();
++ let builtins = [
++ "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
++ "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
++ "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
++ "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
++ "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
++ "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
++ "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
++ "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
++ "__builtin_expect_with_probability",
++ ];
++
++ for builtin in builtins.iter() {
++ functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
++ }
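++ // NOTE: the builtins are registered up front, presumably so that later lookups in
++ // `functions` can resolve them by name like ordinary functions.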
++
++ let global_init_func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[],
++ &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
++ let global_init_block = global_init_func.new_block("initial");
++
++ Self {
++ check_overflow,
++ codegen_unit,
++ context,
++ current_block: RefCell::new(None),
++ current_func: RefCell::new(None),
++ normal_function_addresses: Default::default(),
++ functions: RefCell::new(functions),
++ global_init_func,
++ global_init_block,
++
++ tls_model,
++
++ bool_type,
++ i8_type,
++ i16_type,
++ i32_type,
++ i64_type,
++ i128_type,
++ isize_type,
++ usize_type,
++ u8_type,
++ u16_type,
++ u32_type,
++ u64_type,
++ u128_type,
++ int_type,
++ uint_type,
++ long_type,
++ ulong_type,
++ ulonglong_type,
++ sizet_type,
++
++ float_type,
++ double_type,
++
++ linkage: Cell::new(FunctionType::Internal),
++ #[cfg(debug_assertions)]
++ lvalues: Default::default(),
++ instances: Default::default(),
++ vtables: Default::default(),
++ const_globals: Default::default(),
++ init_argv_var: RefCell::new(String::new()),
++ argv_initialized: Cell::new(false),
++ const_cstr_cache: Default::default(),
++ global_names: Default::default(),
++ globals: Default::default(),
++ scalar_types: Default::default(),
++ types: Default::default(),
++ tcx,
++ struct_types: Default::default(),
++ types_with_fields_to_set: Default::default(),
++ local_gen_sym_counter: Cell::new(0),
++ global_gen_sym_counter: Cell::new(0),
++ eh_personality: Cell::new(None),
++ pointee_infos: Default::default(),
++ structs_as_pointer: Default::default(),
++ }
++ }
++
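++ // NOTE: the `transmute`s in the helpers below assume that gccjit's `LValue`, `RValue`
++ // and `Function` wrappers share the same underlying pointer representation; the gccjit
++ // API does not guarantee this, so this is a hack. The debug-only `lvalues` set records
++ // which values went through this path so that the reverse casts can be sanity-checked.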
++ pub fn lvalue_to_rvalue(&self, value: LValue<'gcc>) -> RValue<'gcc> {
++ #[cfg(debug_assertions)]
++ self.lvalues.borrow_mut().insert(value);
++ unsafe { std::mem::transmute(value) }
++ }
++
++ pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
++ let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
++ debug_assert!(self.functions.borrow().values().any(|val| *val == function),
++ "{:?} ({:?}) is not a function", value, value.get_type());
++ function
++ }
++
++ pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
++ let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
++ //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
++ lvalue
++ }
++
++ pub fn sess(&self) -> &Session {
++ &self.tcx.sess
++ }
++}
++
++impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
++ type Value = RValue<'gcc>;
++ type Function = RValue<'gcc>;
++
++ type BasicBlock = Block<'gcc>;
++ type Type = Type<'gcc>;
++ type Funclet = (); // TODO
++
++ type DIScope = (); // TODO
++ type DILocation = (); // TODO
++ type DIVariable = (); // TODO
++}
++
++impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
++ &self.vtables
++ }
++
++ fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
++ let func = get_fn(self, instance);
++ *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
++ func
++ }
++
++ fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
++ //let symbol = self.tcx.symbol_name(instance).name;
++
++ let func = get_fn(self, instance);
++ let func = self.rvalue_as_function(func);
++ let ptr = func.get_address(None);
++
++ // TODO: don't do this twice: i.e. in declare_fn and here.
++ //let fn_abi = FnAbi::of_instance(self, instance, &[]);
++ //let (return_type, params, _) = fn_abi.gcc_type(self);
++ // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
++ //let pointer_type = ptr.get_type();
++
++ self.normal_function_addresses.borrow_mut().insert(ptr);
++
++ ptr
++ }
++
++ fn eh_personality(&self) -> RValue<'gcc> {
++ // The exception handling personality function.
++ //
++ // If our compilation unit has the `eh_personality` lang item somewhere
++ // within it, then we just need to codegen that. Otherwise, we're
++ // building an rlib which will depend on some upstream implementation of
++ // this function, so we just codegen a generic reference to it. We don't
++ // specify any of the types for the function, we just make it a symbol
++ // that LLVM can later use.
++ //
++ // Note that MSVC is a little special here in that we don't use the
++ // `eh_personality` lang item at all. Currently LLVM has support for
++ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
++ // *name of the personality function* to decide what kind of unwind side
++ // tables/landing pads to emit. It looks like Dwarf is used by default,
++ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
++ // an "exception", but for MSVC we want to force SEH. This means that we
++ // can't actually have the personality function be our standard
++ // `rust_eh_personality` function, but rather we wired it up to the
++ // CRT's custom personality function, which forces LLVM to consider
++ // landing pads as "landing pads for SEH".
++ if let Some(llpersonality) = self.eh_personality.get() {
++ return llpersonality;
++ }
++ let tcx = self.tcx;
++ let llfn = match tcx.lang_items().eh_personality() {
++ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
++ ty::Instance::resolve(
++ tcx,
++ ty::ParamEnv::reveal_all(),
++ def_id,
++ tcx.intern_substs(&[]),
++ )
++ .unwrap().unwrap(),
++ ),
++ _ => {
++ let name = if wants_msvc_seh(self.sess()) {
++ "__CxxFrameHandler3"
++ } else {
++ "rust_eh_personality"
++ };
++ self.declare_func(name, self.type_i32(), &[], true)
++ }
++ };
++ //attributes::apply_target_cpu_attr(self, llfn);
++ self.eh_personality.set(Some(llfn));
++ llfn
++ }
++
++ fn sess(&self) -> &Session {
++ &self.tcx.sess
++ }
++
++ fn check_overflow(&self) -> bool {
++ self.check_overflow
++ }
++
++ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
++ self.codegen_unit
++ }
++
++ fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
++ unimplemented!();
++ //&self.used_statics
++ }
++
++ fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
++ // TODO
++ //attributes::set_frame_pointer_type(self, llfn)
++ }
++
++ fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
++ // TODO
++ //attributes::apply_target_cpu_attr(self, llfn)
++ }
++
++ fn create_used_variable(&self) {
++ unimplemented!();
++ /*let name = const_cstr!("llvm.used");
++ let section = const_cstr!("llvm.metadata");
++ let array =
++ self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
++
++ unsafe {
++ let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
++ llvm::LLVMSetInitializer(g, array);
++ llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
++ llvm::LLVMSetSection(g, section.as_ptr());
++ }*/
++ }
++
++ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
++ if self.get_declared_value("main").is_none() {
++ Some(self.declare_cfn("main", fn_type))
++ }
++ else {
++ // If the symbol already exists, it is an error: for example, the user wrote
++ // #[no_mangle] extern "C" fn main(..) {..}
++ // instead of #[start]
++ None
++ }
++ }
++}
++
++impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn tcx(&self) -> TyCtxt<'tcx> {
++ self.tcx
++ }
++}
++
++impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
++ fn data_layout(&self) -> &TargetDataLayout {
++ &self.tcx.data_layout
++ }
++}
++
++impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
++ fn target_spec(&self) -> &Target {
++ &self.tcx.sess.target
++ }
++}
++
++impl<'gcc, 'tcx> LayoutOf for CodegenCx<'gcc, 'tcx> {
++ type Ty = Ty<'tcx>;
++ type TyAndLayout = TyAndLayout<'tcx>;
++
++ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
++ self.spanned_layout_of(ty, DUMMY_SP)
++ }
++
++ fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
++ self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
++ if let LayoutError::SizeOverflow(_) = e {
++ self.sess().span_fatal(span, &e.to_string())
++ } else {
++ bug!("failed to get layout for `{}`: {}", ty, e)
++ }
++ })
++ }
++}
++
++impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn param_env(&self) -> ParamEnv<'tcx> {
++ ParamEnv::reveal_all()
++ }
++}
++
++impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
++ /// Generates a new symbol name with the given prefix. This symbol name must
++ /// only be used for definitions with `internal` or `private` linkage.
++ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
++ let idx = self.local_gen_sym_counter.get();
++ self.local_gen_sym_counter.set(idx + 1);
++ // Include a '.' character, so there can be no accidental conflicts with
++ // user-defined names.
++ let mut name = String::with_capacity(prefix.len() + 6);
++ name.push_str(prefix);
++ name.push('.');
++ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
++ name
++ }
++}
++
++pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
++ let name = codegen_unit.name().to_string();
++ mangle_name(&name.replace('-', "_"))
++}
++
++fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
++ match tls_model {
++ TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
++ TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
++ TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
++ TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::RValue;
++use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
++use rustc_hir::def_id::DefId;
++use rustc_middle::mir::coverage::{
++ CodeRegion,
++ CounterValueReference,
++ ExpressionOperandId,
++ InjectedExpressionId,
++ Op,
++};
++use rustc_middle::ty::Instance;
++
++use crate::builder::Builder;
++use crate::context::CodegenCx;
++
++impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn set_function_source_hash(
++ &mut self,
++ _instance: Instance<'tcx>,
++ _function_source_hash: u64,
++ ) -> bool {
++ unimplemented!();
++ /*if let Some(coverage_context) = self.coverage_context() {
++ debug!(
++ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
++ instance, function_source_hash,
++ );
++ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
++ coverage_map
++ .entry(instance)
++ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
++ .set_function_source_hash(function_source_hash);
++ true
++ } else {
++ false
++ }*/
++ }
++
++ fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
++ /*if let Some(coverage_context) = self.coverage_context() {
++ debug!(
++ "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
++ at {:?}",
++ instance, function_source_hash, id, region,
++ );
++ let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
++ coverage_regions
++ .entry(instance)
++ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
++ .add_counter(function_source_hash, id, region);
++ true
++ } else {
++ false
++ }*/
++ // TODO
++ false
++ }
++
++ fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
++ /*if let Some(coverage_context) = self.coverage_context() {
++ debug!(
++ "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
++ at {:?}",
++ instance, id, lhs, op, rhs, region,
++ );
++ let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
++ coverage_regions
++ .entry(instance)
++ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
++ .add_counter_expression(id, lhs, op, rhs, region);
++ true
++ } else {
++ false
++ }*/
++ // TODO
++ false
++ }
++
++ fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
++ /*if let Some(coverage_context) = self.coverage_context() {
++ debug!(
++ "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
++ instance, region,
++ );
++ let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
++ coverage_regions
++ .entry(instance)
++ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
++ .add_unreachable_region(region);
++ true
++ } else {
++ false
++ }*/
++ // TODO
++ false
++ }
++}
++
++impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn coverageinfo_finalize(&self) {
++ // TODO
++ //mapgen::finalize(self)
++ }
++
++ fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
++ unimplemented!();
++ /*if let Some(coverage_context) = self.coverage_context() {
++ debug!("getting pgo_func_name_var for instance={:?}", instance);
++ let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
++ pgo_func_name_var_map
++ .entry(instance)
++ .or_insert_with(|| create_pgo_func_name_var(self, instance))
++ } else {
++ bug!("Could not get the `coverage_context`");
++ }*/
++ }
++
++ /// Functions with MIR-based coverage are normally codegenned _only_ if
++ /// called. LLVM coverage tools typically expect every function to be
++ /// defined (even if unused), with at least one call to LLVM intrinsic
++ /// `instrprof.increment`.
++ ///
++ /// Codegen a small function that will never be called, with one counter
++ /// that will never be incremented.
++ ///
++ /// For used/called functions, the coverageinfo was already added to the
++ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
++ /// But in this case, since the unused function was _not_ previously
++ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
++ /// them. The first `CodeRegion` is used to add a single counter, with the
++ /// same counter ID used in the injected `instrprof.increment` intrinsic
++ /// call. Since the function is never called, all other `CodeRegion`s can be
++ /// added as `unreachable_region`s.
++ fn define_unused_fn(&self, _def_id: DefId) {
++ unimplemented!();
++ /*let instance = declare_unused_fn(self, &def_id);
++ codegen_unused_fn_and_counter(self, instance);
++ add_unused_function_coverage(self, instance, def_id);*/
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::{FunctionType, RValue};
++use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
++use rustc_codegen_ssa::traits::{BuilderMethods, DebugInfoBuilderMethods, DebugInfoMethods};
++use rustc_middle::middle::cstore::CrateDepKind;
++use rustc_middle::mir;
++use rustc_middle::ty::{Instance, Ty};
++use rustc_span::{SourceFile, Span, Symbol};
++use rustc_span::def_id::LOCAL_CRATE;
++use rustc_target::abi::Size;
++use rustc_target::abi::call::FnAbi;
++
++use crate::builder::Builder;
++use crate::context::CodegenCx;
++
++impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
++ // FIXME(eddyb) find a common convention for all of the debuginfo-related
++ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
++ fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
++ unimplemented!();
++ /*let cx = self.cx();
++
++ // Convert the direct and indirect offsets to address ops.
++ // FIXME(eddyb) use `const`s instead of getting the values via FFI,
++ // the values should match the ones in the DWARF standard anyway.
++ let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
++ let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
++ let mut addr_ops = SmallVec::<[_; 8]>::new();
++
++ if direct_offset.bytes() > 0 {
++ addr_ops.push(op_plus_uconst());
++ addr_ops.push(direct_offset.bytes() as i64);
++ }
++ for &offset in indirect_offsets {
++ addr_ops.push(op_deref());
++ if offset.bytes() > 0 {
++ addr_ops.push(op_plus_uconst());
++ addr_ops.push(offset.bytes() as i64);
++ }
++ }
++
++ // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
++ // to avoid having to pass it down in both places?
++ // NB: `var` doesn't seem to know about the column, so that's a limitation.
++ let dbg_loc = cx.create_debug_loc(scope_metadata, span);
++ unsafe {
++ // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
++ llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
++ DIB(cx),
++ variable_alloca,
++ dbg_var,
++ addr_ops.as_ptr(),
++ addr_ops.len() as c_uint,
++ dbg_loc,
++ self.llbb(),
++ );
++ }*/
++ }
++
++ /*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) {
++ unimplemented!();
++ /*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
++
++ let dbg_loc = self.cx().create_debug_loc(scope, span);
++
++ unsafe {
++ llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
++ }*/
++ }*/
++
++ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
++ // TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added:
++ // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html
++ //
++ // Meanwhile, this hook is used to call the per-crate functions that initialize global
++ // values, since we assume it is only invoked when codegenning the main function.
++ use std::iter;
++
++ for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) {
++ // FIXME: is there a better way to check whether a crate is a proc-macro crate?
++ if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly {
++ // NOTE: proc-macro crates are not included in the executable, so don't call their
++ // initialization routine.
++ let initializer_name = format!("__gccGlobalCrateInit{}", self.cx.tcx.crate_name(crate_num));
++ let codegen_init_func = self.context.new_function(None, FunctionType::Extern, self.context.new_type::<()>(), &[],
++ initializer_name, false);
++ self.llbb().add_eval(None, self.context.new_call(None, codegen_init_func, &[]));
++ }
++ }
++
++ // TODO
++ //gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
++ }
++
++ fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
++ unimplemented!();
++ // Avoid wasting time if LLVM value names aren't even enabled.
++ /*if self.sess().fewer_names() {
++ return;
++ }
++
++ // Only function parameters and instructions are local to a function,
++ // don't change the name of anything else (e.g. globals).
++ let param_or_inst = unsafe {
++ llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
++ };
++ if !param_or_inst {
++ return;
++ }
++
++ // Avoid replacing the name if it already exists.
++ // While we could combine the names somehow, it'd
++ // get noisy quick, and the usefulness is dubious.
++ if llvm::get_value_name(value).is_empty() {
++ llvm::set_value_name(value, name.as_bytes());
++ }*/
++ }
++
++ fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
++ unimplemented!();
++ /*unsafe {
++ let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
++ llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
++ }*/
++ }
++}
++
++impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
++ //metadata::create_vtable_metadata(self, ty, vtable)
++ }
++
++ fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
++ // TODO
++ None
++ }
++
++ fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
++ unimplemented!();
++ }
++
++ fn debuginfo_finalize(&self) {
++ //unimplemented!();
++ }
++
++ fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
++ unimplemented!();
++ }
++
++ fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
++ unimplemented!();
++ /*let def_id = instance.def_id();
++ let containing_scope = get_containing_scope(self, instance);
++ let span = self.tcx.def_span(def_id);
++ let loc = self.lookup_debug_loc(span.lo());
++ let file_metadata = file_metadata(self, &loc.file);
++
++ let function_type_metadata = unsafe {
++ let fn_signature = get_function_signature(self, fn_abi);
++ llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
++ };
++
++ // Find the enclosing function, in case this is a closure.
++ let def_key = self.tcx().def_key(def_id);
++ let mut name = def_key.disambiguated_data.data.to_string();
++
++ let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
++
++ // Get_template_parameters() will append a `<...>` clause to the function
++ // name if necessary.
++ let generics = self.tcx().generics_of(enclosing_fn_def_id);
++ let substs = instance.substs.truncate_to(self.tcx(), generics);
++ let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
++
++ let linkage_name = &mangled_name_of_instance(self, instance).name;
++ // Omit the linkage_name if it is the same as subprogram name.
++ let linkage_name = if &name == linkage_name { "" } else { linkage_name };
++
++ // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
++ let scope_line = loc.line;
++
++ let mut flags = DIFlags::FlagPrototyped;
++
++ if fn_abi.ret.layout.abi.is_uninhabited() {
++ flags |= DIFlags::FlagNoReturn;
++ }
++
++ let mut spflags = DISPFlags::SPFlagDefinition;
++ if is_node_local_to_unit(self, def_id) {
++ spflags |= DISPFlags::SPFlagLocalToUnit;
++ }
++ if self.sess().opts.optimize != config::OptLevel::No {
++ spflags |= DISPFlags::SPFlagOptimized;
++ }
++ if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
++ if id.to_def_id() == def_id {
++ spflags |= DISPFlags::SPFlagMainSubprogram;
++ }
++ }
++
++ unsafe {
++ return llvm::LLVMRustDIBuilderCreateFunction(
++ DIB(self),
++ containing_scope,
++ name.as_ptr().cast(),
++ name.len(),
++ linkage_name.as_ptr().cast(),
++ linkage_name.len(),
++ file_metadata,
++ loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
++ function_type_metadata,
++ scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
++ flags,
++ spflags,
++ maybe_definition_llfn,
++ template_parameters,
++ None,
++ );
++ }
++
++ fn get_function_signature<'ll, 'tcx>(
++ cx: &CodegenCx<'ll, 'tcx>,
++ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
++ ) -> &'ll DIArray {
++ if cx.sess().opts.debuginfo == DebugInfo::Limited {
++ return create_DIArray(DIB(cx), &[]);
++ }
++
++ let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
++
++ // Return type -- llvm::DIBuilder wants this at index 0
++ signature.push(if fn_abi.ret.is_ignore() {
++ None
++ } else {
++ Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
++ });
++
++ // Arguments types
++ if cx.sess().target.options.is_like_msvc {
++ // FIXME(#42800):
++ // There is a bug in MSDIA that leads to a crash when it encounters
++ // a fixed-size array of `u8` or something zero-sized in a
++ // function-type (see #40477).
++ // As a workaround, we replace those fixed-size arrays with a
++ // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
++ // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
++ // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
++ // This transformed type is wrong, but these function types are
++ // already inaccurate due to ABI adjustments (see #42800).
++ signature.extend(fn_abi.args.iter().map(|arg| {
++ let t = arg.layout.ty;
++ let t = match t.kind() {
++ ty::Array(ct, _)
++ if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
++ {
++ cx.tcx.mk_imm_ptr(ct)
++ }
++ _ => t,
++ };
++ Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
++ }));
++ } else {
++ signature.extend(
++ fn_abi
++ .args
++ .iter()
++ .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
++ );
++ }
++
++ create_DIArray(DIB(cx), &signature[..])
++ }
++
++ fn get_template_parameters<'ll, 'tcx>(
++ cx: &CodegenCx<'ll, 'tcx>,
++ generics: &ty::Generics,
++ substs: SubstsRef<'tcx>,
++ name_to_append_suffix_to: &mut String,
++ ) -> &'ll DIArray {
++ if substs.types().next().is_none() {
++ return create_DIArray(DIB(cx), &[]);
++ }
++
++ name_to_append_suffix_to.push('<');
++ for (i, actual_type) in substs.types().enumerate() {
++ if i != 0 {
++ name_to_append_suffix_to.push(',');
++ }
++
++ let actual_type =
++ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
++ // Add actual type name to <...> clause of function name
++ let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
++ name_to_append_suffix_to.push_str(&actual_type_name[..]);
++ }
++ name_to_append_suffix_to.push('>');
++
++ // Again, only create type information if full debuginfo is enabled
++ let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
++ let names = get_parameter_names(cx, generics);
++ substs
++ .iter()
++ .zip(names)
++ .filter_map(|(kind, name)| {
++ if let GenericArgKind::Type(ty) = kind.unpack() {
++ let actual_type =
++ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
++ let actual_type_metadata =
++ type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
++ let name = name.as_str();
++ Some(unsafe {
++ Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
++ DIB(cx),
++ None,
++ name.as_ptr().cast(),
++ name.len(),
++ actual_type_metadata,
++ ))
++ })
++ } else {
++ None
++ }
++ })
++ .collect()
++ } else {
++ vec![]
++ };
++
++ create_DIArray(DIB(cx), &template_params[..])
++ }
++
++ fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
++ let mut names = generics
++ .parent
++ .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
++ names.extend(generics.params.iter().map(|param| param.name));
++ names
++ }
++
++ fn get_containing_scope<'ll, 'tcx>(
++ cx: &CodegenCx<'ll, 'tcx>,
++ instance: Instance<'tcx>,
++ ) -> &'ll DIScope {
++ // First, let's see if this is a method within an inherent impl. Because
++ // if yes, we want to make the result subroutine DIE a child of the
++ // subroutine's self-type.
++ let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
++ // If the method does *not* belong to a trait, proceed
++ if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
++ let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
++ instance.substs,
++ ty::ParamEnv::reveal_all(),
++ &cx.tcx.type_of(impl_def_id),
++ );
++
++ // Only "class" methods are generally understood by LLVM,
++ // so avoid methods on other types (e.g., `<*mut T>::null`).
++ match impl_self_ty.kind() {
++ ty::Adt(def, ..) if !def.is_box() => {
++ // Again, only create type information if full debuginfo is enabled
++ if cx.sess().opts.debuginfo == DebugInfo::Full
++ && !impl_self_ty.needs_subst()
++ {
++ Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
++ } else {
++ Some(namespace::item_namespace(cx, def.did))
++ }
++ }
++ _ => None,
++ }
++ } else {
++ // For trait method impls we still use the "parallel namespace"
++ // strategy
++ None
++ }
++ });
++
++ self_type.unwrap_or_else(|| {
++ namespace::item_namespace(
++ cx,
++ DefId {
++ krate: instance.def_id().krate,
++ index: cx
++ .tcx
++ .def_key(instance.def_id())
++ .parent
++ .expect("get_containing_scope: missing parent?"),
++ },
++ )
++ })
++ }*/
++ }
++
++ fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
++ unimplemented!();
++ /*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
++
++ unsafe {
++ llvm::LLVMRustDIBuilderCreateDebugLocation(
++ utils::debug_context(self).llcontext,
++ line.unwrap_or(UNKNOWN_LINE_NUMBER),
++ col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
++ scope,
++ inlined_at,
++ )
++ }*/
++ }
++}
--- /dev/null
--- /dev/null
++use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
++use rustc_codegen_ssa::traits::BaseTypeMethods;
++use rustc_middle::ty::Ty;
++use rustc_span::Symbol;
++use rustc_target::abi::call::FnAbi;
++
++use crate::abi::FnAbiGccExt;
++use crate::context::{CodegenCx, unit_name};
++use crate::intrinsic::llvm;
++use crate::mangled_std_symbols::{ARGV_INIT_ARRAY, ARGV_INIT_WRAPPER};
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
++ if self.globals.borrow().contains_key(name) {
++ let typ = self.globals.borrow().get(name).expect("global").get_type();
++ let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
++ if is_tls {
++ global.set_tls_model(self.tls_model);
++ }
++ if let Some(link_section) = link_section {
++ global.set_link_section(&link_section.as_str());
++ }
++ global.get_address(None)
++ }
++ else {
++ self.declare_global(name, ty, is_tls, link_section)
++ }
++ }
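++ // Illustrative usage (a sketch; the global name here is an assumption, not
++ // taken from an actual call site in this crate):
++ //
++ // let ty = cx.type_i32();
++ // // Returns the address of the existing global if one was already declared
++ // // under that name, otherwise declares it first.
++ // let global = cx.get_or_insert_global("my_static", ty, /* is_tls */ false, None);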
++
++ pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
++ let index = self.global_gen_sym_counter.get();
++ self.global_gen_sym_counter.set(index + 1);
++ let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
++ self.context.new_global(None, GlobalKind::Exported, ty, &name)
++ }
++
++ pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
++ //debug!("declare_global_with_linkage(name={:?})", name);
++ let global = self.context.new_global(None, linkage, ty, name)
++ .get_address(None);
++ self.globals.borrow_mut().insert(name.to_string(), global);
++ // NOTE: a global seems to only be visible within a single module, so save the name
++ // instead of the value in order to import it later.
++ self.global_names.borrow_mut().insert(global, name.to_string());
++ global
++ }
++
++ pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
++ self.linkage.set(FunctionType::Exported);
++ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
++ // FIXME: this is a wrong cast. That requires changing the compiler API.
++ unsafe { std::mem::transmute(func) }
++ }
++
++ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
++ //debug!("declare_global(name={:?})", name);
++ // FIXME: correctly support global variable initialization.
++ if name.starts_with(ARGV_INIT_ARRAY) {
++ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
++ // name of the variable now to actually declare it later.
++ *self.init_argv_var.borrow_mut() = name.to_string();
++
++ let global = self.context.new_global(None, GlobalKind::Imported, ty, name);
++ if let Some(link_section) = link_section {
++ global.set_link_section(&link_section.as_str());
++ }
++ return global.get_address(None);
++ }
++ let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
++ if is_tls {
++ global.set_tls_model(self.tls_model);
++ }
++ if let Some(link_section) = link_section {
++ global.set_link_section(&link_section.as_str());
++ }
++ let global = global.get_address(None);
++ self.globals.borrow_mut().insert(name.to_string(), global);
++ // NOTE: a global seems to only be visible within a single module, so save the name
++ // instead of the value in order to import it later.
++ self.global_names.borrow_mut().insert(global, name.to_string());
++ global
++ }
++
++ pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
++ // TODO: use the fn_type parameter.
++ let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
++ let return_type = self.type_i32();
++ let variadic = false;
++ self.linkage.set(FunctionType::Exported);
++ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
++ // NOTE: current_func also needs to be set here, because get_fn() is not called
++ // for the main function.
++ *self.current_func.borrow_mut() = Some(func);
++ // FIXME: this is a wrong cast. That requires changing the compiler API.
++ unsafe { std::mem::transmute(func) }
++ }
++
++ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
++ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we found the name
++ // of the variable earlier, so we declare it now.
++ // Since we don't correctly support initializers yet, we initialize this variable manually
++ // for now.
++ if name.starts_with(ARGV_INIT_WRAPPER) && !self.argv_initialized.get() {
++ let global_name = &*self.init_argv_var.borrow();
++ let return_type = self.type_void();
++ let params = [
++ self.context.new_parameter(None, self.int_type, "argc"),
++ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "argv"),
++ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "envp"),
++ ];
++ let function = self.context.new_function(None, FunctionType::Extern, return_type, &params, name, false);
++ let initializer = function.get_address(None);
++
++ let param_types = [
++ self.int_type,
++ self.u8_type.make_pointer().make_pointer(),
++ self.u8_type.make_pointer().make_pointer(),
++ ];
++ let ty = self.context.new_function_pointer_type(None, return_type, &param_types, false);
++
++ let global = self.context.new_global(None, GlobalKind::Exported, ty, global_name);
++ global.set_link_section(".init_array.00099");
++ global.global_set_initializer_value(initializer);
++ let global = global.get_address(None);
++ self.globals.borrow_mut().insert(global_name.to_string(), global);
++ // NOTE: a global seems to only be visible within a single module, so save the name
++ // instead of the value in order to import it later.
++ self.global_names.borrow_mut().insert(global, global_name.to_string());
++ self.argv_initialized.set(true);
++ }
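++ // The construct built above is roughly equivalent to this Rust sketch
++ // (illustrative only; `argv_init_wrapper` is a hypothetical function name):
++ //
++ // #[used]
++ // #[link_section = ".init_array.00099"]
++ // static ARGV_INIT: extern "C" fn(i32, *const *const u8, *const *const u8) =
++ //     argv_init_wrapper;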
++ //debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
++ let (return_type, params, variadic) = fn_abi.gcc_type(self);
++ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
++ //fn_abi.apply_attrs_llfn(self, func);
++ // FIXME: this is a wrong cast. That requires changing the compiler API.
++ unsafe { std::mem::transmute(func) }
++ }
++
++ pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> Option<RValue<'gcc>> {
++ Some(self.get_or_insert_global(name, ty, is_tls, link_section))
++ }
++
++ pub fn define_private_global(&self, ty: Type<'gcc>) -> RValue<'gcc> {
++ let global = self.declare_unnamed_global(ty);
++ global.get_address(None)
++ }
++
++ pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
++ //debug!("get_declared_value(name={:?})", name);
++ // TODO: use a different field than globals, because this seems to return a function?
++ self.globals.borrow().get(name).cloned()
++ }
++
++ /*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
++ // TODO: gcc does not allow global initialization.
++ None
++ /*self.get_declared_value(name).and_then(|val| {
++ let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
++ if !declaration { Some(val) } else { None }
++ })*/
++ }*/
++}
++
++/// Declare a function.
++///
++/// If a value with the same name is already declared, the function updates
++/// that declaration and returns the existing value instead.
++fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
++ //debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
++ /*let llfn = unsafe {
++ llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
++ };*/
++
++ if name.starts_with("llvm.") {
++ return llvm::intrinsic(name, cx);
++ }
++ let func =
++ if cx.functions.borrow().contains_key(name) {
++ *cx.functions.borrow().get(name).expect("function")
++ }
++ else {
++ let params: Vec<_> = param_types.into_iter().enumerate()
++ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name.
++ .collect();
++ let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
++ cx.functions.borrow_mut().insert(name.to_string(), func);
++ func
++ };
++
++ //llvm::SetFunctionCallConv(llfn, callconv); // TODO
++ // Function addresses in Rust are never significant, allowing functions to
++ // be merged.
++ //llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO
++
++ /*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
++ llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
++ }*/
++
++ //attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
++ //attributes::non_lazy_bind(cx.sess(), llfn);
++
++ // FIXME: invalid cast.
++ // TODO: is this line useful?
++ //cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
++ func
++}
++
++// FIXME: this is a hack because libgccjit currently only supports alphanumeric
++// characters and `_` in symbol names. The unsupported characters rustc emits are
++// `$` and `.`, so replace them with `_`.
++pub fn mangle_name(name: &str) -> String {
++ name.replace(|char: char| {
++ if !char.is_alphanumeric() && char != '_' {
++ debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
++ true
++ }
++ else {
++ false
++ }
++ }, "_")
++}
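++
++// A minimal sanity check for mangle_name() (an illustrative sketch; this test
++// module is not part of the original crate):
++/*
++#[cfg(test)]
++mod tests {
++ use super::mangle_name;
++
++ #[test]
++ fn replaces_unsupported_chars() {
++ // `$` and `.` are the only unsupported characters rustc emits.
++ assert_eq!(mangle_name("core..fmt..Debug"), "core__fmt__Debug");
++ assert_eq!(mangle_name("_ZN4core3fmt$LT$$GT$"), "_ZN4core3fmt_LT__GT_");
++ }
++}
++*/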
--- /dev/null
--- /dev/null
++use gccjit::Function;
++
++use crate::context::CodegenCx;
++
++pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
++ let _gcc_name =
++ match name {
++ "llvm.x86.xgetbv" => {
++ let gcc_name = "__builtin_trap";
++ let func = cx.context.get_builtin_function(gcc_name);
++ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
++ return func;
++ },
++ // TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
++ "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
++ "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
++ "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
++ _ => unimplemented!("unsupported LLVM intrinsic {}", name)
++ };
++
++ // TODO: use get_target_builtin_function() (commented out below) once gccjit supports it.
++ unimplemented!("unsupported LLVM intrinsic {}: no target builtin available yet", name);
++ /*let func = cx.context.get_target_builtin_function(gcc_name);
++ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
++ func*/
++}
--- /dev/null
--- /dev/null
++pub mod llvm;
++mod simd;
++
++use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
++use rustc_codegen_ssa::MemFlags;
++use rustc_codegen_ssa::base::wants_msvc_seh;
++use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
++use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
++use rustc_codegen_ssa::mir::place::PlaceRef;
++use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
++use rustc_middle::bug;
++use rustc_middle::ty::{self, Instance, Ty};
++use rustc_span::{Span, Symbol, symbol::kw, sym};
++use rustc_target::abi::{HasDataLayout, LayoutOf};
++use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
++use rustc_target::spec::PanicStrategy;
++
++use crate::abi::GccType;
++use crate::builder::Builder;
++use crate::common::TypeReflection;
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++use crate::intrinsic::simd::generic_simd_intrinsic;
++
++fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
++ let gcc_name = match name {
++ sym::sqrtf32 => "sqrtf",
++ sym::sqrtf64 => "sqrt",
++ sym::powif32 => "__builtin_powif",
++ sym::powif64 => "__builtin_powi",
++ sym::sinf32 => "sinf",
++ sym::sinf64 => "sin",
++ sym::cosf32 => "cosf",
++ sym::cosf64 => "cos",
++ sym::powf32 => "powf",
++ sym::powf64 => "pow",
++ sym::expf32 => "expf",
++ sym::expf64 => "exp",
++ sym::exp2f32 => "exp2f",
++ sym::exp2f64 => "exp2",
++ sym::logf32 => "logf",
++ sym::logf64 => "log",
++ sym::log10f32 => "log10f",
++ sym::log10f64 => "log10",
++ sym::log2f32 => "log2f",
++ sym::log2f64 => "log2",
++ sym::fmaf32 => "fmaf",
++ sym::fmaf64 => "fma",
++ sym::fabsf32 => "fabsf",
++ sym::fabsf64 => "fabs",
++ sym::minnumf32 => "fminf",
++ sym::minnumf64 => "fmin",
++ sym::maxnumf32 => "fmaxf",
++ sym::maxnumf64 => "fmax",
++ sym::copysignf32 => "copysignf",
++ sym::copysignf64 => "copysign",
++ sym::floorf32 => "floorf",
++ sym::floorf64 => "floor",
++ sym::ceilf32 => "ceilf",
++ sym::ceilf64 => "ceil",
++ sym::truncf32 => "truncf",
++ sym::truncf64 => "trunc",
++ sym::rintf32 => "rintf",
++ sym::rintf64 => "rint",
++ sym::nearbyintf32 => "nearbyintf",
++ sym::nearbyintf64 => "nearbyint",
++ sym::roundf32 => "roundf",
++ sym::roundf64 => "round",
++ sym::abort => "abort",
++ _ => return None,
++ };
++ Some(cx.context.get_builtin_function(&gcc_name))
++}
++
++impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
++ let tcx = self.tcx;
++ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
++
++ let (def_id, substs) = match *callee_ty.kind() {
++ ty::FnDef(def_id, substs) => (def_id, substs),
++ _ => bug!("expected fn item type, found {}", callee_ty),
++ };
++
++ let sig = callee_ty.fn_sig(tcx);
++ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
++ let arg_tys = sig.inputs();
++ let ret_ty = sig.output();
++ let name = tcx.item_name(def_id);
++ let name_str = &*name.as_str();
++
++ let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
++ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
++
++ let simple = get_simple_intrinsic(self, name);
++ let llval =
++ match name {
++ _ if simple.is_some() => {
++ // FIXME: remove this cast when the API supports function values.
++ let func = unsafe { std::mem::transmute(simple.expect("simple")) };
++ self.call(func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
++ },
++ sym::likely => {
++ self.expect(args[0].immediate(), true)
++ }
++ sym::unlikely => {
++ self.expect(args[0].immediate(), false)
++ }
++ kw::Try => {
++ try_intrinsic(
++ self,
++ args[0].immediate(),
++ args[1].immediate(),
++ args[2].immediate(),
++ llresult,
++ );
++ return;
++ }
++ sym::breakpoint => {
++ unimplemented!();
++ /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
++ self.call(llfn, &[], None)*/
++ }
++ sym::va_copy => {
++ unimplemented!();
++ /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
++ self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
++ }
++ sym::va_arg => {
++ unimplemented!();
++ /*match fn_abi.ret.layout.abi {
++ abi::Abi::Scalar(ref scalar) => {
++ match scalar.value {
++ Primitive::Int(..) => {
++ if self.cx().size_of(ret_ty).bytes() < 4 {
++ // `va_arg` should not be called on an integer type
++ // less than 4 bytes in length. If it is, promote
++ // the integer to an `i32` and truncate the result
++ // back to the smaller type.
++ let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
++ self.trunc(promoted_result, llret_ty)
++ } else {
++ emit_va_arg(self, args[0], ret_ty)
++ }
++ }
++ Primitive::F64 | Primitive::Pointer => {
++ emit_va_arg(self, args[0], ret_ty)
++ }
++ // `va_arg` should never be used with the return type f32.
++ Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
++ }
++ }
++ _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
++ }*/
++ }
++
++ sym::volatile_load | sym::unaligned_volatile_load => {
++ let tp_ty = substs.type_at(0);
++ let mut ptr = args[0].immediate();
++ if let PassMode::Cast(ty) = fn_abi.ret.mode {
++ ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
++ }
++ let load = self.volatile_load(ptr.get_type(), ptr);
++ // TODO
++ /*let align = if name == sym::unaligned_volatile_load {
++ 1
++ } else {
++ self.align_of(tp_ty).bytes() as u32
++ };
++ unsafe {
++ llvm::LLVMSetAlignment(load, align);
++ }*/
++ self.to_immediate(load, self.layout_of(tp_ty))
++ }
++ sym::volatile_store => {
++ let dst = args[0].deref(self.cx());
++ args[1].val.volatile_store(self, dst);
++ return;
++ }
++ sym::unaligned_volatile_store => {
++ let dst = args[0].deref(self.cx());
++ args[1].val.unaligned_volatile_store(self, dst);
++ return;
++ }
++ sym::prefetch_read_data
++ | sym::prefetch_write_data
++ | sym::prefetch_read_instruction
++ | sym::prefetch_write_instruction => {
++ unimplemented!();
++ /*let expect = self.get_intrinsic(&("llvm.prefetch"));
++ let (rw, cache_type) = match name {
++ sym::prefetch_read_data => (0, 1),
++ sym::prefetch_write_data => (1, 1),
++ sym::prefetch_read_instruction => (0, 0),
++ sym::prefetch_write_instruction => (1, 0),
++ _ => bug!(),
++ };
++ self.call(
++ expect,
++ &[
++ args[0].immediate(),
++ self.const_i32(rw),
++ args[1].immediate(),
++ self.const_i32(cache_type),
++ ],
++ None,
++ )*/
++ }
++ sym::ctlz
++ | sym::ctlz_nonzero
++ | sym::cttz
++ | sym::cttz_nonzero
++ | sym::ctpop
++ | sym::bswap
++ | sym::bitreverse
++ | sym::rotate_left
++ | sym::rotate_right
++ | sym::saturating_add
++ | sym::saturating_sub => {
++ let ty = arg_tys[0];
++ match int_type_width_signed(ty, self) {
++ Some((width, signed)) => match name {
++ sym::ctlz | sym::cttz => {
++ let func = self.current_func.borrow().expect("func");
++ let then_block = func.new_block("then");
++ let else_block = func.new_block("else");
++ let after_block = func.new_block("after");
++
++ let arg = args[0].immediate();
++ let result = func.new_local(None, arg.get_type(), "zeros");
++ let zero = self.cx.context.new_rvalue_zero(arg.get_type());
++ let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
++ self.block.expect("block").end_with_conditional(None, cond, then_block, else_block);
++
++ let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
++ then_block.add_assignment(None, result, zero_result);
++ then_block.end_with_jump(None, after_block);
++
++ // NOTE: since jumps were added in a place
++ // count_leading_zeroes() does not expect, the current blocks
++ // in the state need to be updated.
++ *self.current_block.borrow_mut() = Some(else_block);
++ self.block = Some(else_block);
++
++ let zeros =
++ match name {
++ sym::ctlz => self.count_leading_zeroes(width, arg),
++ sym::cttz => self.count_trailing_zeroes(width, arg),
++ _ => unreachable!(),
++ };
++ else_block.add_assignment(None, result, zeros);
++ else_block.end_with_jump(None, after_block);
++
++ // NOTE: since jumps were added in a place rustc does not
++ // expect, the current blocks in the state need to be updated.
++ *self.current_block.borrow_mut() = Some(after_block);
++ self.block = Some(after_block);
++
++ result.to_rvalue()
++
++ /*let y = self.const_bool(false);
++ let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
++ self.call(llfn, &[args[0].immediate(), y], None)*/
++ }
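++ // What the block juggling above encodes: rustc's ctlz/cttz intrinsics are
++ // defined to return `width` for a zero argument, while __builtin_clz and
++ // __builtin_ctz are undefined on zero, so the generated code branches first.
++ // Plain-Rust sketch of the control flow (illustrative only;
++ // `count_leading_zeroes` stands in for the helper below):
++ /*
++ fn ctlz(arg: u64, width: u64) -> u64 {
++ if arg == 0 { width } else { count_leading_zeroes(arg) }
++ }
++ */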
++ sym::ctlz_nonzero => {
++ self.count_leading_zeroes(width, args[0].immediate())
++ },
++ sym::cttz_nonzero => {
++ self.count_trailing_zeroes(width, args[0].immediate())
++ }
++ sym::ctpop => self.pop_count(args[0].immediate()),
++ sym::bswap => {
++ if width == 8 {
++ args[0].immediate() // byte-swapping a u8/i8 is a no-op
++ }
++ else {
++ // TODO: check if it's faster to use string literals and a
++ // match instead of format!.
++ let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
++ let mut arg = args[0].immediate();
++ // FIXME: this cast should not be necessary. Remove
++ // when having proper sized integer types.
++ let param_type = bswap.get_param(0).to_rvalue().get_type();
++ if param_type != arg.get_type() {
++ arg = self.bitcast(arg, param_type);
++ }
++ self.cx.context.new_call(None, bswap, &[arg])
++ }
++ },
++ sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
++ sym::rotate_left | sym::rotate_right => {
++ // TODO: implement using algorithm from:
++ // https://blog.regehr.org/archives/1063
++ // for other platforms.
++ let is_left = name == sym::rotate_left;
++ let val = args[0].immediate();
++ let raw_shift = args[1].immediate();
++ if is_left {
++ self.rotate_left(val, raw_shift, width)
++ }
++ else {
++ self.rotate_right(val, raw_shift, width)
++ }
++ },
++ sym::saturating_add => {
++ self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
++ },
++ sym::saturating_sub => {
++ self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
++ },
++ _ => bug!(),
++ },
++ None => {
++ span_invalid_monomorphization_error(
++ tcx.sess,
++ span,
++ &format!(
++ "invalid monomorphization of `{}` intrinsic: \
++ expected basic integer type, found `{}`",
++ name, ty
++ ),
++ );
++ return;
++ }
++ }
++ }
++
++ sym::raw_eq => {
++ use rustc_target::abi::Abi::*;
++ let tp_ty = substs.type_at(0);
++ let layout = self.layout_of(tp_ty).layout;
++ let use_integer_compare = match layout.abi {
++ Scalar(_) | ScalarPair(_, _) => true,
++ Uninhabited | Vector { .. } => false,
++ Aggregate { .. } => {
++ // For rusty ABIs, small aggregates are actually passed
++ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
++ // so we re-use that same threshold here.
++ layout.size <= self.data_layout().pointer_size * 2
++ }
++ };
++
++ let a = args[0].immediate();
++ let b = args[1].immediate();
++ if layout.size.bytes() == 0 {
++ self.const_bool(true)
++ }
++ /*else if use_integer_compare {
++ let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
++ let ptr_ty = self.type_ptr_to(integer_ty);
++ let a_ptr = self.bitcast(a, ptr_ty);
++ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
++ let b_ptr = self.bitcast(b, ptr_ty);
++ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
++ self.icmp(IntPredicate::IntEQ, a_val, b_val)
++ }*/
++ else {
++ let void_ptr_type = self.context.new_type::<*const ()>();
++ let a_ptr = self.bitcast(a, void_ptr_type);
++ let b_ptr = self.bitcast(b, void_ptr_type);
++ let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
++ let builtin = self.context.get_builtin_function("memcmp");
++ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
++ self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
++ }
++ }
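++ // The fallback above lowers raw_eq to a memcmp-based comparison; a rough
++ // sketch in plain Rust (illustrative only; `libc::memcmp` stands in for
++ // the builtin used above):
++ /*
++ unsafe fn raw_eq<T>(a: *const T, b: *const T) -> bool {
++ let n = std::mem::size_of::<T>();
++ n == 0 || libc::memcmp(a.cast(), b.cast(), n) == 0
++ }
++ */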
++
++ _ if name_str.starts_with("simd_") => {
++ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
++ Ok(llval) => llval,
++ Err(()) => return,
++ }
++ }
++
++ _ => bug!("unknown intrinsic '{}'", name),
++ };
++
++ if !fn_abi.ret.is_ignore() {
++ if let PassMode::Cast(ty) = fn_abi.ret.mode {
++ let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
++ let ptr = self.pointercast(result.llval, ptr_llty);
++ self.store(llval, ptr, result.align);
++ }
++ else {
++ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
++ .val
++ .store(self, result);
++ }
++ }
++ }
++
++ fn abort(&mut self) {
++ let func = self.context.get_builtin_function("abort");
++ let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
++ self.call(func, &[], None);
++ }
++
++ fn assume(&mut self, value: Self::Value) {
++ // TODO: switch to assume when it exists.
++ // Or use something like this:
++ // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
++ self.expect(value, true);
++ }
++
++ fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
++ // TODO
++ /*let expect = self.context.get_builtin_function("__builtin_expect");
++ let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
++ self.call(expect, &[cond, self.const_bool(expected)], None)*/
++ cond
++ }
++
++ fn sideeffect(&mut self) {
++ // TODO
++ /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
++ let fnname = self.get_intrinsic(&("llvm.sideeffect"));
++ self.call(fnname, &[], None);
++ }*/
++ }
++
++ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
++ self.call(intrinsic, &[va_list], None)*/
++ }
++
++ fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
++ unimplemented!();
++ /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
++ self.call(intrinsic, &[va_list], None)*/
++ }
++}
++
++impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
++ fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
++ arg_abi.store_fn_arg(self, idx, dst)
++ }
++
++ fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
++ arg_abi.store(self, val, dst)
++ }
++
++ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
++ arg_abi.memory_ty(self)
++ }
++}
++
++pub trait ArgAbiExt<'gcc, 'tcx> {
++ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
++ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
++ fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
++}
++
++impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
++ /// Gets the GCC type for a place of the original Rust type of
++ /// this argument/return, i.e., the result of `type_of::type_of`.
++ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
++ self.layout.gcc_type(cx, true)
++ }
++
++ /// Stores a direct/indirect value described by this ArgAbi into a
++ /// place for the original Rust type of this argument/return.
++ /// Can be used for both storing formal arguments into Rust variables
++ /// or results of call/invoke instructions into their destinations.
++ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
++ if self.is_ignore() {
++ return;
++ }
++ if self.is_sized_indirect() {
++ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
++ }
++ else if self.is_unsized_indirect() {
++ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
++ }
++ else if let PassMode::Cast(cast) = self.mode {
++ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
++ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
++ let can_store_through_cast_ptr = false;
++ if can_store_through_cast_ptr {
++ let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
++ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
++ bx.store(val, cast_dst, self.layout.align.abi);
++ }
++ else {
++ // The actual return type is a struct, but the ABI
++ // adaptation code has cast it into some scalar type. The
++ // code that follows is the only reliable way I have
++ // found to do a transform like i64 -> {i32,i32}.
++ // Basically we dump the data onto the stack then memcpy it.
++ //
++ // Other approaches I tried:
++ // - Casting rust ret pointer to the foreign type and using Store
++ // is (a) unsafe if size of foreign type > size of rust type and
++ // (b) runs afoul of strict aliasing rules, yielding invalid
++ // assembly under -O (specifically, the store gets removed).
++ // - Truncating foreign type to correct integral type and then
++ // bitcasting to the struct type yields invalid cast errors.
++
++ // We instead thus allocate some scratch space...
++ let scratch_size = cast.size(bx);
++ let scratch_align = cast.align(bx);
++ let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
++ bx.lifetime_start(llscratch, scratch_size);
++
++ // ... where we first store the value...
++ bx.store(val, llscratch, scratch_align);
++
++ // ... and then memcpy it to the intended destination.
++ bx.memcpy(
++ dst.llval,
++ self.layout.align.abi,
++ llscratch,
++ scratch_align,
++ bx.const_usize(self.layout.size.bytes()),
++ MemFlags::empty(),
++ );
++
++ bx.lifetime_end(llscratch, scratch_size);
++ }
++ }
++ else {
++ OperandValue::Immediate(val).store(bx, dst);
++ }
++ }
++
++ fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
++ let mut next = || {
++ let val = bx.current_func().get_param(*idx as i32);
++ *idx += 1;
++ val.to_rvalue()
++ };
++ match self.mode {
++ PassMode::Ignore => {}
++ PassMode::Pair(..) => {
++ OperandValue::Pair(next(), next()).store(bx, dst);
++ }
++ PassMode::Indirect { extra_attrs: Some(_), .. } => {
++ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
++ }
++ PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
++ // NOTE: next() already yields an rvalue, so it can be stored directly.
++ self.store(bx, next(), dst);
++ }
++ }
++ }
++}
++
++fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
++ match ty.kind() {
++ ty::Int(t) => Some((
++ match t {
++ rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
++ rustc_middle::ty::IntTy::I8 => 8,
++ rustc_middle::ty::IntTy::I16 => 16,
++ rustc_middle::ty::IntTy::I32 => 32,
++ rustc_middle::ty::IntTy::I64 => 64,
++ rustc_middle::ty::IntTy::I128 => 128,
++ },
++ true,
++ )),
++ ty::Uint(t) => Some((
++ match t {
++ rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
++ rustc_middle::ty::UintTy::U8 => 8,
++ rustc_middle::ty::UintTy::U16 => 16,
++ rustc_middle::ty::UintTy::U32 => 32,
++ rustc_middle::ty::UintTy::U64 => 64,
++ rustc_middle::ty::UintTy::U128 => 128,
++ },
++ false,
++ )),
++ _ => None,
++ }
++}
++
++impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
++ fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
++ let typ = value.get_type();
++ let context = &self.cx.context;
++ match width {
++ 8 => {
++ // First step.
++ let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
++ let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
++ let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
++ let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
++ let step1 = self.or(left, right);
++
++ // Second step.
++ let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
++ let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
++ let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
++ let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
++ let step2 = self.or(left, right);
++
++ // Third step.
++ let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
++ let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
++ let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
++ let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
++ let step3 = self.or(left, right);
++
++ step3
++ },
++ 16 => {
++ // First step.
++ let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
++ let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
++ let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
++ let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
++ let step1 = self.or(left, right);
++
++ // Second step.
++ let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
++ let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
++ let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
++ let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
++ let step2 = self.or(left, right);
++
++ // Third step.
++ let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
++ let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
++ let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
++ let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
++ let step3 = self.or(left, right);
++
++ // Fourth step.
++ let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
++ let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
++ let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
++ let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
++ let step4 = self.or(left, right);
++
++ step4
++ },
++ 32 => {
++ // TODO: Refactor with other implementations.
++ // First step.
++ let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
++ let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
++ let step1 = self.or(left, right);
++
++ // Second step.
++ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
++ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
++ let step2 = self.or(left, right);
++
++ // Third step.
++ let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
++ let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
++ let step3 = self.or(left, right);
++
++ // Fourth step.
++ let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
++ let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
++ let step4 = self.or(left, right);
++
++ // Fifth step.
++ let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
++ let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
++ let step5 = self.or(left, right);
++
++ step5
++ },
++ 64 => {
++ // First step.
++ let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
++ let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
++ let step1 = self.or(left, right);
++
++ // Second step.
++ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
++ let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
++ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
++ let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
++ let step2 = self.or(left, right);
++
++ // Third step.
++ let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
++ let left = self.xor(step2, left);
++ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
++
++ let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
++ let left = self.or(temp, left);
++ let step3 = self.xor(left, step2);
++
++ // Fourth step.
++ let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
++ let left = self.xor(step3, left);
++ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
++
++ let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
++ let left = self.or(temp, left);
++ let step4 = self.xor(left, step3);
++
++ // Fifth step.
++ let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
++ let left = self.xor(step4, left);
++ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
++
++ let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
++ let left = self.or(temp, left);
++ let step5 = self.xor(left, step4);
++
++ step5
++ },
++ 128 => {
++ // TODO: find a more efficient implementation?
++ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
++ let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
++ let low = self.context.new_cast(None, value, self.u64_type);
++
++ let reversed_high = self.bit_reverse(64, high);
++ let reversed_low = self.bit_reverse(64, low);
++
++ let new_low = self.context.new_cast(None, reversed_high, typ);
++ let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
++
++ new_low | new_high
++ },
++ _ => {
++ panic!("cannot bit reverse with width = {}", width);
++ },
++ }
++ }
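++ // The 8-bit case above is the classic swap-based reversal; a plain-Rust
++ // sketch of the same three steps (illustrative only):
++ /*
++ fn bit_reverse_u8(x: u8) -> u8 {
++ let x = (x & 0xF0) >> 4 | (x & 0x0F) << 4; // swap the two nibbles
++ let x = (x & 0xCC) >> 2 | (x & 0x33) << 2; // swap 2-bit pairs
++ (x & 0xAA) >> 1 | (x & 0x55) << 1 // swap adjacent bits
++ }
++ */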
++
++ fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: use width?
++ let arg_type = arg.get_type();
++ let count_leading_zeroes =
++ if arg_type.is_uint(&self.cx) {
++ "__builtin_clz"
++ }
++ else if arg_type.is_ulong(&self.cx) {
++ "__builtin_clzl"
++ }
++ else if arg_type.is_ulonglong(&self.cx) {
++ "__builtin_clzll"
++ }
++ else if width == 128 {
++ // Algorithm from: https://stackoverflow.com/a/28433850/389119
++ let array_type = self.context.new_array_type(None, arg_type, 3);
++ let result = self.current_func()
++ .new_local(None, array_type, "count_loading_zeroes_results");
++
++ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
++ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
++ let low = self.context.new_cast(None, arg, self.u64_type);
++
++ let zero = self.context.new_rvalue_zero(self.usize_type);
++ let one = self.context.new_rvalue_one(self.usize_type);
++ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
++
++ let clzll = self.context.get_builtin_function("__builtin_clzll");
++
++ let first_elem = self.context.new_array_access(None, result, zero);
++ let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
++ self.llbb()
++ .add_assignment(None, first_elem, first_value);
++
++ let second_elem = self.context.new_array_access(None, result, one);
++ let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
++ self.llbb()
++ .add_assignment(None, second_elem, second_value);
++
++ let third_elem = self.context.new_array_access(None, result, two);
++ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
++ self.llbb()
++ .add_assignment(None, third_elem, third_value);
++
++ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
++ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
++ let not_low_and_not_high = not_low & not_high;
++ let index = not_high + not_low_and_not_high;
++
++ let res = self.context.new_array_access(None, result, index);
++
++ return self.context.new_cast(None, res, arg_type);
++ }
++ else {
++ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
++ let arg = self.context.new_cast(None, arg, self.uint_type);
++ let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
++ let diff = self.context.new_rvalue_from_long(self.int_type, diff);
++ let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
++ return self.context.new_cast(None, res, arg_type);
++ };
++ let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
++ let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
++ self.context.new_cast(None, res, arg_type)
++ }
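++ // The 128-bit branch above is branch-free: it precomputes the three possible
++ // answers and selects one by indexing. Plain-Rust sketch (illustrative only):
++ /*
++ fn clz128(x: u128) -> u32 {
++ let (high, low) = ((x >> 64) as u64, x as u64);
++ let results = [high.leading_zeros(), low.leading_zeros() + 64, 128];
++ let not_high = (high == 0) as usize; // logical negation, as above
++ let not_low = (low == 0) as usize;
++ results[not_high + (not_low & not_high)]
++ }
++ */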
++
++ fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
++ let arg_type = arg.get_type();
++ let (count_trailing_zeroes, expected_type) =
++ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
++ // NOTE: no need to mask with 0xFF for uchar because the result is undefined on a zero argument.
++ ("__builtin_ctz", self.cx.uint_type)
++ }
++ else if arg_type.is_ulong(&self.cx) {
++ ("__builtin_ctzl", self.cx.ulong_type)
++ }
++ else if arg_type.is_ulonglong(&self.cx) {
++ ("__builtin_ctzll", self.cx.ulonglong_type)
++ }
++ else if arg_type.is_u128(&self.cx) {
++ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
++ let array_type = self.context.new_array_type(None, arg_type, 3);
++ let result = self.current_func()
++ .new_local(None, array_type, "count_loading_zeroes_results");
++
++ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
++ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
++ let low = self.context.new_cast(None, arg, self.u64_type);
++
++ let zero = self.context.new_rvalue_zero(self.usize_type);
++ let one = self.context.new_rvalue_one(self.usize_type);
++ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
++
++ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
++
++ let first_elem = self.context.new_array_access(None, result, zero);
++ let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
++ self.llbb()
++ .add_assignment(None, first_elem, first_value);
++
++ let second_elem = self.context.new_array_access(None, result, one);
++ let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
++ self.llbb()
++ .add_assignment(None, second_elem, second_value);
++
++ let third_elem = self.context.new_array_access(None, result, two);
++ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
++ self.llbb()
++ .add_assignment(None, third_elem, third_value);
++
++ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
++ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
++ let not_low_and_not_high = not_low & not_high;
++ let index = not_low + not_low_and_not_high;
++
++ let res = self.context.new_array_access(None, result, index);
++
++ return self.context.new_cast(None, res, arg_type);
++ }
++ else {
++ unimplemented!("count_trailing_zeroes for {:?}", arg_type);
++ };
++ let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
++ let arg =
++ if arg_type != expected_type {
++ self.context.new_cast(None, arg, expected_type)
++ }
++ else {
++ arg
++ };
++ let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
++ self.context.new_cast(None, res, arg_type)
++ }
++
++ fn int_width(&self, typ: Type<'gcc>) -> i64 {
++ self.cx.int_width(typ) as i64
++ }
++
++ fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
++ // TODO: use the optimized version with fewer operations.
++ let value_type = value.get_type();
++
++ if value_type.is_u128(&self.cx) {
++ // TODO: fold this into the normal algorithm below to get a more efficient
++ // implementation (one that does not require a call to __popcountdi2).
++ let popcount = self.context.get_builtin_function("__builtin_popcountll");
++ let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
++ let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
++ let high = self.context.new_call(None, popcount, &[high]);
++ let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
++ let low = self.context.new_call(None, popcount, &[low]);
++ return high + low;
++ }
++
++ // First step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
++ let right = shifted & mask;
++ let value = left + right;
++
++ // Second step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
++ let right = shifted & mask;
++ let value = left + right;
++
++ // Third step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
++ let right = shifted & mask;
++ let value = left + right;
++
++ if value_type.is_u8(&self.cx) {
++ return value;
++ }
++
++ // Fourth step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
++ let right = shifted & mask;
++ let value = left + right;
++
++ if value_type.is_u16(&self.cx) {
++ return value;
++ }
++
++ // Fifth step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
++ let right = shifted & mask;
++ let value = left + right;
++
++ if value_type.is_u32(&self.cx) {
++ return value;
++ }
++
++ // Sixth step.
++ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
++ let left = value & mask;
++ let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
++ let right = shifted & mask;
++ let value = left + right;
++
++ value
++ }
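++ // The masking steps above are the classic SWAR population count; plain-Rust
++ // sketch for u64 (illustrative only — Rust's `count_ones` does this natively):
++ /*
++ fn popcount_u64(mut v: u64) -> u64 {
++ v = (v & 0x5555555555555555) + ((v >> 1) & 0x5555555555555555);
++ v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333);
++ v = (v & 0x0F0F0F0F0F0F0F0F) + ((v >> 4) & 0x0F0F0F0F0F0F0F0F);
++ v = (v & 0x00FF00FF00FF00FF) + ((v >> 8) & 0x00FF00FF00FF00FF);
++ v = (v & 0x0000FFFF0000FFFF) + ((v >> 16) & 0x0000FFFF0000FFFF);
++ (v & 0x00000000FFFFFFFF) + (v >> 32)
++ }
++ */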
++
++ // Algorithm from: https://blog.regehr.org/archives/1063
++ fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
++ let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
++ let shift = shift % max;
++ let lhs = self.shl(value, shift);
++ let result_and =
++ self.and(
++ self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
++ self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
++ );
++ let rhs = self.lshr(value, result_and);
++ self.or(lhs, rhs)
++ }
++
++ // Algorithm from: https://blog.regehr.org/archives/1063
++ fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
++ let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
++ let shift = shift % max;
++ let lhs = self.lshr(value, shift);
++ let result_and =
++ self.and(
++ self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
++ self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
++ );
++ let rhs = self.shl(value, result_and);
++ self.or(lhs, rhs)
++ }
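++ // Both rotations follow Regehr's UB-free pattern: reduce the shift modulo the
++ // width, then combine two shifts whose amounts can never equal the width.
++ // Plain-Rust sketch (illustrative — Rust has `rotate_left` built in):
++ /*
++ fn rotl32(x: u32, n: u32) -> u32 {
++ let n = n % 32;
++ (x << n) | (x >> (n.wrapping_neg() & 31))
++ }
++ */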
++
++ fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
++ let func = self.current_func.borrow().expect("func");
++
++ if signed {
++ // Algorithm from: https://stackoverflow.com/a/56531252/389119
++ let after_block = func.new_block("after");
++ let func_name =
++ match width {
++ 8 => "__builtin_add_overflow",
++ 16 => "__builtin_add_overflow",
++ 32 => "__builtin_sadd_overflow",
++ 64 => "__builtin_saddll_overflow",
++ 128 => "__builtin_add_overflow",
++ _ => unreachable!(),
++ };
++ let overflow_func = self.context.get_builtin_function(func_name);
++ let result_type = lhs.get_type();
++ let res = func.new_local(None, result_type, "saturating_sum");
++ let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
++
++ let then_block = func.new_block("then");
++
++ let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
++ let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
++ let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
++ self.context.new_rvalue_from_int(unsigned_type, 0)
++ );
++ let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
++ then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
++ then_block.end_with_jump(None, after_block);
++
++ self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
++
++ // NOTE: since jumps were added in a place rustc does not
++ // expect, the current blocks in the state need to be updated.
++ *self.current_block.borrow_mut() = Some(after_block);
++ self.block = Some(after_block);
++
++ res.to_rvalue()
++ }
++ else {
++ // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
++ let res = lhs + rhs;
++ let res_type = res.get_type();
++ let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
++ let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
++ res | value
++ }
++ }
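++ // The unsigned branch above is the locklessinc trick: when the addition
++ // wraps, `res < lhs` is 1, and negating it yields an all-ones mask that
++ // saturates the result. Plain-Rust sketch (illustrative only):
++ /*
++ fn saturating_add_u32(lhs: u32, rhs: u32) -> u32 {
++ let res = lhs.wrapping_add(rhs);
++ res | ((res < lhs) as u32).wrapping_neg()
++ }
++ */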
++
++ // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
++ fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
++ if signed {
++ // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
++ let func_name =
++ match width {
++ 8 => "__builtin_sub_overflow",
++ 16 => "__builtin_sub_overflow",
++ 32 => "__builtin_ssub_overflow",
++ 64 => "__builtin_ssubll_overflow",
++ 128 => "__builtin_sub_overflow",
++ _ => unreachable!(),
++ };
++ let overflow_func = self.context.get_builtin_function(func_name);
++ let result_type = lhs.get_type();
++ let func = self.current_func.borrow().expect("func");
++ let res = func.new_local(None, result_type, "saturating_diff");
++ let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
++
++ let then_block = func.new_block("then");
++ let after_block = func.new_block("after");
++
++ let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
++ let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
++ let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
++ self.context.new_rvalue_from_int(unsigned_type, 0)
++ );
++ let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
++ then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
++ then_block.end_with_jump(None, after_block);
++
++ self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
++
++ // NOTE: since jumps were added in a place rustc does not
++ // expect, the current blocks in the state need to be updated.
++ *self.current_block.borrow_mut() = Some(after_block);
++ self.block = Some(after_block);
++
++ res.to_rvalue()
++ }
++ else {
++ let res = lhs - rhs;
++ let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
++ let comparison = self.context.new_cast(None, comparison, lhs.get_type());
++ let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
++ self.and(res, unary_op)
++ }
++ }
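++
++ // The matching sketch for unsigned saturating subtraction (the `else`
++ // branch above): `res <= lhs` holds exactly when no borrow occurred, so
++ // the negated flag masks the result to zero on underflow (u32 for
++ // illustration):
++ //
++ // fn saturating_sub_u32(a: u32, b: u32) -> u32 {
++ //     let res = a.wrapping_sub(b);
++ //     let mask = ((res <= a) as u32).wrapping_neg();
++ //     // AND keeps `res` when valid, and clamps to 0 on underflow.
++ //     res & mask
++ // }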
++}
++
++fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
++ if bx.sess().panic_strategy() == PanicStrategy::Abort {
++ bx.call(try_func, &[data], None);
++ // Return 0 unconditionally from the intrinsic call;
++ // we can never unwind.
++ let ret_align = bx.tcx.data_layout.i32_align.abi;
++ bx.store(bx.const_i32(0), dest, ret_align);
++ }
++ else if wants_msvc_seh(bx.sess()) {
++ unimplemented!();
++ //codegen_msvc_try(bx, try_func, data, catch_func, dest);
++ }
++ else {
++ unimplemented!();
++ //codegen_gnu_try(bx, try_func, data, catch_func, dest);
++ }
++}
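++
++// A sketch of the contract lowered by `try_intrinsic` (mirroring the
++// commented-out GNU/MSVC paths below): the shim returns 0 when
++// `try_func(data)` returns normally and 1 when a panic was caught, in which
++// case `catch_func(data, exception_ptr)` runs first.
++//
++//     // conceptually: fn rust_try(try_func, data, catch_func) -> i32
++//     //   0 => returned normally, 1 => panic caught
++//
++// Under `PanicStrategy::Abort` a panic aborts instead of unwinding, so
++// storing a constant 0 into `dest` is always correct and `catch_func` is
++// never called.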
++
++// MSVC's definition of the `rust_try` function.
++//
++// This implementation uses the new exception handling instructions in LLVM
++// that support SEH on MSVC targets. Although these instructions are meant
++// to work for all targets, as of this writing LLVM does not recommend using
++// them, as the old ones are still better optimized.
++/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
++ unimplemented!();
++ /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
++ bx.set_personality_fn(bx.eh_personality());
++ bx.sideeffect();
++
++ let mut normal = bx.build_sibling_block("normal");
++ let mut catchswitch = bx.build_sibling_block("catchswitch");
++ let mut catchpad = bx.build_sibling_block("catchpad");
++ let mut caught = bx.build_sibling_block("caught");
++
++ let try_func = llvm::get_param(bx.llfn(), 0);
++ let data = llvm::get_param(bx.llfn(), 1);
++ let catch_func = llvm::get_param(bx.llfn(), 2);
++
++ // We're generating an IR snippet that looks like:
++ //
++ // declare i32 @rust_try(%try_func, %data, %catch_func) {
++ // %slot = alloca u8*
++ // invoke %try_func(%data) to label %normal unwind label %catchswitch
++ //
++ // normal:
++ // ret i32 0
++ //
++ // catchswitch:
++ // %cs = catchswitch within none [%catchpad] unwind to caller
++ //
++ // catchpad:
++ // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
++ // %ptr = load %slot
++ // call %catch_func(%data, %ptr)
++ // catchret from %tok to label %caught
++ //
++ // caught:
++ // ret i32 1
++ // }
++ //
++ // This structure follows the basic usage of throw/try/catch in LLVM.
++ // For example, compile this C++ snippet to see what LLVM generates:
++ //
++ // #include <stdint.h>
++ //
++ // struct rust_panic {
++ // rust_panic(const rust_panic&);
++ // ~rust_panic();
++ //
++ // uint64_t x[2];
++ // };
++ //
++ // int __rust_try(
++ // void (*try_func)(void*),
++ // void *data,
++ // void (*catch_func)(void*, void*) noexcept
++ // ) {
++ // try {
++ // try_func(data);
++ // return 0;
++ // } catch(rust_panic& a) {
++ // catch_func(data, &a);
++ // return 1;
++ // }
++ // }
++ //
++ // More information can be found in libstd's seh.rs implementation.
++ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
++ let slot = bx.alloca(bx.type_i8p(), ptr_align);
++ bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
++
++ normal.ret(bx.const_i32(0));
++
++ let cs = catchswitch.catch_switch(None, None, 1);
++ catchswitch.add_handler(cs, catchpad.llbb());
++
++ // We can't use the TypeDescriptor defined in libpanic_unwind because it
++ // might be in another DLL and the SEH encoding only supports specifying
++ // a TypeDescriptor from the current module.
++ //
++ // However, this isn't an issue since the MSVC runtime uses string
++ // comparison on the type name to match TypeDescriptors rather than
++ // pointer equality.
++ //
++ // So instead we generate a new TypeDescriptor in each module that uses
++ // `try` and let the linker merge duplicate definitions in the same
++ // module.
++ //
++ // When modifying, make sure that the type_name string exactly matches
++ // the one used in src/libpanic_unwind/seh.rs.
++ let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
++ let type_name = bx.const_bytes(b"rust_panic\0");
++ let type_info =
++ bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
++ let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
++ unsafe {
++ llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
++ llvm::SetUniqueComdat(bx.llmod, tydesc);
++ llvm::LLVMSetInitializer(tydesc, type_info);
++ }
++
++ // The flag value of 8 indicates that we are catching the exception by
++ // reference instead of by value. We can't use catch by value because
++ // that requires copying the exception object, which we don't support
++ // since our exception object effectively contains a Box.
++ //
++ // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
++ let flags = bx.const_i32(8);
++ let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
++ let ptr = catchpad.load(slot, ptr_align);
++ catchpad.call(catch_func, &[data, ptr], Some(&funclet));
++
++ catchpad.catch_ret(&funclet, caught.llbb());
++
++ caught.ret(bx.const_i32(1));
++ });
++
++ // Note that no invoke is used here because by definition this function
++ // can't panic (that's what it's catching).
++ let ret = bx.call(llfn, &[try_func, data, catch_func], None);
++ let i32_align = bx.tcx().data_layout.i32_align.abi;
++ bx.store(ret, dest, i32_align);*/
++}*/
++
++// Definition of the standard `try` function for Rust using the GNU-like model
++// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
++// instructions).
++//
++// This codegen is a little surprising because we always call a shim
++// function instead of inlining the call to `invoke` manually here. This is done
++// because in LLVM we're only allowed to have one personality per function
++// definition. The call to the `try` intrinsic is being inlined into the
++// function calling it, and that function may already have other personality
++// functions in play. By calling a shim we're guaranteed that our shim will have
++// the right personality function.
++/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
++ unimplemented!();
++ /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
++ // Codegens the shims described above:
++ //
++ // bx:
++ // invoke %try_func(%data) normal %normal unwind %catch
++ //
++ // normal:
++ // ret 0
++ //
++ // catch:
++ // (%ptr, _) = landingpad
++ // call %catch_func(%data, %ptr)
++ // ret 1
++
++ bx.sideeffect();
++
++ let mut then = bx.build_sibling_block("then");
++ let mut catch = bx.build_sibling_block("catch");
++
++ let try_func = llvm::get_param(bx.llfn(), 0);
++ let data = llvm::get_param(bx.llfn(), 1);
++ let catch_func = llvm::get_param(bx.llfn(), 2);
++ bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
++ then.ret(bx.const_i32(0));
++
++ // Type indicator for the exception being thrown.
++ //
++ // The first value in this tuple is a pointer to the exception object
++ // being thrown. The second value is a "selector" indicating which of
++ // the landing pad clauses the exception's type had been matched to.
++ // rust_try ignores the selector.
++ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
++ let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
++ let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
++ Some(tydesc) => {
++ let tydesc = bx.get_static(tydesc);
++ bx.bitcast(tydesc, bx.type_i8p())
++ }
++ None => bx.const_null(bx.type_i8p()),
++ };
++ catch.add_clause(vals, tydesc);
++ let ptr = catch.extract_value(vals, 0);
++ catch.call(catch_func, &[data, ptr], None);
++ catch.ret(bx.const_i32(1));
++ });
++
++ // Note that no invoke is used here because by definition this function
++ // can't panic (that's what it's catching).
++ let ret = bx.call(llfn, &[try_func, data, catch_func], None);
++ let i32_align = bx.tcx().data_layout.i32_align.abi;
++ bx.store(ret, dest, i32_align);*/
++}*/
--- /dev/null
--- /dev/null
++use gccjit::{RValue, Type};
++use rustc_codegen_ssa::base::compare_simd_types;
++use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
++use rustc_codegen_ssa::mir::operand::OperandRef;
++use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
++use rustc_hir as hir;
++use rustc_middle::span_bug;
++use rustc_middle::ty::layout::HasTyCtxt;
++use rustc_middle::ty::{self, Ty};
++use rustc_span::{Span, Symbol, sym};
++
++use crate::builder::Builder;
++
++pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
++ //println!("Generic simd: {}", name);
++
++ // macros for error handling:
++ macro_rules! emit_error {
++ ($msg: tt) => {
++ emit_error!($msg, )
++ };
++ ($msg: tt, $($fmt: tt)*) => {
++ span_invalid_monomorphization_error(
++ bx.sess(), span,
++ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
++ name, $($fmt)*));
++ }
++ }
++
++ macro_rules! return_error {
++ ($($fmt: tt)*) => {
++ {
++ emit_error!($($fmt)*);
++ return Err(());
++ }
++ }
++ }
++
++ macro_rules! require {
++ ($cond: expr, $($fmt: tt)*) => {
++ if !$cond {
++ return_error!($($fmt)*);
++ }
++ };
++ }
++
++ macro_rules! require_simd {
++ ($ty: expr, $position: expr) => {
++ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
++ };
++ }
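++
++ // As an example of the expansion, `require_simd!(ret_ty, "return")` becomes:
++ //
++ // if !ret_ty.is_simd() {
++ //     emit_error!("expected SIMD {} type, found non-SIMD `{}`", "return", ret_ty);
++ //     return Err(());
++ // }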
++
++ let tcx = bx.tcx();
++ let sig =
++ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
++ let arg_tys = sig.inputs();
++ let name_str = &*name.as_str();
++
++ /*if name == sym::simd_select_bitmask {
++ let in_ty = arg_tys[0];
++ let m_len = match in_ty.kind() {
++ // Note that this `.unwrap()` crashes for isize/usize, that's sort
++ // of intentional as there's not currently a use case for that.
++ ty::Int(i) => i.bit_width().unwrap(),
++ ty::Uint(i) => i.bit_width().unwrap(),
++ _ => return_error!("`{}` is not an integral type", in_ty),
++ };
++ require_simd!(arg_tys[1], "argument");
++ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
++ require!(
++ // Allow masks for vectors with fewer than 8 elements to be
++ // represented with a u8 or i8.
++ m_len == v_len || (m_len == 8 && v_len < 8),
++ "mismatched lengths: mask length `{}` != other vector length `{}`",
++ m_len,
++ v_len
++ );
++ let i1 = bx.type_i1();
++ let im = bx.type_ix(v_len);
++ let i1xn = bx.type_vector(i1, v_len);
++ let m_im = bx.trunc(args[0].immediate(), im);
++ let m_i1s = bx.bitcast(m_im, i1xn);
++ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
++ }*/
++
++ // every intrinsic below takes a SIMD vector as its first argument
++ require_simd!(arg_tys[0], "input");
++ let in_ty = arg_tys[0];
++
++ let comparison = match name {
++ sym::simd_eq => Some(hir::BinOpKind::Eq),
++ sym::simd_ne => Some(hir::BinOpKind::Ne),
++ sym::simd_lt => Some(hir::BinOpKind::Lt),
++ sym::simd_le => Some(hir::BinOpKind::Le),
++ sym::simd_gt => Some(hir::BinOpKind::Gt),
++ sym::simd_ge => Some(hir::BinOpKind::Ge),
++ _ => None,
++ };
++
++ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
++ if let Some(cmp_op) = comparison {
++ require_simd!(ret_ty, "return");
++
++ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
++ require!(
++ in_len == out_len,
++ "expected return type with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ in_len,
++ in_ty,
++ ret_ty,
++ out_len
++ );
++ require!(
++ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
++ "expected return type with integer elements, found `{}` with non-integer `{}`",
++ ret_ty,
++ out_ty
++ );
++
++ return Ok(compare_simd_types(
++ bx,
++ args[0].immediate(),
++ args[1].immediate(),
++ in_elem,
++ llret_ty,
++ cmp_op,
++ ));
++ }
++
++ if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
++ let n: u64 = stripped.parse().unwrap_or_else(|_| {
++ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
++ });
++
++ require_simd!(ret_ty, "return");
++
++ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
++ require!(
++ out_len == n,
++ "expected return type of length {}, found `{}` with length {}",
++ n,
++ ret_ty,
++ out_len
++ );
++ require!(
++ in_elem == out_ty,
++ "expected return element type `{}` (element of input `{}`), \
++ found `{}` with element type `{}`",
++ in_elem,
++ in_ty,
++ ret_ty,
++ out_ty
++ );
++
++ //let total_len = u128::from(in_len) * 2;
++
++ let vector = args[2].immediate();
++
++ // TODO:
++ /*let indices: Option<Vec<_>> = (0..n)
++ .map(|i| {
++ let arg_idx = i;
++ let val = bx.const_get_vector_element(vector, i as u64);
++ match bx.const_to_opt_u128(val, true) {
++ None => {
++ emit_error!("shuffle index #{} is not a constant", arg_idx);
++ None
++ }
++ Some(idx) if idx >= total_len => {
++ emit_error!(
++ "shuffle index #{} is out of bounds (limit {})",
++ arg_idx,
++ total_len
++ );
++ None
++ }
++ Some(idx) => Some(bx.const_i32(idx as i32)),
++ }
++ })
++ .collect();
++ let indices = match indices {
++ Some(i) => i,
++ None => return Ok(bx.const_null(llret_ty)),
++ };*/
++
++ return Ok(bx.shuffle_vector(
++ args[0].immediate(),
++ args[1].immediate(),
++ vector,
++ ));
++ }
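++
++ // For example, the monomorphization `simd_shuffle4` reaches this point with
++ // `stripped == "4"`, so `n == 4`: the return vector must have 4 lanes and
++ // the third argument supplies the 4 lane indices.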
++
++ /*if name == sym::simd_insert {
++ require!(
++ in_elem == arg_tys[2],
++ "expected inserted type `{}` (element of input `{}`), found `{}`",
++ in_elem,
++ in_ty,
++ arg_tys[2]
++ );
++ return Ok(bx.insert_element(
++ args[0].immediate(),
++ args[2].immediate(),
++ args[1].immediate(),
++ ));
++ }
++ if name == sym::simd_extract {
++ require!(
++ ret_ty == in_elem,
++ "expected return type `{}` (element of input `{}`), found `{}`",
++ in_elem,
++ in_ty,
++ ret_ty
++ );
++ return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
++ }
++
++ if name == sym::simd_select {
++ let m_elem_ty = in_elem;
++ let m_len = in_len;
++ require_simd!(arg_tys[1], "argument");
++ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
++ require!(
++ m_len == v_len,
++ "mismatched lengths: mask length `{}` != other vector length `{}`",
++ m_len,
++ v_len
++ );
++ match m_elem_ty.kind() {
++ ty::Int(_) => {}
++ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
++ }
++ // truncate the mask to a vector of i1s
++ let i1 = bx.type_i1();
++ let i1xn = bx.type_vector(i1, m_len as u64);
++ let m_i1s = bx.trunc(args[0].immediate(), i1xn);
++ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
++ }
++
++ if name == sym::simd_bitmask {
++ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
++ // vector mask and returns an unsigned integer containing the most
++ // significant bit (MSB) of each lane.
++
++ // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
++ // trailing bits.
++ let expected_int_bits = in_len.max(8);
++ match ret_ty.kind() {
++ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
++ _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
++ }
++
++ // Integer vector <i{in_bitwidth} x in_len>:
++ let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
++ ty::Int(i) => (
++ args[0].immediate(),
++ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
++ ),
++ ty::Uint(i) => (
++ args[0].immediate(),
++ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
++ ),
++ _ => return_error!(
++ "vector argument `{}`'s element type `{}`, expected integer element type",
++ in_ty,
++ in_elem
++ ),
++ };
++
++ // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
++ let shift_indices =
++ vec![
++ bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
++ in_len as _
++ ];
++ let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
++ // Truncate vector to an <i1 x N>
++ let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
++ // Bitcast <i1 x N> to iN:
++ let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
++ // Zero-extend iN to the bitmask type:
++ return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
++ }
++
++ fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>(
++ name: Symbol,
++ in_elem: &::rustc_middle::ty::TyS<'_>,
++ in_ty: &::rustc_middle::ty::TyS<'_>,
++ in_len: u64,
++ bx: &mut Builder<'a, 'gcc, 'tcx>,
++ span: Span,
++ args: &[OperandRef<'tcx, RValue<'gcc>>],
++ ) -> Result<RValue<'gcc>, ()> {
++ macro_rules! emit_error {
++ ($msg: tt) => {
++ emit_error!($msg, )
++ };
++ ($msg: tt, $($fmt: tt)*) => {
++ span_invalid_monomorphization_error(
++ bx.sess(), span,
++ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
++ name, $($fmt)*));
++ }
++ }
++ macro_rules! return_error {
++ ($($fmt: tt)*) => {
++ {
++ emit_error!($($fmt)*);
++ return Err(());
++ }
++ }
++ }
++
++ let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
++ let elem_ty = bx.cx.type_float_from_ty(*f);
++ match f.bit_width() {
++ 32 => ("f32", elem_ty),
++ 64 => ("f64", elem_ty),
++ _ => {
++ return_error!(
++ "unsupported element type `{}` of floating-point vector `{}`",
++ f.name_str(),
++ in_ty
++ );
++ }
++ }
++ } else {
++ return_error!("`{}` is not a floating-point type", in_ty);
++ };
++
++ let vec_ty = bx.type_vector(elem_ty, in_len);
++
++ let (intr_name, fn_ty) = match name {
++ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
++ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
++ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
++ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
++ sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
++ _ => return_error!("unrecognized intrinsic `{}`", name),
++ };
++ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
++ let f = bx.declare_cfn(&llvm_name, fn_ty);
++ let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
++ Ok(c)
++ }
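++
++ // For example, `simd_fsqrt` on an `f32x4` input builds the name
++ // `llvm.sqrt.v4f32` with type `fn(f32x4) -> f32x4`; `declare_cfn` is then
++ // expected to resolve that LLVM-style name to an equivalent function.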
++
++ if std::matches!(
++ name,
++ sym::simd_ceil
++ | sym::simd_fabs
++ | sym::simd_fcos
++ | sym::simd_fexp2
++ | sym::simd_fexp
++ | sym::simd_flog10
++ | sym::simd_flog2
++ | sym::simd_flog
++ | sym::simd_floor
++ | sym::simd_fma
++ | sym::simd_fpow
++ | sym::simd_fpowi
++ | sym::simd_fsin
++ | sym::simd_fsqrt
++ | sym::simd_round
++ | sym::simd_trunc
++ ) {
++ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
++ }
++
++ // FIXME: use:
++ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
++ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
++ fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
++ let p0s: String = "p0".repeat(no_pointers);
++ match *elem_ty.kind() {
++ ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
++ ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
++ ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
++ _ => unreachable!(),
++ }
++ }
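++
++ // For example, `llvm_vector_str` with `elem_ty == i32`, `vec_len == 4` and
++ // `no_pointers == 0` yields "v4i32"; with `no_pointers == 1` it yields
++ // "v4p0i32", the suffixes LLVM uses in its masked gather/scatter names.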
++
++ fn gcc_vector_ty<'gcc>(
++ cx: &CodegenCx<'gcc, '_>,
++ elem_ty: Ty<'_>,
++ vec_len: u64,
++ mut no_pointers: usize,
++ ) -> Type<'gcc> {
++ // FIXME: use cx.layout_of(ty).llvm_type() ?
++ let mut elem_ty = match *elem_ty.kind() {
++ ty::Int(v) => cx.type_int_from_ty(v),
++ ty::Uint(v) => cx.type_uint_from_ty(v),
++ ty::Float(v) => cx.type_float_from_ty(v),
++ _ => unreachable!(),
++ };
++ while no_pointers > 0 {
++ elem_ty = cx.type_ptr_to(elem_ty);
++ no_pointers -= 1;
++ }
++ cx.type_vector(elem_ty, vec_len)
++ }
++
++ if name == sym::simd_gather {
++ // simd_gather(values: <N x T>, pointers: <N x *_ T>,
++ // mask: <N x i{M}>) -> <N x T>
++ // * N: number of elements in the input vectors
++ // * T: type of the element to load
++ // * M: any integer width is supported, will be truncated to i1
++
++ // All types must be simd vector types
++ require_simd!(in_ty, "first");
++ require_simd!(arg_tys[1], "second");
++ require_simd!(arg_tys[2], "third");
++ require_simd!(ret_ty, "return");
++
++ // Of the same length:
++ let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
++ let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
++ require!(
++ in_len == out_len,
++ "expected {} argument with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ "second",
++ in_len,
++ in_ty,
++ arg_tys[1],
++ out_len
++ );
++ require!(
++ in_len == out_len2,
++ "expected {} argument with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ "third",
++ in_len,
++ in_ty,
++ arg_tys[2],
++ out_len2
++ );
++
++ // The return type must match the first argument type
++ require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
++
++ // Counts the levels of pointer indirection of the type.
++ fn ptr_count(t: Ty<'_>) -> usize {
++ match t.kind() {
++ ty::RawPtr(p) => 1 + ptr_count(p.ty),
++ _ => 0,
++ }
++ }
++
++ // Non-ptr type
++ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
++ match t.kind() {
++ ty::RawPtr(p) => non_ptr(p.ty),
++ _ => t,
++ }
++ }
++
++ // The second argument must be a simd vector with an element type that's a pointer
++ // to the element type of the first argument
++ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
++ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
++ let (pointer_count, underlying_ty) = match element_ty1.kind() {
++ ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
++ _ => {
++ require!(
++ false,
++ "expected element type `{}` of second argument `{}` \
++ to be a pointer to the element type `{}` of the first \
++ argument `{}`, found `{}` != `*_ {}`",
++ element_ty1,
++ arg_tys[1],
++ in_elem,
++ in_ty,
++ element_ty1,
++ in_elem
++ );
++ unreachable!();
++ }
++ };
++ assert!(pointer_count > 0);
++ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
++ assert_eq!(underlying_ty, non_ptr(element_ty0));
++
++ // The element type of the third argument must be a signed integer type of any width:
++ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
++ match element_ty2.kind() {
++ ty::Int(_) => (),
++ _ => {
++ require!(
++ false,
++ "expected element type `{}` of third argument `{}` \
++ to be a signed integer type",
++ element_ty2,
++ arg_tys[2]
++ );
++ }
++ }
++
++ // Alignment of T, must be a constant integer value:
++ let alignment_ty = bx.type_i32();
++ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
++
++ // Truncate the mask vector to a vector of i1s:
++ let (mask, mask_ty) = {
++ let i1 = bx.type_i1();
++ let i1xn = bx.type_vector(i1, in_len);
++ (bx.trunc(args[2].immediate(), i1xn), i1xn)
++ };
++
++ // Type of the vector of pointers:
++ let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
++ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
++
++ // Type of the vector of elements:
++ let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
++ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
++
++ let llvm_intrinsic =
++ format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
++ let f = bx.declare_cfn(
++ &llvm_intrinsic,
++ bx.type_func(
++ &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
++ llvm_elem_vec_ty,
++ ),
++ );
++ let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
++ return Ok(v);
++ }
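++
++ // For example, gathering `f32x4` through a vector of `*const f32` builds
++ // `llvm.masked.gather.v4f32.v4p0f32` with the argument order
++ // (pointers, alignment, mask, passthrough) used in the call above.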
++
++ if name == sym::simd_scatter {
++ // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
++ // mask: <N x i{M}>) -> ()
++ // * N: number of elements in the input vectors
++ // * T: type of the element to load
++ // * M: any integer width is supported, will be truncated to i1
++
++ // All types must be simd vector types
++ require_simd!(in_ty, "first");
++ require_simd!(arg_tys[1], "second");
++ require_simd!(arg_tys[2], "third");
++
++ // Of the same length:
++ let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
++ let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
++ require!(
++ in_len == element_len1,
++ "expected {} argument with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ "second",
++ in_len,
++ in_ty,
++ arg_tys[1],
++ element_len1
++ );
++ require!(
++ in_len == element_len2,
++ "expected {} argument with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ "third",
++ in_len,
++ in_ty,
++ arg_tys[2],
++ element_len2
++ );
++
++ // Counts the levels of pointer indirection of the type.
++ fn ptr_count(t: Ty<'_>) -> usize {
++ match t.kind() {
++ ty::RawPtr(p) => 1 + ptr_count(p.ty),
++ _ => 0,
++ }
++ }
++
++ // Non-ptr type
++ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
++ match t.kind() {
++ ty::RawPtr(p) => non_ptr(p.ty),
++ _ => t,
++ }
++ }
++
++ // The second argument must be a simd vector with an element type that's a pointer
++ // to the element type of the first argument
++ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
++ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
++ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
++ let (pointer_count, underlying_ty) = match element_ty1.kind() {
++ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
++ (ptr_count(element_ty1), non_ptr(element_ty1))
++ }
++ _ => {
++ require!(
++ false,
++ "expected element type `{}` of second argument `{}` \
++ to be a pointer to the element type `{}` of the first \
++ argument `{}`, found `{}` != `*mut {}`",
++ element_ty1,
++ arg_tys[1],
++ in_elem,
++ in_ty,
++ element_ty1,
++ in_elem
++ );
++ unreachable!();
++ }
++ };
++ assert!(pointer_count > 0);
++ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
++ assert_eq!(underlying_ty, non_ptr(element_ty0));
++
++ // The element type of the third argument must be a signed integer type of any width:
++ match element_ty2.kind() {
++ ty::Int(_) => (),
++ _ => {
++ require!(
++ false,
++ "expected element type `{}` of third argument `{}` \
++ to be a signed integer type",
++ element_ty2,
++ arg_tys[2]
++ );
++ }
++ }
++
++ // Alignment of T, must be a constant integer value:
++ let alignment_ty = bx.type_i32();
++ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
++
++ // Truncate the mask vector to a vector of i1s:
++ let (mask, mask_ty) = {
++ let i1 = bx.type_i1();
++ let i1xn = bx.type_vector(i1, in_len);
++ (bx.trunc(args[2].immediate(), i1xn), i1xn)
++ };
++
++ let ret_t = bx.type_void();
++
++ // Type of the vector of pointers:
++ let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
++ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
++
++ // Type of the vector of elements:
++ let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
++ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
++
++ let llvm_intrinsic =
++ format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
++ let f = bx.declare_cfn(
++ &llvm_intrinsic,
++ bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
++ );
++ let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
++ return Ok(v);
++ }
++
++ macro_rules! arith_red {
++ ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
++ $identity:expr) => {
++ if name == sym::$name {
++ require!(
++ ret_ty == in_elem,
++ "expected return type `{}` (element of input `{}`), found `{}`",
++ in_elem,
++ in_ty,
++ ret_ty
++ );
++ return match in_elem.kind() {
++ ty::Int(_) | ty::Uint(_) => {
++ let r = bx.$integer_reduce(args[0].immediate());
++ if $ordered {
++ // if overflow occurs, the result is the
++ // mathematical result modulo 2^n:
++ Ok(bx.$op(args[1].immediate(), r))
++ } else {
++ Ok(bx.$integer_reduce(args[0].immediate()))
++ }
++ }
++ ty::Float(f) => {
++ let acc = if $ordered {
++ // ordered arithmetic reductions take an accumulator
++ args[1].immediate()
++ } else {
++ // unordered arithmetic reductions use the identity accumulator
++ match f.bit_width() {
++ 32 => bx.const_real(bx.type_f32(), $identity),
++ 64 => bx.const_real(bx.type_f64(), $identity),
++ v => return_error!(
++ r#"
++unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
++ sym::$name,
++ in_ty,
++ in_elem,
++ v,
++ ret_ty
++ ),
++ }
++ };
++ Ok(bx.$float_reduce(acc, args[0].immediate()))
++ }
++ _ => return_error!(
++ "unsupported {} from `{}` with element `{}` to `{}`",
++ sym::$name,
++ in_ty,
++ in_elem,
++ ret_ty
++ ),
++ };
++ }
++ };
++ }
++
++ arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
++ arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
++ arith_red!(
++ simd_reduce_add_unordered: vector_reduce_add,
++ vector_reduce_fadd_fast,
++ false,
++ add,
++ 0.0
++ );
++ arith_red!(
++ simd_reduce_mul_unordered: vector_reduce_mul,
++ vector_reduce_fmul_fast,
++ false,
++ mul,
++ 1.0
++ );
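++
++ // Reading the macro above for `simd_reduce_add_ordered`: integer vectors
++ // are reduced with `vector_reduce_add` and the scalar accumulator
++ // (`args[1]`) is added on top; float vectors pass the accumulator to
++ // `vector_reduce_fadd`. The unordered variants drop the accumulator and,
++ // for floats, seed the reduction with the identity (0.0 for add, 1.0 for
++ // mul) instead.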
++
++ macro_rules! minmax_red {
++ ($name:ident: $int_red:ident, $float_red:ident) => {
++ if name == sym::$name {
++ require!(
++ ret_ty == in_elem,
++ "expected return type `{}` (element of input `{}`), found `{}`",
++ in_elem,
++ in_ty,
++ ret_ty
++ );
++ return match in_elem.kind() {
++ ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
++ ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
++ ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
++ _ => return_error!(
++ "unsupported {} from `{}` with element `{}` to `{}`",
++ sym::$name,
++ in_ty,
++ in_elem,
++ ret_ty
++ ),
++ };
++ }
++ };
++ }
++
++ minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
++ minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
++
++ minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
++ minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
++
++ macro_rules! bitwise_red {
++ ($name:ident : $red:ident, $boolean:expr) => {
++ if name == sym::$name {
++ let input = if !$boolean {
++ require!(
++ ret_ty == in_elem,
++ "expected return type `{}` (element of input `{}`), found `{}`",
++ in_elem,
++ in_ty,
++ ret_ty
++ );
++ args[0].immediate()
++ } else {
++ match in_elem.kind() {
++ ty::Int(_) | ty::Uint(_) => {}
++ _ => return_error!(
++ "unsupported {} from `{}` with element `{}` to `{}`",
++ sym::$name,
++ in_ty,
++ in_elem,
++ ret_ty
++ ),
++ }
++
++ // boolean reductions operate on vectors of i1s:
++ let i1 = bx.type_i1();
++ let i1xn = bx.type_vector(i1, in_len as u64);
++ bx.trunc(args[0].immediate(), i1xn)
++ };
++ return match in_elem.kind() {
++ ty::Int(_) | ty::Uint(_) => {
++ let r = bx.$red(input);
++ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
++ }
++ _ => return_error!(
++ "unsupported {} from `{}` with element `{}` to `{}`",
++ sym::$name,
++ in_ty,
++ in_elem,
++ ret_ty
++ ),
++ };
++ }
++ };
++ }
++
++ bitwise_red!(simd_reduce_and: vector_reduce_and, false);
++ bitwise_red!(simd_reduce_or: vector_reduce_or, false);
++ bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
++ bitwise_red!(simd_reduce_all: vector_reduce_and, true);
++ bitwise_red!(simd_reduce_any: vector_reduce_or, true);
++
++ if name == sym::simd_cast {
++ require_simd!(ret_ty, "return");
++ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
++ require!(
++ in_len == out_len,
++ "expected return type with length {} (same as input type `{}`), \
++ found `{}` with length {}",
++ in_len,
++ in_ty,
++ ret_ty,
++ out_len
++ );
++ // casting cares about nominal type, not just structural type
++ if in_elem == out_elem {
++ return Ok(args[0].immediate());
++ }
++
++ enum Style {
++ Float,
++ Int(/* is signed? */ bool),
++ Unsupported,
++ }
++
++ let (in_style, in_width) = match in_elem.kind() {
++ // vectors of pointer-sized integers should've been
++ // disallowed before here, so this unwrap is safe.
++ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
++ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
++ ty::Float(f) => (Style::Float, f.bit_width()),
++ _ => (Style::Unsupported, 0),
++ };
++ let (out_style, out_width) = match out_elem.kind() {
++ ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
++ ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
++ ty::Float(f) => (Style::Float, f.bit_width()),
++ _ => (Style::Unsupported, 0),
++ };
++
++ match (in_style, out_style) {
++ (Style::Int(in_is_signed), Style::Int(_)) => {
++ return Ok(match in_width.cmp(&out_width) {
++ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
++ Ordering::Equal => args[0].immediate(),
++ Ordering::Less => {
++ if in_is_signed {
++ bx.sext(args[0].immediate(), llret_ty)
++ } else {
++ bx.zext(args[0].immediate(), llret_ty)
++ }
++ }
++ });
++ }
++ (Style::Int(in_is_signed), Style::Float) => {
++ return Ok(if in_is_signed {
++ bx.sitofp(args[0].immediate(), llret_ty)
++ } else {
++ bx.uitofp(args[0].immediate(), llret_ty)
++ });
++ }
++ (Style::Float, Style::Int(out_is_signed)) => {
++ return Ok(if out_is_signed {
++ bx.fptosi(args[0].immediate(), llret_ty)
++ } else {
++ bx.fptoui(args[0].immediate(), llret_ty)
++ });
++ }
++ (Style::Float, Style::Float) => {
++ return Ok(match in_width.cmp(&out_width) {
++ Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
++ Ordering::Equal => args[0].immediate(),
++ Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
++ });
++ }
++ _ => { /* Unsupported. Fallthrough. */ }
++ }
++ require!(
++ false,
++ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
++ in_ty,
++ in_elem,
++ ret_ty,
++ out_elem
++ );
++ }*/
++
++ macro_rules! arith_binary {
++ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
++ $(if name == sym::$name {
++ match in_elem.kind() {
++ $($(ty::$p(_))|* => {
++ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
++ })*
++ _ => {},
++ }
++ require!(false,
++ "unsupported operation on `{}` with element `{}`",
++ in_ty,
++ in_elem)
++ })*
++ }
++ }
++
++ arith_binary! {
++ simd_add: Uint, Int => add, Float => fadd;
++ simd_sub: Uint, Int => sub, Float => fsub;
++ simd_mul: Uint, Int => mul, Float => fmul;
++ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
++ simd_rem: Uint => urem, Int => srem, Float => frem;
++ simd_shl: Uint, Int => shl;
++ simd_shr: Uint => lshr, Int => ashr;
++ simd_and: Uint, Int => and;
++ simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors.
++ simd_xor: Uint, Int => xor;
++ /*simd_fmax: Float => maxnum;
++ simd_fmin: Float => minnum;*/
++ }
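++
++ // For instance, the `simd_add` line above expands to: on `Uint`/`Int`
++ // elements return `bx.add(args[0].immediate(), args[1].immediate())`, on
++ // `Float` elements return `bx.fadd(..)`, and otherwise report an
++ // unsupported operation on the vector type.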
++
++ /*macro_rules! arith_unary {
++ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
++ $(if name == sym::$name {
++ match in_elem.kind() {
++ $($(ty::$p(_))|* => {
++ return Ok(bx.$call(args[0].immediate()))
++ })*
++ _ => {},
++ }
++ require!(false,
++ "unsupported operation on `{}` with element `{}`",
++ in_ty,
++ in_elem)
++ })*
++ }
++ }
++
++ arith_unary! {
++ simd_neg: Int => neg, Float => fneg;
++ }
++
++ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
++ let lhs = args[0].immediate();
++ let rhs = args[1].immediate();
++ let is_add = name == sym::simd_saturating_add;
++ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
++ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
++ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
++ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
++ _ => {
++ return_error!(
++ "expected element type `{}` of vector type `{}` \
++ to be a signed or unsigned integer type",
++ arg_tys[0].simd_size_and_type(bx.tcx()).1,
++ arg_tys[0]
++ );
++ }
++ };
++ let llvm_intrinsic = &format!(
++ "llvm.{}{}.sat.v{}i{}",
++ if signed { 's' } else { 'u' },
++ if is_add { "add" } else { "sub" },
++ in_len,
++ elem_width
++ );
++ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
++
++ let f = bx.declare_cfn(
++ &llvm_intrinsic,
++ bx.type_func(&[vec_ty, vec_ty], vec_ty),
++ );
++ let v = bx.call(f, &[lhs, rhs], None);
++ return Ok(v);
++ }*/
++
++ unimplemented!("simd {}", name);
++
++ //span_bug!(span, "unknown SIMD intrinsic");
++}
--- /dev/null
--- /dev/null
++/*
++ * TODO: support #[inline] attributes.
++ * TODO: support LTO.
++ *
++ * TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh.
++ * TODO: remove the object dependency.
++ * TODO: remove the patches.
++ */
++
++#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
++#![allow(broken_intra_doc_links)]
++#![recursion_limit="256"]
++#![warn(rust_2018_idioms)]
++#![warn(unused_lifetimes)]
++
++/*extern crate flate2;
++extern crate libc;*/
++extern crate rustc_ast;
++extern crate rustc_codegen_ssa;
++extern crate rustc_data_structures;
++extern crate rustc_errors;
++//extern crate rustc_fs_util;
++extern crate rustc_hir;
++extern crate rustc_metadata;
++extern crate rustc_middle;
++extern crate rustc_mir;
++extern crate rustc_session;
++extern crate rustc_span;
++extern crate rustc_symbol_mangling;
++extern crate rustc_target;
++extern crate snap;
++
++// This prevents duplicating functions and statics that are already part of the host rustc process.
++#[allow(unused_extern_crates)]
++extern crate rustc_driver;
++
++mod abi;
++mod allocator;
++mod archive;
++mod asm;
++mod back;
++mod base;
++mod builder;
++mod callee;
++mod common;
++mod consts;
++mod context;
++mod coverageinfo;
++mod debuginfo;
++mod declare;
++mod intrinsic;
++mod mangled_std_symbols;
++mod mono_item;
++mod type_;
++mod type_of;
++mod va_arg;
++
++use std::any::Any;
++use std::sync::Arc;
++
++use gccjit::{Block, Context, FunctionType, OptimizationLevel};
++use rustc_ast::expand::allocator::AllocatorKind;
++use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
++use rustc_codegen_ssa::base::codegen_crate;
++use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
++use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
++use rustc_codegen_ssa::target_features::supported_target_features;
++use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
++use rustc_data_structures::fx::FxHashMap;
++use rustc_errors::{ErrorReported, Handler};
++use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
++use rustc_middle::middle::cstore::EncodedMetadata;
++use rustc_middle::ty::TyCtxt;
++use rustc_session::config::{CrateType, Lto, OptLevel, OutputFilenames};
++use rustc_session::Session;
++use rustc_span::Symbol;
++use rustc_span::fatal_error::FatalError;
++
++use crate::context::unit_name;
++
++pub struct PrintOnPanic<F: Fn() -> String>(pub F);
++
++impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
++ fn drop(&mut self) {
++ if ::std::thread::panicking() {
++ println!("{}", (self.0)());
++ }
++ }
++}
++
++#[derive(Clone)]
++pub struct GccCodegenBackend;
++
++impl CodegenBackend for GccCodegenBackend {
++ fn init(&self, sess: &Session) {
++ if sess.lto() != Lto::No {
++ sess.warn("LTO is not supported. You may get a linker error.");
++ }
++ }
++
++ fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
++ let target_cpu = target_cpu(tcx.sess);
++ let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
++
++ rustc_symbol_mangling::test::report_symbol_names(tcx);
++
++ Box::new(res)
++ }
++
++ fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
++ let (codegen_results, work_products) = ongoing_codegen
++ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
++ .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
++ .join(sess);
++
++ Ok((codegen_results, work_products))
++ }
++
++ fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> {
++ use rustc_codegen_ssa::back::link::link_binary;
++ if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) {
++ // TODO: remove when global initializers work without calling a function at runtime.
++ // HACK: since this codegen adds some symbols (e.g. __gccGlobalCrateInit) and the UI
++ // tests load libstd.so as a dynamic library, and rustc uses a version script to
++ // specify the symbols' visibility, we add * to export all symbols.
++ // It seems other symbols from libstd/libcore are causing some issues here as well.
++ symbols.push("*".to_string());
++ }
++
++ link_binary::<crate::archive::ArArchiveBuilder<'_>>(
++ sess,
++ &codegen_results,
++ outputs,
++ )
++ }
++
++ fn target_features(&self, sess: &Session) -> Vec<Symbol> {
++ target_features(sess)
++ }
++}
++
++impl ExtraBackendMethods for GccCodegenBackend {
++ fn new_metadata<'tcx>(&self, _tcx: TyCtxt<'tcx>, _mod_name: &str) -> Self::Module {
++ GccContext {
++ context: Context::default(),
++ }
++ }
++
++ fn write_compressed_metadata<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut Self::Module) {
++ base::write_compressed_metadata(tcx, metadata, gcc_module)
++ }
++
++ fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, kind: AllocatorKind, has_alloc_error_handler: bool) {
++ unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
++ }
++
++ fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
++ base::compile_codegen_unit(tcx, cgu_name)
++ }
++
++ fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
++ // TODO: set opt level.
++ Arc::new(|_| {
++ Ok(())
++ })
++ }
++
++ fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
++ unimplemented!();
++ }
++
++ fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
++ None
++ // TODO
++ //llvm_util::tune_cpu(sess)
++ }
++}
++
++pub struct ModuleBuffer;
++
++impl ModuleBufferMethods for ModuleBuffer {
++ fn data(&self) -> &[u8] {
++ unimplemented!();
++ }
++}
++
++pub struct ThinBuffer;
++
++impl ThinBufferMethods for ThinBuffer {
++ fn data(&self) -> &[u8] {
++ unimplemented!();
++ }
++}
++
++pub struct GccContext {
++ context: Context<'static>,
++}
++
++unsafe impl Send for GccContext {}
++// FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
++unsafe impl Sync for GccContext {}
++
++impl WriteBackendMethods for GccCodegenBackend {
++ type Module = GccContext;
++ type TargetMachine = ();
++ type ModuleBuffer = ModuleBuffer;
++ type Context = ();
++ type ThinData = ();
++ type ThinBuffer = ThinBuffer;
++
++ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
++ // TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
++ // NOTE: implemented elsewhere.
++ let module =
++ match modules.remove(0) {
++ FatLTOInput::InMemory(module) => module,
++ FatLTOInput::Serialized { .. } => {
++ unimplemented!();
++ /*info!("pushing serialized module {:?}", name);
++ let buffer = SerializedModule::Local(buffer);
++ serialized_modules.push((buffer, CString::new(name).unwrap()));*/
++ }
++ };
++ Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
++ }
++
++ fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
++ unimplemented!();
++ }
++
++ fn print_pass_timings(&self) {
++ unimplemented!();
++ }
++
++ unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
++ //if cgcx.lto == Lto::Fat {
++ //module.module_llvm.context.add_driver_option("-flto");
++ //}
++ module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
++ Ok(())
++ }
++
++ unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: &mut ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
++ unimplemented!();
++ }
++
++ unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
++ back::write::codegen(cgcx, diag_handler, module, config)
++ }
++
++ fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
++ unimplemented!();
++ }
++
++ fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
++ unimplemented!();
++ }
++
++ fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
++ // TODO
++ Ok(())
++ }
++
++ fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
++ back::write::link(cgcx, diag_handler, modules)
++ }
++}
++
++/*fn target_triple(sess: &Session) -> target_lexicon::Triple {
++ sess.target.llvm_target.parse().unwrap()
++}*/
++
++/// This is the entry point for a hot-plugged rustc_codegen_gccjit.
++#[no_mangle]
++pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
++ Box::new(GccCodegenBackend)
++}
++
++fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
++ match optlevel {
++ None => OptimizationLevel::None,
++ Some(level) => {
++ match level {
++ OptLevel::No => OptimizationLevel::None,
++ OptLevel::Less => OptimizationLevel::Limited,
++ OptLevel::Default => OptimizationLevel::Standard,
++ OptLevel::Aggressive => OptimizationLevel::Aggressive,
++ OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
++ }
++ },
++ }
++}
++
++fn create_function_calling_initializers<'gcc, 'tcx>(tcx: TyCtxt<'tcx>, context: &Context<'gcc>, block: Block<'gcc>) {
++ let codegen_units = tcx.collect_and_partition_mono_items(()).1;
++ for codegen_unit in codegen_units {
++ let codegen_init_func = context.new_function(None, FunctionType::Extern, context.new_type::<()>(), &[],
++ &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
++ block.add_eval(None, context.new_call(None, codegen_init_func, &[]));
++ }
++}
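++
++// For example (unit name purely illustrative), a codegen unit named `abc`
++// gets an extern declaration for `__gccGlobalInitabc` and a call to it
++// appended to `block`; the matching definitions are presumably emitted when
++// each codegen unit itself is compiled.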
++
++fn handle_native(name: &str) -> &str {
++ if name != "native" {
++ return name;
++ }
++
++ unimplemented!();
++ /*unsafe {
++ let mut len = 0;
++ let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
++ str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
++ }*/
++}
++
++pub fn target_cpu(sess: &Session) -> &str {
++ let name = sess.opts.cg.target_cpu.as_ref().unwrap_or(&sess.target.cpu);
++ handle_native(name)
++}
++
++pub fn target_features(sess: &Session) -> Vec<Symbol> {
++ supported_target_features(sess)
++ .iter()
++ .filter_map(
++ |&(feature, gate)| {
++ if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None }
++ },
++ )
++ .filter(|_feature| {
++ /*if feature.starts_with("sse") {
++ return true;
++ }*/
++ // TODO: implement a way to get enabled feature in libgccjit.
++ //println!("Feature: {}", feature);
++ /*let llvm_feature = to_llvm_feature(sess, feature);
++ let cstr = CString::new(llvm_feature).unwrap();
++ unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/
++ false
++ })
++ .map(|feature| Symbol::intern(feature))
++ .collect()
++}
--- /dev/null
--- /dev/null
++pub const ARGV_INIT_ARRAY: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY";
++pub const ARGV_INIT_WRAPPER: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY12init_wrapper";
++pub const ARGC: &str = "_ZN3std3sys4unix4args3imp4ARGC";
++pub const ARGV: &str = "_ZN3std3sys4unix4args3imp4ARGV";
--- /dev/null
--- /dev/null
++use rustc_codegen_ssa::traits::PreDefineMethods;
++use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
++use rustc_middle::mir::mono::{Linkage, Visibility};
++use rustc_middle::ty::{self, Instance, TypeFoldable};
++use rustc_middle::ty::layout::FnAbiExt;
++use rustc_span::def_id::DefId;
++use rustc_target::abi::LayoutOf;
++use rustc_target::abi::call::FnAbi;
++
++use crate::base;
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++
++impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
++ let attrs = self.tcx.codegen_fn_attrs(def_id);
++ let instance = Instance::mono(self.tcx, def_id);
++ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
++ let gcc_type = self.layout_of(ty).gcc_type(self, true);
++
++ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
++ let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section).unwrap_or_else(|| {
++ self.sess().span_fatal(
++ self.tcx.def_span(def_id),
++ &format!("symbol `{}` is already defined", symbol_name),
++ )
++ });
++
++ // TODO
++ /*unsafe {
++ llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage));
++ llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility));
++ }*/
++
++ self.instances.borrow_mut().insert(instance, global);
++ }
++
++ fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
++ assert!(!instance.substs.needs_infer() && !instance.substs.has_param_types_or_consts());
++
++ let fn_abi = FnAbi::of_instance(self, instance, &[]);
++ self.linkage.set(base::linkage_to_gcc(linkage));
++ let _decl = self.declare_fn(symbol_name, &fn_abi);
++ //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
++
++ // TODO: call set_link_section() to allow initializing argc/argv.
++ //base::set_link_section(decl, &attrs);
++ /*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
++ llvm::SetUniqueComdat(self.llmod, decl);
++ }*/
++
++ //debug!("predefine_fn: instance = {:?}", instance);
++
++ // TODO: use inline attribute from there in linkage.set() above:
++ //attributes::from_fn_attrs(self, decl, instance);
++
++ //self.instances.borrow_mut().insert(instance, decl);
++ }
++}
--- /dev/null
--- /dev/null
++use std::convert::TryInto;
++
++use gccjit::{RValue, Struct, Type};
++use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods};
++use rustc_codegen_ssa::common::TypeKind;
++use rustc_middle::bug;
++use rustc_middle::ty::layout::TyAndLayout;
++use rustc_target::abi::{AddressSpace, Align, Integer, Size};
++
++use crate::common::TypeReflection;
++use crate::context::CodegenCx;
++use crate::type_of::LayoutGccExt;
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
++ // gcc only supports 1, 2, 4, 8 or 16-byte integers.
++ let bytes = (num_bits / 8).next_power_of_two() as i32;
++ match bytes {
++ 1 => self.i8_type,
++ 2 => self.i16_type,
++ 4 => self.i32_type,
++ 8 => self.i64_type,
++ 16 => self.i128_type,
++ _ => panic!("unexpected num_bits: {}", num_bits),
++ }
++ /*
++ let bytes = (num_bits / 8).next_power_of_two() as i32;
++ println!("num_bits: {}, bytes: {}", num_bits, bytes);
++ self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer.
++ */
++ }
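++
++ // Worked cases for the rounding above: `type_ix(8)` -> 1 byte -> i8;
++ // `type_ix(24)` -> 3 bytes, rounded up to 4 -> i32; `type_ix(128)` ->
++ // 16 bytes -> i128.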
++
++ /*pub fn type_bool(&self) -> Type<'gcc> {
++ self.bool_type
++ }*/
++
++ pub fn type_void(&self) -> Type<'gcc> {
++ self.context.new_type::<()>()
++ }
++
++ pub fn type_size_t(&self) -> Type<'gcc> {
++ self.context.new_type::<usize>()
++ }
++
++ pub fn type_u8(&self) -> Type<'gcc> {
++ self.u8_type
++ }
++
++ pub fn type_u16(&self) -> Type<'gcc> {
++ self.u16_type
++ }
++
++ pub fn type_u32(&self) -> Type<'gcc> {
++ self.u32_type
++ }
++
++ pub fn type_u64(&self) -> Type<'gcc> {
++ self.u64_type
++ }
++
++ pub fn type_u128(&self) -> Type<'gcc> {
++ self.u128_type
++ }
++
++ pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
++ // FIXME(eddyb) We could find a better approximation if ity.align < align.
++ let ity = Integer::approximate_align(self, align);
++ self.type_from_integer(ity)
++ }
++
++ /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
++ match t {
++ ty::IntTy::Isize => self.type_isize(),
++ ty::IntTy::I8 => self.type_i8(),
++ ty::IntTy::I16 => self.type_i16(),
++ ty::IntTy::I32 => self.type_i32(),
++ ty::IntTy::I64 => self.type_i64(),
++ ty::IntTy::I128 => self.type_i128(),
++ }
++ }
++
++ pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
++ match t {
++ ty::UintTy::Usize => self.type_isize(),
++ ty::UintTy::U8 => self.type_i8(),
++ ty::UintTy::U16 => self.type_i16(),
++ ty::UintTy::U32 => self.type_i32(),
++ ty::UintTy::U64 => self.type_i64(),
++ ty::UintTy::U128 => self.type_i128(),
++ }
++ }
++
++ pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
++ match t {
++ ty::FloatTy::F32 => self.type_f32(),
++ ty::FloatTy::F64 => self.type_f64(),
++ }
++ }
++
++ pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
++ self.context.new_vector_type(ty, len)
++ }*/
++}
++
++impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn type_i1(&self) -> Type<'gcc> {
++ self.bool_type
++ }
++
++ fn type_i8(&self) -> Type<'gcc> {
++ self.i8_type
++ }
++
++ fn type_i16(&self) -> Type<'gcc> {
++ self.i16_type
++ }
++
++ fn type_i32(&self) -> Type<'gcc> {
++ self.i32_type
++ }
++
++ fn type_i64(&self) -> Type<'gcc> {
++ self.i64_type
++ }
++
++ fn type_i128(&self) -> Type<'gcc> {
++ self.i128_type
++ }
++
++ fn type_isize(&self) -> Type<'gcc> {
++ self.isize_type
++ }
++
++ fn type_f32(&self) -> Type<'gcc> {
++ self.context.new_type::<f32>()
++ }
++
++ fn type_f64(&self) -> Type<'gcc> {
++ self.context.new_type::<f64>()
++ }
++
++ fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
++ self.context.new_function_pointer_type(None, return_type, params, false)
++ }
++
++ fn type_struct(&self, fields: &[Type<'gcc>], _packed: bool) -> Type<'gcc> {
++ let types = fields.to_vec();
++ if let Some(typ) = self.struct_types.borrow().get(fields) {
++ return typ.clone();
++ }
++ let fields: Vec<_> = fields.iter().enumerate()
++ .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
++ .collect();
++ // TODO: use packed.
++ //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
++ //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type();
++ let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
++ self.struct_types.borrow_mut().insert(types, typ);
++ typ
++ }
++
++ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
++ if typ.is_integral() {
++ TypeKind::Integer
++ }
++ else if typ.is_vector().is_some() {
++ TypeKind::Vector
++ }
++ else {
++ // TODO
++ TypeKind::Void
++ }
++ }
++
++ fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
++ // TODO
++ /*assert_ne!(self.type_kind(ty), TypeKind::Function,
++ "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead"
++ );*/
++ ty.make_pointer()
++ }
++
++ fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
++ // TODO: use address_space
++ ty.make_pointer()
++ }
++
++ fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
++ if let Some(typ) = ty.is_array() {
++ typ
++ }
++ else if let Some(vector_type) = ty.is_vector() {
++ vector_type.get_element_type()
++ }
++ else if let Some(typ) = ty.get_pointee() {
++ typ
++ }
++ else {
++ unreachable!()
++ }
++ }
++
++ fn vector_length(&self, _ty: Type<'gcc>) -> usize {
++ unimplemented!();
++ //unsafe { llvm::LLVMGetVectorSize(ty) as usize }
++ }
++
++ fn float_width(&self, typ: Type<'gcc>) -> usize {
++ let f32 = self.context.new_type::<f32>();
++ let f64 = self.context.new_type::<f64>();
++ if typ == f32 {
++ 32
++ }
++ else if typ == f64 {
++ 64
++ }
++ else {
++ panic!("Cannot get width of float type {:?}", typ);
++ }
++ // TODO: support other sizes.
++ /*match self.type_kind(ty) {
++ TypeKind::Float => 32,
++ TypeKind::Double => 64,
++ TypeKind::X86_FP80 => 80,
++ TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
++ _ => bug!("llvm_float_width called on a non-float type"),
++ }*/
++ }
++
++ fn int_width(&self, typ: Type<'gcc>) -> u64 {
++ if typ.is_i8(self) || typ.is_u8(self) {
++ 8
++ }
++ else if typ.is_i16(self) || typ.is_u16(self) {
++ 16
++ }
++ else if typ.is_i32(self) || typ.is_u32(self) {
++ 32
++ }
++ else if typ.is_i64(self) || typ.is_u64(self) {
++ 64
++ }
++ else if typ.is_i128(self) || typ.is_u128(self) {
++ 128
++ }
++ else {
++ panic!("Cannot get width of int type {:?}", typ);
++ }
++ }
++
++ fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
++ value.get_type()
++ }
++}
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
++ let unit = Integer::approximate_align(self, align);
++ let size = size.bytes();
++ let unit_size = unit.size().bytes();
++ assert_eq!(size % unit_size, 0);
++ self.type_array(self.type_from_integer(unit), size / unit_size)
++ }
++
++ pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
++ // TODO: use packed.
++ let fields: Vec<_> = fields.iter().enumerate()
++ .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
++ .collect();
++ typ.set_fields(None, &fields);
++ }
++
++ /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
++ // TODO: use packed.
++ let fields: Vec<_> = fields.iter().enumerate()
++ .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
++ .collect();
++ return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type();
++ }*/
++
++ pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
++ self.context.new_opaque_struct_type(None, name)
++ }
++
++ pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
++ if let Some(struct_type) = ty.is_struct() {
++ if struct_type.get_field_count() == 0 {
++ // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
++ // size of usize::MAX in test_binary_search, we work around this by setting the size to
++ // zero for ZSTs.
++ // FIXME: fix gccjit API.
++ len = 0;
++ }
++ }
++
++ let len: i32 = len.try_into().expect("array len");
++
++ self.context.new_array_type(None, ty, len)
++ }
++}
++
++pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
++ //debug!("struct_fields: {:#?}", layout);
++ let field_count = layout.fields.count();
++
++ let mut packed = false;
++ let mut offset = Size::ZERO;
++ let mut prev_effective_align = layout.align.abi;
++ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
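++ // Each field is preceded by a padding filler, and sized layouts get a
++ // trailing filler, so the result has `1 + field_count * 2` entries (see the
++ // asserts below).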
++ for i in layout.fields.index_by_increasing_offset() {
++ let target_offset = layout.fields.offset(i as usize);
++ let field = layout.field(cx, i);
++ let effective_field_align =
++ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
++ packed |= effective_field_align < field.align.abi;
++
++ /*debug!(
++ "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \
++ effective_field_align: {}",
++ i,
++ field,
++ offset,
++ target_offset,
++ effective_field_align.bytes()
++ );*/
++ assert!(target_offset >= offset);
++ let padding = target_offset - offset;
++ let padding_align = prev_effective_align.min(effective_field_align);
++ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
++ result.push(cx.type_padding_filler(padding, padding_align));
++ //debug!(" padding before: {:?}", padding);
++
++ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box<Type>.
++ offset = target_offset + field.size;
++ prev_effective_align = effective_field_align;
++ }
++ if !layout.is_unsized() && field_count > 0 {
++ if offset > layout.size {
++ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
++ }
++ let padding = layout.size - offset;
++ let padding_align = prev_effective_align;
++ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
++ /*debug!(
++ "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}",
++ padding, offset, layout.size
++ );*/
++ result.push(cx.type_padding_filler(padding, padding_align));
++ assert_eq!(result.len(), 1 + field_count * 2);
++ } else {
++ //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size);
++ }
++
++ (result, packed)
++}
--- /dev/null
--- /dev/null
++use std::fmt::Write;
++
++use gccjit::{Struct, Type};
++use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
++use rustc_middle::bug;
++use rustc_middle::ty::{self, Ty, TypeFoldable};
++use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
++use rustc_middle::ty::print::with_no_trimmed_paths;
++use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, LayoutOf, Pointer, PointeeInfo, Size, TyAndLayoutMethods, Variants};
++use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
++
++use crate::abi::{FnAbiGccExt, GccType};
++use crate::context::CodegenCx;
++use crate::type_::struct_fields;
++
++impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
++ fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
++ use Integer::*;
++ match i {
++ I8 => self.type_u8(),
++ I16 => self.type_u16(),
++ I32 => self.type_u32(),
++ I64 => self.type_u64(),
++ I128 => self.type_u128(),
++ }
++ }
++}
++
++pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
++ match layout.abi {
++ Abi::Scalar(_) => bug!("handled elsewhere"),
++ Abi::Vector { ref element, count } => {
++ let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
++ return cx.context.new_vector_type(element, count);
++ },
++ Abi::ScalarPair(..) => {
++ return cx.type_struct(
++ &[
++ layout.scalar_pair_element_gcc_type(cx, 0, false),
++ layout.scalar_pair_element_gcc_type(cx, 1, false),
++ ],
++ false,
++ );
++ }
++ Abi::Uninhabited | Abi::Aggregate { .. } => {}
++ }
++
++ let name = match layout.ty.kind() {
++ // FIXME(eddyb) producing readable type names for trait objects can result
++ // in problematically distinct types due to HRTB and subtyping (see #47638).
++ // ty::Dynamic(..) |
++ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
++ if !cx.sess().fewer_names() =>
++ {
++ let mut name = with_no_trimmed_paths(|| layout.ty.to_string());
++ if let (&ty::Adt(def, _), &Variants::Single { index }) =
++ (layout.ty.kind(), &layout.variants)
++ {
++ if def.is_enum() && !def.variants.is_empty() {
++ write!(&mut name, "::{}", def.variants[index].ident).unwrap();
++ }
++ }
++ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
++ (layout.ty.kind(), &layout.variants)
++ {
++ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
++ }
++ Some(name)
++ }
++ ty::Adt(..) => {
++ // If `Some` is returned, then a named struct is created in LLVM. Name collisions are
++ // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names, that
++ // can improve perf.
++ // FIXME: I don't think that's true for libgccjit.
++ Some(String::new())
++ }
++ _ => None,
++ };
++
++ match layout.fields {
++ FieldsShape::Primitive | FieldsShape::Union(_) => {
++ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
++ let packed = false;
++ match name {
++ None => cx.type_struct(&[fill], packed),
++ Some(ref name) => {
++ let gcc_type = cx.type_named_struct(name);
++ cx.set_struct_body(gcc_type, &[fill], packed);
++ gcc_type.as_type()
++ },
++ }
++ }
++ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
++ FieldsShape::Arbitrary { .. } =>
++ match name {
++ None => {
++ let (gcc_fields, packed) = struct_fields(cx, layout);
++ cx.type_struct(&gcc_fields, packed)
++ },
++ Some(ref name) => {
++ let gcc_type = cx.type_named_struct(name);
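++ // NOTE: the fields are set later (see `gcc_type`): the struct might
++ // (indirectly) contain itself, so its body can only be filled in once
++ // this opaque type has been cached.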
++ *defer = Some((gcc_type, layout));
++ gcc_type.as_type()
++ },
++ },
++ }
++}
++
++pub trait LayoutGccExt<'tcx> {
++ fn is_gcc_immediate(&self) -> bool;
++ fn is_gcc_scalar_pair(&self) -> bool;
++ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
++ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
++ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
++ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
++ fn gcc_field_index(&self, index: usize) -> u64;
++ fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
++}
++
++impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
++ fn is_gcc_immediate(&self) -> bool {
++ match self.abi {
++ Abi::Scalar(_) | Abi::Vector { .. } => true,
++ Abi::ScalarPair(..) => false,
++ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
++ }
++ }
++
++ fn is_gcc_scalar_pair(&self) -> bool {
++ match self.abi {
++ Abi::ScalarPair(..) => true,
++ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
++ }
++ }
++
++ /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
++ /// The pointee type of the pointer in `PlaceRef` is always this type.
++ /// For sized types, it is also the right GCC type for an `alloca`
++ /// containing a value of that type, and most immediates (except `bool`).
++ /// Unsized types, however, are represented by a "minimal unit", e.g.
++ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
++ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
++ /// If the type is an unsized struct, the regular layout is generated,
++ /// with the inner-most trailing unsized field using the "minimal unit"
++ /// of that field's type - this is useful for taking the address of
++ /// that field and ensuring the struct has the right alignment.
++ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
++ if let Abi::Scalar(ref scalar) = self.abi {
++ // Use a different cache for scalars because pointers to DSTs
++ // can be either fat or thin (data pointers of fat pointers).
++ if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
++ return ty;
++ }
++ let ty =
++ match *self.ty.kind() {
++ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
++ cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
++ }
++ ty::Adt(def, _) if def.is_box() => {
++ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
++ }
++ ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
++ _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
++ };
++ cx.scalar_types.borrow_mut().insert(self.ty, ty);
++ return ty;
++ }
++
++ // Check the cache.
++ let variant_index =
++ match self.variants {
++ Variants::Single { index } => Some(index),
++ _ => None,
++ };
++ let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
++ if let Some(ty) = cached_type {
++ let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
++ if let Some((struct_type, layout)) = type_to_set_fields {
++ // Since we might be trying to generate a type containing another type which is not
++ // completely generated yet, we deferred setting the fields until now.
++ let (fields, packed) = struct_fields(cx, layout);
++ cx.set_struct_body(struct_type, &fields, packed);
++ }
++ return ty;
++ }
++
++ //debug!("gcc_type({:#?})", self);
++
++ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
++
++ // Make sure lifetimes are erased, to avoid generating distinct GCC
++ // types for Rust types that only differ in the choice of lifetimes.
++ let normal_ty = cx.tcx.erase_regions(self.ty);
++
++ let mut defer = None;
++ let ty =
++ if self.ty != normal_ty {
++ let mut layout = cx.layout_of(normal_ty);
++ if let Some(v) = variant_index {
++ layout = layout.for_variant(cx, v);
++ }
++ layout.gcc_type(cx, true)
++ }
++ else {
++ uncached_gcc_type(cx, *self, &mut defer)
++ };
++ //debug!("--> mapped {:#?} to ty={:?}", self, ty);
++
++ cx.types.borrow_mut().insert((self.ty, variant_index), ty);
++
++ if let Some((ty, layout)) = defer {
++ // TODO: do we still need this condition and the set_fields parameter?
++ //if set_fields {
++ let (fields, packed) = struct_fields(cx, layout);
++ cx.set_struct_body(ty, &fields, packed);
++ /*}
++ else {
++ // Since we might be trying to generate a type containing another type which is not
++ // completely generated yet, we don't set the fields right now, but we save the
++ // type to set the fields later.
++ cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout));
++ }*/
++ }
++
++ ty
++ }
++
++ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
++ if let Abi::Scalar(ref scalar) = self.abi {
++ if scalar.is_bool() {
++ return cx.type_i1();
++ }
++ }
++ self.gcc_type(cx, true)
++ }
++
++ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
++ match scalar.value {
++ Int(i, true) => cx.type_from_integer(i),
++ Int(i, false) => cx.type_from_unsigned_integer(i),
++ F32 => cx.type_f32(),
++ F64 => cx.type_f64(),
++ Pointer => {
++ // If we know the alignment, pick something better than i8.
++ let pointee =
++ if let Some(pointee) = self.pointee_info_at(cx, offset) {
++ cx.type_pointee_for_align(pointee.align)
++ }
++ else {
++ cx.type_i8()
++ };
++ cx.type_ptr_to(pointee)
++ }
++ }
++ }
++
++ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
++ // TODO: remove the LLVM hack below:
++ // HACK(eddyb) special-case fat pointers until LLVM removes
++ // pointee types, to avoid bitcasting every `OperandRef::deref`.
++ match self.ty.kind() {
++ ty::Ref(..) | ty::RawPtr(_) => {
++ return self.field(cx, index).gcc_type(cx, true);
++ }
++ ty::Adt(def, _) if def.is_box() => {
++ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
++ return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
++ }
++ _ => {}
++ }
++
++ let (a, b) = match self.abi {
++ Abi::ScalarPair(ref a, ref b) => (a, b),
++ _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
++ };
++ let scalar = [a, b][index];
++
++ // Make sure to return the same type `immediate_gcc_type` would when
++ // dealing with an immediate pair. This means that `(bool, bool)` is
++ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
++ // immediate, just like `bool` is typically `i8` in memory and only `i1`
++ // when immediate. We need to load/store `bool` as `i8` to avoid
++ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
++ // TODO: these bugs certainly don't happen in this case since the bool type is used instead of i1.
++ if /*immediate &&*/ scalar.is_bool() {
++ return cx.type_i1();
++ }
++
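++ // The second element starts after the first one, rounded up to the
++ // second element's alignment.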
++ let offset =
++ if index == 0 {
++ Size::ZERO
++ }
++ else {
++ a.value.size(cx).align_to(b.value.align(cx).abi)
++ };
++ self.scalar_gcc_type_at(cx, scalar, offset)
++ }
++
++ fn gcc_field_index(&self, index: usize) -> u64 {
++ match self.abi {
++ Abi::Scalar(_) | Abi::ScalarPair(..) => {
++ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
++ }
++ _ => {}
++ }
++ match self.fields {
++ FieldsShape::Primitive | FieldsShape::Union(_) => {
++ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
++ }
++
++ FieldsShape::Array { .. } => index as u64,
++
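++ // `struct_fields` interleaves a padding filler before every field, so the
++ // field with memory index `i` lives at GCC field index `1 + i * 2`.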
++ FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
++ }
++ }
++
++ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
++ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
++ return pointee;
++ }
++
++ let result = Ty::pointee_info_at(*self, cx, offset);
++
++ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
++ result
++ }
++}
++
++impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
++ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
++ layout.gcc_type(self, true)
++ }
++
++ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
++ layout.immediate_gcc_type(self)
++ }
++
++ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
++ layout.is_gcc_immediate()
++ }
++
++ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
++ layout.is_gcc_scalar_pair()
++ }
++
++ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
++ layout.gcc_field_index(index)
++ }
++
++ fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
++ layout.scalar_pair_element_gcc_type(self, index, immediate)
++ }
++
++ fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
++ ty.gcc_type(self)
++ }
++
++ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
++ fn_abi.ptr_to_gcc_type(self)
++ }
++
++ fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
++ unimplemented!();
++ //ty.gcc_type(self)
++ }
++}
--- /dev/null
--- /dev/null
++/*use gccjit::{RValue, ToRValue, Type};
++use rustc_codegen_ssa::mir::operand::OperandRef;
++use rustc_codegen_ssa::{
++ common::IntPredicate,
++ traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
++};
++use rustc_middle::ty::layout::HasTyCtxt;
++use rustc_middle::ty::Ty;
++use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
++
++use crate::builder::Builder;
++use crate::type_of::LayoutGccExt;
++
++fn round_pointer_up_to_alignment<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: RValue<'gcc>, align: Align, ptr_ty: Type<'gcc>) -> RValue<'gcc> {
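++ // Round `addr` up to the next multiple of `align`: (addr + align - 1) & -align.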
++ let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
++ ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
++ ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
++ bx.inttoptr(ptr_as_int, ptr_ty)
++}
++
++fn emit_direct_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, llty: Type<'gcc>, size: Size, align: Align, slot_size: Align, allow_higher_align: bool) -> (RValue<'gcc>, Align) {
++ let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
++ let va_list_addr =
++ if list.layout.gcc_type(bx.cx, true) != va_list_ptr_ty {
++ bx.bitcast(list.immediate(), va_list_ptr_ty)
++ }
++ else {
++ list.immediate()
++ };
++
++ let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
++
++ let (addr, addr_align) = if allow_higher_align && align > slot_size {
++ (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
++ } else {
++ (ptr, slot_size)
++ };
++
++ let aligned_size = size.align_to(slot_size).bytes() as i32;
++ let full_direct_size = bx.cx().const_i32(aligned_size);
++ let next = bx.inbounds_gep(addr, &[full_direct_size]);
++ bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
++
++ if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
++ let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
++ let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
++ (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
++ } else {
++ (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
++ }
++}
++
++fn emit_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>, indirect: bool, slot_size: Align, allow_higher_align: bool) -> RValue<'gcc> {
++ let layout = bx.cx.layout_of(target_ty);
++ let (llty, size, align) =
++ if indirect {
++ (
++ bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).gcc_type(bx.cx, true),
++ bx.cx.data_layout().pointer_size,
++ bx.cx.data_layout().pointer_align,
++ )
++ }
++ else {
++ (layout.gcc_type(bx.cx, true), layout.size, layout.align)
++ };
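++ // For indirect arguments, the va_list slot holds a pointer to the value,
++ // so an extra load is needed after reading the slot.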
++ let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
++ if indirect {
++ let tmp_ret = bx.load(addr, addr_align);
++ bx.load(tmp_ret, align.abi)
++ }
++ else {
++ bx.load(addr, addr_align)
++ }
++}
++
++fn emit_aapcs_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
++ // Implementation of the AAPCS64 calling convention for va_args; see
++ // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
++ let va_list_addr = list.immediate();
++ let layout = bx.cx.layout_of(target_ty);
++ let gcc_type = layout.immediate_gcc_type(bx);
++
++ let function = bx.llbb().get_function();
++ let variable = function.new_local(None, gcc_type, "va_arg");
++
++ let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
++ let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
++ let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
++ let end = bx.build_sibling_block("va_arg.end");
++ let zero = bx.const_i32(0);
++ let offset_align = Align::from_bytes(4).unwrap();
++ assert!(bx.tcx().sess.target.endian == Endian::Little);
++
++ let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
++ let (reg_off, reg_top_index, slot_size) = if gr_type {
++ let gr_offs = bx.struct_gep(va_list_addr, 7);
++ let nreg = (layout.size.bytes() + 7) / 8;
++ (gr_offs, 3, nreg * 8)
++ } else {
++ let vr_off = bx.struct_gep(va_list_addr, 9);
++ let nreg = (layout.size.bytes() + 15) / 16;
++ (vr_off, 5, nreg * 16)
++ };
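++ // AAPCS64 va_list: general-purpose values are read from the GP register
++ // save area (8-byte slots, __gr_offs/__gr_top), vector/FP values from the
++ // SIMD save area (16-byte slots, __vr_offs/__vr_top).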
++
++ // If the offset is >= 0, then the value is on the stack.
++ let mut reg_off_v = bx.load(reg_off, offset_align);
++ let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
++ bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb());
++
++ // The value at this point might be in a register, but there is a chance that
++ // it could be on the stack, so we have to update the offset and then check
++ // the offset again.
++
++ if gr_type && layout.align.abi.bytes() > 8 {
++ reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
++ reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
++ }
++ let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
++
++ maybe_reg.store(new_reg_off_v, reg_off, offset_align);
++
++ // Check to see if we have overflowed the registers as a result of this.
++ // If we have, then we need to use the stack for this value.
++ let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
++ maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb());
++
++ let top = in_reg.struct_gep(va_list_addr, reg_top_index);
++ let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
++
++ // reg_value = *(@top + reg_off_v);
++ let top = in_reg.gep(top, &[reg_off_v]);
++ let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.gcc_type(bx, true)));
++ let reg_value = in_reg.load(top, layout.align.abi);
++ in_reg.assign(variable, reg_value);
++ in_reg.br(end.llbb());
++
++ // On Stack block
++ let stack_value =
++ emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
++ on_stack.assign(variable, stack_value);
++ on_stack.br(end.llbb());
++
++ *bx = end;
++ variable.to_rvalue()
++}
++
++pub(super) fn emit_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
++ // Determine the va_arg implementation to use. The LLVM va_arg instruction
++ // is lacking in some instances, so we should only use it as a fallback.
++ let target = &bx.cx.tcx.sess.target;
++ let arch = &bx.cx.tcx.sess.target.arch;
++ match &**arch {
++ // Windows x86
++ "x86" if target.options.is_like_windows => {
++ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
++ }
++ // Generic x86
++ "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
++ // Windows AArch64
++ "aarch64" if target.options.is_like_windows => {
++ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
++ }
++ // macOS / iOS AArch64
++ "aarch64" if target.options.is_like_osx => {
++ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
++ }
++ "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
++ // Windows x86_64
++ "x86_64" if target.options.is_like_windows => {
++ let target_ty_size = bx.cx.size_of(target_ty).bytes();
++ let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
++ emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
++ }
++ // For all other architecture/OS combinations fall back to using
++ // the LLVM va_arg instruction.
++ // https://llvm.org/docs/LangRef.html#va-arg-instruction
++ _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).gcc_type(bx.cx, true)),
++ }
++}*/
--- /dev/null
--- /dev/null
++#!/bin/bash
++
++# TODO: rewrite with cargo-make (or `just`, or something similar) so that the sysroot is only rebuilt when needed?
++
++#set -x
++set -e
++
++export GCC_PATH=$(cat gcc_path)
++
++export LD_LIBRARY_PATH="$GCC_PATH"
++export LIBRARY_PATH="$GCC_PATH"
++
++if [[ "$1" == "--release" ]]; then
++ export CHANNEL='release'
++ CARGO_INCREMENTAL=1 cargo rustc --release
++else
++ echo $LD_LIBRARY_PATH
++ export CHANNEL='debug'
++ cargo rustc
++fi
++
++source config.sh
++
++rm -r target/out || true
++mkdir -p target/out/gccjit
++
++echo "[BUILD] mini_core"
++$RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
++
++echo "[BUILD] example"
++$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
++
++#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
++ #echo "[JIT] mini_core_hello_world"
++ #CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
++#else
++ #echo "[JIT] mini_core_hello_world (skipped)"
++#fi
++
++echo "[AOT] mini_core_hello_world"
++$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
++# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
++
++echo "[BUILD] sysroot"
++time ./build_sysroot/build_sysroot.sh
++
++echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
++$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
++
++echo "[AOT] alloc_system"
++$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
++
++# FIXME: this requires linking an additional lib for __popcountdi2
++#echo "[AOT] alloc_example"
++#$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
++#$RUN_WRAPPER ./target/out/alloc_example
++
++#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
++ #echo "[JIT] std_example"
++ #CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE
++#else
++ #echo "[JIT] std_example (skipped)"
++#fi
++
++echo "[AOT] dst_field_align"
++# FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
++$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
++
++echo "[AOT] std_example"
++$RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE
++
++echo "[AOT] subslice-patterns-const-eval"
++$RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/subslice-patterns-const-eval
++
++echo "[AOT] track-caller-attribute"
++$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/track-caller-attribute
++
++# FIXME: this requires linking an additional lib for __popcountdi2
++#echo "[BUILD] mod_bench"
++#$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
++
++# FIXME linker gives multiple definitions error on Linux
++#echo "[BUILD] sysroot in release mode"
++#./build_sysroot/build_sysroot.sh --release
++
++#pushd simple-raytracer
++#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
++ #echo "[BENCH COMPILE] ebobby/simple-raytracer"
++ #hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "rm -r target/*/debug || true" \
++ #"RUSTFLAGS='' cargo build --target $TARGET_TRIPLE" \
++ #"../cargo.sh build"
++
++ #echo "[BENCH RUN] ebobby/simple-raytracer"
++ #cp ./target/*/debug/main ./raytracer_cg_gccjit
++ #hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_gccjit
++#else
++ #echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
++ #echo "[COMPILE] ebobby/simple-raytracer"
++ #../cargo.sh build
++ #echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
++#fi
++#popd
++
++pushd build_sysroot/sysroot_src/library/core/tests
++echo "[TEST] libcore"
++rm -r ./target || true
++../../../../../cargo.sh test
++popd
++
++#pushd regex
++#echo "[TEST] rust-lang/regex example shootout-regex-dna"
++#../cargo.sh clean
++## Make sure `[codegen mono items] start` doesn't poison the diff
++#../cargo.sh build --example shootout-regex-dna
++#cat examples/regexdna-input.txt | ../cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
++#diff -u res.txt examples/regexdna-output.txt
++
++#echo "[TEST] rust-lang/regex tests"
++#../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options
++#popd
++
++#echo
++#echo "[BENCH COMPILE] mod_bench"
++
++#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
++#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort"
++#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort"
++#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort"
++#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort"
++
++## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow
++#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
++
++#echo
++#echo "[BENCH RUN] mod_bench"
++#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_*
++
++echo
++echo "[TEST] rust-lang/rust"
++
++rust_toolchain=$(cat rust-toolchain)
++
++git clone https://github.com/rust-lang/rust.git || true
++cd rust
++git fetch
++git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
++export RUSTFLAGS=
++
++#git apply ../rust_lang.patch
++
++
++rm config.toml || true
++
++cat > config.toml <<EOF
++[rust]
++codegen-backends = []
++
++[build]
++cargo = "$(which cargo)"
++local-rebuild = true
++rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
++EOF
++
++rustc -V | cut -d' ' -f3 | tr -d '('
++git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test
++
++for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
++ rm $test
++done
++
++git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
++
++rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true
++for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
++ rm $test
++done
++git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
++git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
++rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO: re-enable this test if I ever implement the llvm_asm! macro.
++#rm src/test/ui/consts/const-size_of-cycle.rs || true # Error file path difference
++#rm src/test/ui/impl-trait/impl-generic-mismatch.rs || true # ^
++#rm src/test/ui/type_length_limit.rs || true
++#rm src/test/ui/issues/issue-50993.rs || true # Target `thumbv7em-none-eabihf` is not supported
++#rm src/test/ui/macros/same-sequence-span.rs || true # Proc macro .rustc section not found?
++#rm src/test/ui/suggestions/issue-61963.rs || true # ^
++
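++# NOTE: tests run with panic=abort (and -Zpanic-abort-tests), presumably because the backend does not support unwinding yet.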
++RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
++
++echo "[TEST] rustc test suite"
++# TODO: remove excluded tests when they stop stalling.
++COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" --exclude src/test/ui/numbers-arithmetic/saturating-float-casts.rs --exclude src/test/ui/issues/issue-50811.rs
--- /dev/null
--- /dev/null
++use std::{
++ env::{self, current_dir},
++ path::PathBuf,
++ process::Command,
++};
++
++use lang_tester::LangTester;
++use tempfile::TempDir;
++
++fn main() {
++ let tempdir = TempDir::new().expect("temp dir");
++ let current_dir = current_dir().expect("current dir");
++ let current_dir = current_dir.to_str().expect("current dir").to_string();
++ let gcc_path = include_str!("../gcc_path");
++ let gcc_path = gcc_path.trim();
++ env::set_var("LD_LIBRARY_PATH", gcc_path);
++ LangTester::new()
++ .test_dir("tests/run")
++ .test_file_filter(|path| path.extension().expect("extension").to_str().expect("to_str") == "rs")
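++ // Extract the expected results from the leading `//` comment block of
++ // each test file, e.g.:
++ //
++ // // Compiler:
++ // //
++ // // Run-time:
++ // // status: 0
++ // // stdout: 42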
++ .test_extract(|source| {
++ let lines =
++ source.lines()
++ .skip_while(|l| !l.starts_with("//"))
++ .take_while(|l| l.starts_with("//"))
++ .map(|l| &l[2..])
++ .collect::<Vec<_>>()
++ .join("\n");
++ Some(lines)
++ })
++ .test_cmds(move |path| {
++ // Test command 1: Compile `x.rs` into `tempdir/x`.
++ let mut exe = PathBuf::new();
++ exe.push(&tempdir);
++ exe.push(path.file_stem().expect("file_stem"));
++ let mut compiler = Command::new("rustc");
++ compiler.args(&[
++ &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir),
++ "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir),
++ "-Zno-parallel-llvm",
++ "-C", "panic=abort",
++ "-C", "link-arg=-lc",
++ "-o", exe.to_str().expect("to_str"),
++ path.to_str().expect("to_str"),
++ ]);
++ // Test command 2: run `tempdir/x`.
++ let runtime = Command::new(exe);
++ vec![("Compiler", compiler), ("Run-time", runtime)]
++ })
++ .run();
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: signal
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod intrinsics {
++ use super::Sized;
++
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++/*
++ * Code
++ */
++
++fn test_fail() -> ! {
++ unsafe { intrinsics::abort() };
++}
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ test_fail();
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: signal
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod intrinsics {
++ use super::Sized;
++
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++/*
++ * Code
++ */
++
++fn fail() -> i32 {
++ unsafe { intrinsics::abort() };
++ 0
++}
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ fail();
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 42
++// 7
++// 5
++// 10
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i16 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn puts(s: *const u8) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for isize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++
++/*
++ * Code
++ */
++
++static mut ONE: usize = 1;
++
++fn make_array() -> [u8; 3] {
++ [42, 10, 5]
++}
++
++#[start]
++fn main(argc: isize, _argv: *const *const u8) -> isize {
++ let array = [42, 7, 5];
++ let array2 = make_array();
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE - 1]);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE]);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE + 1]);
++
++ libc::printf(b"%d\n\0" as *const u8 as *const i8, array2[argc as usize] as u32);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++
++#![feature(asm, global_asm)]
++
++global_asm!("
++ .global add_asm
++add_asm:
++ mov rax, rdi
++ add rax, rsi
++ ret"
++);
++
++extern "C" {
++ fn add_asm(a: i64, b: i64) -> i64;
++}
++
++fn main() {
++ unsafe {
++ asm!("nop");
++ }
++
++ let x: u64;
++ unsafe {
++ asm!("mov $5, {}",
++ out(reg) x,
++ options(att_syntax)
++ );
++ }
++ assert_eq!(x, 5);
++
++ let x: u64;
++ let input: u64 = 42;
++ unsafe {
++ asm!("mov {input}, {output}",
++ "add $1, {output}",
++ input = in(reg) input,
++ output = out(reg) x,
++ options(att_syntax)
++ );
++ }
++ assert_eq!(x, 43);
++
++ let x: u64;
++ unsafe {
++ asm!("mov {}, 6",
++ out(reg) x,
++ );
++ }
++ assert_eq!(x, 6);
++
++ let x: u64;
++ let input: u64 = 42;
++ unsafe {
++ asm!("mov {output}, {input}",
++ "add {output}, 1",
++ input = in(reg) input,
++ output = out(reg) x,
++ );
++ }
++ assert_eq!(x, 43);
++
++ assert_eq!(unsafe { add_asm(40, 2) }, 42);
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// stdout: 2
++// 7 8
++// 10
++
++#![allow(unused_attributes)]
++#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for *mut i32 {}
++impl Copy for usize {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i32 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn puts(s: *const u8) -> i32;
++ pub fn fflush(stream: *mut i32) -> i32;
++ pub fn printf(format: *const i8, ...) -> i32;
++
++ pub static STDOUT: *mut i32;
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ libc::fflush(libc::STDOUT);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++/*
++ * Code
++ */
++
++fn inc_ref(num: &mut isize) -> isize {
++ *num = *num + 5;
++ *num + 1
++}
++
++fn inc(num: isize) -> isize {
++ num + 1
++}
++
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ argc = inc(argc);
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
++ }
++
++ let b = inc_ref(&mut argc);
++ unsafe {
++ libc::printf(b"%ld %ld\n\0" as *const u8 as *const i8, argc, b);
++ }
++
++ argc = 10;
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: Arg: 1
++// Argument: 1
++// String arg: 1
++// Int argument: 2
++// Both args: 11
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics,
++ unboxed_closures)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u32 {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn puts(s: *const u8) -> i32;
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "unsize"]
++pub trait Unsize<T: ?Sized> {}
++
++#[lang = "coerce_unsized"]
++pub trait CoerceUnsized<T> {}
++
++impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
++impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
++
++#[lang = "fn_once"]
++#[rustc_paren_sugar]
++pub trait FnOnce<Args> {
++ #[lang = "fn_once_output"]
++ type Output;
++
++ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
++}
++
++#[lang = "fn_mut"]
++#[rustc_paren_sugar]
++pub trait FnMut<Args>: FnOnce<Args> {
++ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let string = "Arg: %d\n\0";
++ let mut closure = || {
++ unsafe {
++ libc::printf(string as *const str as *const i8, argc);
++ }
++ };
++ closure();
++
++ let mut closure = || {
++ unsafe {
++ libc::printf("Argument: %d\n\0" as *const str as *const i8, argc);
++ }
++ };
++ closure();
++
++ let mut closure = |string| {
++ unsafe {
++ libc::printf(string as *const str as *const i8, argc);
++ }
++ };
++ closure("String arg: %d\n\0");
++
++ let mut closure = |arg: isize| {
++ unsafe {
++ libc::printf("Int argument: %d\n\0" as *const str as *const i8, arg);
++ }
++ };
++ closure(argc + 1);
++
++ let mut closure = |string, arg: isize| {
++ unsafe {
++ libc::printf(string as *const str as *const i8, arg);
++ }
++ };
++ closure("Both args: %d\n\0", argc + 10);
++
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: true
++// 1
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for u64 {}
++impl Copy for i32 {}
++impl Copy for u32 {}
++impl Copy for bool {}
++impl Copy for u16 {}
++impl Copy for i16 {}
++impl Copy for char {}
++impl Copy for i8 {}
++impl Copy for u8 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn puts(s: *const u8) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for isize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++#[lang = "eq"]
++pub trait PartialEq<Rhs: ?Sized = Self> {
++ fn eq(&self, other: &Rhs) -> bool;
++ fn ne(&self, other: &Rhs) -> bool;
++}
++
++impl PartialEq for u8 {
++ fn eq(&self, other: &u8) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u8) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for u16 {
++ fn eq(&self, other: &u16) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u16) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for u32 {
++ fn eq(&self, other: &u32) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u32) -> bool {
++ (*self) != (*other)
++ }
++}
++
++
++impl PartialEq for u64 {
++ fn eq(&self, other: &u64) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &u64) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for usize {
++ fn eq(&self, other: &usize) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &usize) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for i8 {
++ fn eq(&self, other: &i8) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &i8) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for i32 {
++ fn eq(&self, other: &i32) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &i32) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for isize {
++ fn eq(&self, other: &isize) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &isize) -> bool {
++ (*self) != (*other)
++ }
++}
++
++impl PartialEq for char {
++ fn eq(&self, other: &char) -> bool {
++ (*self) == (*other)
++ }
++ fn ne(&self, other: &char) -> bool {
++ (*self) != (*other)
++ }
++}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ if argc == 1 {
++ libc::printf(b"true\n\0" as *const u8 as *const i8);
++ }
++
++ let string =
++ match argc {
++ 1 => b"1\n\0",
++ 2 => b"2\n\0",
++ 3 => b"3\n\0",
++ 4 => b"4\n\0",
++ 5 => b"5\n\0",
++ _ => b"_\n\0",
++ };
++ libc::printf(string as *const u8 as *const i8);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++
++#![feature(auto_traits, lang_items, no_core, start)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 2
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn exit(status: i32);
++ }
++}
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ libc::exit(2);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 1
++
++#![feature(auto_traits, lang_items, no_core, start)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ 1
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 1
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i16 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn puts(s: *const u8) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for isize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++/*
++ * Code
++ */
++
++fn i16_as_i8(a: i16) -> i8 {
++ a as i8
++}
++
++fn call_func(func: fn(i16) -> i8, param: i16) -> i8 {
++ func(param)
++}
++
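++// The call below goes through a `fn(i16) -> i8` value rather than a direct
++// call, so it covers function-pointer creation and indirect calls.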
++#[start]
++fn main(argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ let result = call_func(i16_as_i8, argc as i16) as isize;
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, result);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// stdout: Panicking
++// status: signal
++
++#![allow(unused_attributes)]
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for *mut i32 {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn puts(s: *const u8) -> i32;
++ pub fn fflush(stream: *mut i32) -> i32;
++
++ pub static STDOUT: *mut i32;
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ libc::fflush(libc::STDOUT);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++/*
++ * Code
++ */
++
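++// 9223372036854775807 is isize::MAX on 64-bit targets, so adding argc
++// (always >= 1) overflows. With overflow checks enabled this reaches the
++// `panic` lang item above, which matches the expected "Panicking" output
++// and the abort signal.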
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let int = 9223372036854775807isize;
++ let int = int + argc;
++ int
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// stdout: 2
++// 7
++// 6
++// 11
++
++#![allow(unused_attributes)]
++#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for *mut i32 {}
++impl Copy for usize {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i32 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn puts(s: *const u8) -> i32;
++ pub fn fflush(stream: *mut i32) -> i32;
++ pub fn printf(format: *const i8, ...) -> i32;
++
++ pub static STDOUT: *mut i32;
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ libc::fflush(libc::STDOUT);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++/*
++ * Code
++ */
++
++struct Test {
++ field: isize,
++}
++
++fn test(num: isize) -> Test {
++ Test {
++ field: num + 1,
++ }
++}
++
++fn update_num(num: &mut isize) {
++ *num = *num + 5;
++}
++
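++// With argc == 1: test(1).field is 2, update_num bumps it to 7, argc
++// becomes 6 after update_num and 11 after the increment through `refe`,
++// matching the expected stdout (2, 7, 6, 11).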
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let mut test = test(argc);
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
++ }
++ update_num(&mut test.field);
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
++ }
++
++ update_num(&mut argc);
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
++ }
++
++ let refe = &mut argc;
++ *refe = *refe + 5;
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
++ }
++
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// stdout: 41
++// 39
++// 10
++
++#![allow(unused_attributes)]
++#![feature(auto_traits, lang_items, no_core, start, intrinsics, arbitrary_self_types)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for *mut i32 {}
++impl Copy for usize {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i16 {}
++impl Copy for i32 {}
++
++#[lang = "deref"]
++pub trait Deref {
++ type Target: ?Sized;
++
++ fn deref(&self) -> &Self::Target;
++}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn puts(s: *const u8) -> i32;
++ pub fn fflush(stream: *mut i32) -> i32;
++
++ pub static STDOUT: *mut i32;
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ libc::fflush(libc::STDOUT);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for isize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++#[lang = "mul"]
++pub trait Mul<RHS = Self> {
++ type Output;
++
++ #[must_use]
++ fn mul(self, rhs: RHS) -> Self::Output;
++}
++
++impl Mul for u8 {
++ type Output = Self;
++
++ fn mul(self, rhs: Self) -> Self::Output {
++ self * rhs
++ }
++}
++
++impl Mul for usize {
++ type Output = Self;
++
++ fn mul(self, rhs: Self) -> Self::Output {
++ self * rhs
++ }
++}
++
++impl Mul for isize {
++ type Output = Self;
++
++ fn mul(self, rhs: Self) -> Self::Output {
++ self * rhs
++ }
++}
++
++/*
++ * Code
++ */
++
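++// With argc == 1 the three calls below print 41, 39 and 10, matching the
++// expected stdout.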
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 + argc);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 - argc);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 10 * argc);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 1
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u8 {}
++impl Copy for i8 {}
++impl Copy for i16 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ pub fn puts(s: *const u8) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic"]
++#[track_caller]
++#[no_mangle]
++pub fn panic(_msg: &str) -> ! {
++ unsafe {
++ libc::puts("Panicking\0" as *const str as *const u8);
++ intrinsics::abort();
++ }
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++#[lang = "add"]
++trait Add<RHS = Self> {
++ type Output;
++
++ fn add(self, rhs: RHS) -> Self::Output;
++}
++
++impl Add for u8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i8 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for i32 {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for usize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++impl Add for isize {
++ type Output = Self;
++
++ fn add(self, rhs: Self) -> Self {
++ self + rhs
++ }
++}
++
++#[lang = "sub"]
++pub trait Sub<RHS = Self> {
++ type Output;
++
++ fn sub(self, rhs: RHS) -> Self::Output;
++}
++
++impl Sub for usize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for isize {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for u8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i8 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++impl Sub for i16 {
++ type Output = Self;
++
++ fn sub(self, rhs: Self) -> Self {
++ self - rhs
++ }
++}
++
++/*
++ * Code
++ */
++
++static mut ONE: usize = 1;
++
++fn make_array() -> [u8; 3] {
++ [42, 10, 5]
++}
++
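++// The usize -> pointer -> usize round trip below leaves the value
++// unchanged, so this prints 1. (`make_array` is not called by this test.)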
++#[start]
++fn main(argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ let ptr = ONE as *mut usize;
++ let value = ptr as usize;
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, value);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 10
++// 10
++// 42
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++#[lang = "copy"]
++pub unsafe trait Copy {}
++
++unsafe impl Copy for bool {}
++unsafe impl Copy for u8 {}
++unsafe impl Copy for u16 {}
++unsafe impl Copy for u32 {}
++unsafe impl Copy for u64 {}
++unsafe impl Copy for usize {}
++unsafe impl Copy for i8 {}
++unsafe impl Copy for i16 {}
++unsafe impl Copy for i32 {}
++unsafe impl Copy for isize {}
++unsafe impl Copy for f32 {}
++unsafe impl Copy for char {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++/*
++ * Code
++ */
++
++fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
++ (
++ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
++ b as u32,
++ )
++}
++
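++// Integer `as` casts truncate or extend without changing small values, so
++// 10u16 and 42i16 survive every conversion here; the test only prints
++// c (10), d (10) and j (42).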
++#[start]
++fn main(argc: isize, _argv: *const *const u8) -> isize {
++ let (a, b, c, d, e, f, g, h, i, j) = int_cast(10, 42);
++ unsafe {
++ libc::printf(b"%d\n\0" as *const u8 as *const i8, c);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, d);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, j);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 5
++
++#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++impl Copy for usize {}
++impl Copy for i32 {}
++impl Copy for u32 {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++#[lang = "index"]
++pub trait Index<Idx: ?Sized> {
++ type Output: ?Sized;
++ fn index(&self, index: Idx) -> &Self::Output;
++}
++
++impl<T> Index<usize> for [T; 3] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++impl<T> Index<usize> for [T] {
++ type Output = T;
++
++ fn index(&self, index: usize) -> &Self::Output {
++ &self[index]
++ }
++}
++
++#[lang = "unsize"]
++pub trait Unsize<T: ?Sized> {}
++
++#[lang = "coerce_unsized"]
++pub trait CoerceUnsized<T> {}
++
++impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
++impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
++impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++#[lang = "panic_location"]
++struct PanicLocation {
++ file: &'static str,
++ line: u32,
++ column: u32,
++}
++
++#[lang = "panic_bounds_check"]
++#[track_caller]
++#[no_mangle]
++fn panic_bounds_check(index: usize, len: usize) -> ! {
++ unsafe {
++ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
++ intrinsics::abort();
++ }
++}
++
++mod intrinsics {
++ use super::Sized;
++
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++/*
++ * Code
++ */
++
++static mut TWO: usize = 2;
++
++fn index_slice(s: &[u32]) -> u32 {
++ unsafe {
++ s[TWO]
++ }
++}
++
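++// `&array` below is a `&[u32; 3]` that unsizes to `&[u32]` through the
++// `Unsize`/`CoerceUnsized` lang items defined above; indexing it at
++// TWO (2) yields 5, the expected stdout.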
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let array = [42, 7, 5];
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, index_slice(&array));
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 10
++// 14
++// 1
++// 12
++// 12
++// 1
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod intrinsics {
++ use super::Sized;
++
++ extern "rust-intrinsic" {
++ pub fn abort() -> !;
++ }
++}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++#[lang = "structural_peq"]
++pub trait StructuralPartialEq {}
++
++#[lang = "structural_teq"]
++pub trait StructuralEq {}
++
++#[lang = "drop_in_place"]
++#[allow(unconditional_recursion)]
++pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
++ // Code here does not matter - this is replaced by the
++ // real drop glue by the compiler.
++ drop_in_place(to_drop);
++}
++
++/*
++ * Code
++ */
++
++struct Test {
++ field: isize,
++}
++
++struct WithRef {
++ refe: &'static Test,
++}
++
++static mut CONSTANT: isize = 10;
++
++static mut TEST: Test = Test {
++ field: 12,
++};
++
++static mut TEST2: Test = Test {
++ field: 14,
++};
++
++static mut WITH_REF: WithRef = WithRef {
++ refe: unsafe { &TEST },
++};
++
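++// The prints below walk through the statics: CONSTANT (10), TEST2.field
++// before and after the assignment (14, then argc == 1), the initial
++// WITH_REF target TEST (12), TEST.field itself (12), and finally WITH_REF
++// re-pointed at TEST2 (1), matching the expected stdout.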
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
++ TEST2.field = argc;
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
++ WITH_REF.refe = &TEST2;
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 1
++// 2
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++/*
++ * Code
++ */
++
++struct Test {
++ field: isize,
++}
++
++struct Two {
++ two: isize,
++}
++
++fn one() -> isize {
++ 1
++}
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let test = Test {
++ field: one(),
++ };
++ let two = Two {
++ two: 2,
++ };
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two);
++ }
++ 0
++}
--- /dev/null
--- /dev/null
++// Compiler:
++//
++// Run-time:
++// status: 0
++// stdout: 3
++
++#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
++
++#![no_std]
++#![no_core]
++
++/*
++ * Core
++ */
++
++// Because we don't have core yet.
++#[lang = "sized"]
++pub trait Sized {}
++
++#[lang = "copy"]
++trait Copy {
++}
++
++impl Copy for isize {}
++
++#[lang = "receiver"]
++trait Receiver {
++}
++
++#[lang = "freeze"]
++pub(crate) unsafe auto trait Freeze {}
++
++mod libc {
++ #[link(name = "c")]
++ extern "C" {
++ pub fn printf(format: *const i8, ...) -> i32;
++ }
++}
++
++/*
++ * Code
++ */
++
++#[start]
++fn main(mut argc: isize, _argv: *const *const u8) -> isize {
++ let test: (isize, isize, isize) = (3, 1, 4);
++ unsafe {
++ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
++ }
++ 0
++}