## Quick Start
-Read ["Installing Rust"] from [The Book].
+Read ["Installation"] from [The Book].
-["Installing Rust"]: https://doc.rust-lang.org/book/getting-started.html#installing-rust
+["Installation"]: https://doc.rust-lang.org/book/second-edition/ch01-01-installation.html
[The Book]: https://doc.rust-lang.org/book/index.html
## Building from Source
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "bitflags"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "bootstrap"
version = "0.0.0"
"curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)",
"fs2 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "git2 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jobserver 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "libgit2-sys 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)",
"psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"cargo 0.20.0",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "git2 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.9.0"
dependencies = [
"curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-[[package]]
-name = "either"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
[[package]]
name = "enum_primitive"
version = "0.1.1"
[[package]]
name = "git2"
-version = "0.6.5"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "libgit2-sys 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl-sys 0.9.12 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"curl 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "git2 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
]
[[package]]
-name = "itertools"
-version = "0.5.10"
+name = "itoa"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
-name = "itoa"
-version = "0.3.1"
+name = "jobserver"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "kernel32-sys"
[[package]]
name = "libgit2-sys"
-version = "0.6.11"
+version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cmake 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "multimap"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
[[package]]
name = "net2"
version = "0.2.29"
"languageserver-types 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"racer 2.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-analysis 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-data 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-analysis 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-data 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-vfs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustfmt 0.8.4 (git+https://github.com/rust-lang-nursery/rustfmt)",
+ "rls-vfs 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustfmt 0.9.0 (git+https://github.com/rust-lang-nursery/rustfmt?branch=libsyntax)",
"serde 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "rls-analysis"
-version = "0.2.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-data 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-data 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rls-data"
-version = "0.3.1"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rls-data"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "rls-vfs"
-version = "0.3.0"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"racer 2.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.0.0"
dependencies = [
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rls-data 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rls-data 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "rustfmt"
-version = "0.8.4"
-source = "git+https://github.com/rust-lang-nursery/rustfmt#bf9b3fa1d7cab2f7bd541539d397a92b4954ec96"
+version = "0.9.0"
+source = "git+https://github.com/rust-lang-nursery/rustfmt?branch=libsyntax#6c1de7694782d9f710b2f00b1f650f266a99b384"
dependencies = [
"diff 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "multimap 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "strings 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "syntex_errors 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strings 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "scoped-tls"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "semver"
version = "0.7.0"
[[package]]
name = "strings"
-version = "0.0.1"
+version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "syntex_errors"
-version = "0.58.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "syntex_pos 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "syntex_pos"
version = "0.52.0"
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "syntex_pos"
-version = "0.58.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "syntex_syntax"
version = "0.52.0"
"unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "syntex_syntax"
-version = "0.58.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "syntex_errors 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "syntex_pos 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "tar"
version = "0.4.13"
"checksum bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f67931368edf3a9a51d29886d245f1c3db2f1ef0dcc9e35ff70341b78c10d23"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4"
+"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
"checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32"
"checksum cargo 0.20.0 (git+https://github.com/rust-lang/cargo)" = "<none>"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
"checksum diff 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0a515461b6c8c08419850ced27bc29e86166dcdcde8fbe76f8b1f0589bb49472"
"checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8"
"checksum dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90"
-"checksum either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18785c1ba806c258137c937e44ada9ee7e69a37e3c72077542cd2f069d78562a"
"checksum enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180"
"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f"
"checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83"
"checksum gcc 0.3.50 (registry+https://github.com/rust-lang/crates.io-index)" = "5f837c392f2ea61cb1576eac188653df828c861b7137d74ea4a5caa89621f9e6"
"checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518"
"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685"
-"checksum git2 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "9de9df4358c17e448a778d90cd0272e1dab5eae30244502333fa2001c4e24357"
+"checksum git2 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aa01936ac96555c083c0e8553f672616274408d9d3fc5b8696603fbf63ff43ee"
"checksum git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "68676bc784bf0bef83278898929bf64a251e87c0340723d0b93fa096c9c5bf8e"
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
"checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4"
"checksum handlebars 0.25.3 (registry+https://github.com/rust-lang/crates.io-index)" = "15bdf598fc3c2de40c6b340213028301c0d225eea55a2294e6cc148074e557a1"
"checksum idna 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6ac85ec3f80c8e4e99d9325521337e14ec7555c458a14e377d189659a427f375"
-"checksum itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4833d6978da405305126af4ac88569b5d71ff758581ce5a987dbfa3755f694fc"
"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"
+"checksum jobserver 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c43fc6e4066b2adf0539c854daa1d926d7f23e6926e019850d34b8ae46391b2e"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum languageserver-types 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97c2985bfcbbcb0189cfa25e1c10c1ac7111df2b6214b652c690127aefdf4e5b"
"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf"
"checksum libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)" = "babb8281da88cba992fa1f4ddec7d63ed96280a1a53ec9b919fd37b53d71e502"
-"checksum libgit2-sys 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d9dc31ee90fb179b706d35fb672e91d0b74e950d7fb4ea7eae3c0f5ecbf2d3d3"
+"checksum libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "df18a822100352d9863b302faf6f8f25c0e77f0e60feb40e5dbe1238b7f13b1d"
"checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75"
"checksum libz-sys 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e5ee912a45d686d393d5ac87fac15ba0ba18daae14e8e7543c63ebf7fb7e970c"
"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
"checksum miniz-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "28eaee17666671fa872e567547e8428e83308ebe5808cdf6a0e28397dbe2c726"
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
-"checksum multimap 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9223f4774d08e06185e44e555b9a7561243d387bac49c78a6205c42d6975fbf2"
"checksum net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)" = "bc01404e7568680f1259aa5729539f221cb1e6d047a0d9053cab4be8a73b5d67"
"checksum num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "98b15ba84e910ea7a1973bccd3df7b31ae282bf9d8bd2897779950c9b8303d40"
"checksum num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "ba6d838b16e56da1b6c383d065ff1ec3c7d7797f65a3e8f6ba7092fd87820bac"
"checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
"checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457"
-"checksum rls-analysis 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a62d88c341375c6f3f8b2e18b9b364896e7d3e7aa916907de717d0267e116506"
-"checksum rls-data 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fc4277ce3c57f456b11fe3145b181a844a25201bab5cbaa1978457e6e2f27d47"
+"checksum rls-analysis 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d77d58e8933752142b5b92e3f8ba6d6f1630be6da5627c492268a43f79ffbda"
+"checksum rls-data 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "374a8fad31cc0681a7bfd8a04079dd4afd0e981d34e18a171b1a467445bdf51e"
+"checksum rls-data 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9e2087477364c34faca86c2476765deb1185dbae3c598cfb1eb040f3a74d22b5"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
-"checksum rls-vfs 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "986eada111517bcb5a7a75205b3f2b70c82e7766653cca61a23f5afce79bdb94"
+"checksum rls-vfs 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ace07060dd154731b39254864245cbdd33c8f5f64fe1f630a089c72e2468f854"
"checksum rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3058a43ada2c2d0b92b3ae38007a2d0fa5e9db971be260e0171408a4ff471c95"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-"checksum rustfmt 0.8.4 (git+https://github.com/rust-lang-nursery/rustfmt)" = "<none>"
+"checksum rustfmt 0.9.0 (git+https://github.com/rust-lang-nursery/rustfmt?branch=libsyntax)" = "<none>"
"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7"
+"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
"checksum semver 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd61b85a0fa777f7fb7c454b9189b2941b110d1385ce84d7f76efdf1606a85"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)" = "34b623917345a631dc9608d5194cc206b3fe6c3554cd1c75b937e55e285254af"
"checksum serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "48b04779552e92037212c3615370f6bd57a40ebba7f20e554ff9f55e41a69a7b"
"checksum shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8"
"checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b"
-"checksum strings 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "54f86446ab480b4f60782188f4f78886465c5793aee248cbb48b7fdc0d022420"
+"checksum strings 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "da75d8bf2c4d210d63dd09581a041b036001f9f6e03d9b151dbff810fb7ba26a"
"checksum strsim 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "67f84c44fbb2f91db7fef94554e6b2ac05909c9c0b0bc23bb98d3a1aebfe7f7c"
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
"checksum syn 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6ae6fb0dcc9bd85f89a1a4adc0df2fd90c90c98849d61433983dd7a9df6363f7"
"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
"checksum syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9e52bffe6202cfb67587784cf23e0ec5bf26d331eef4922a16d5c42e12aa1e9b"
-"checksum syntex_errors 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "867cc5c2d7140ae7eaad2ae9e8bf39cb18a67ca651b7834f88d46ca98faadb9c"
"checksum syntex_pos 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "955ef4b16af4c468e4680d1497f873ff288f557d338180649e18f915af5e15ac"
-"checksum syntex_pos 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13ad4762fe52abc9f4008e85c4fb1b1fe3aa91ccb99ff4826a439c7c598e1047"
"checksum syntex_syntax 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "76a302e717e348aa372ff577791c3832395650073b8d8432f8b3cb170b34afde"
-"checksum syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6e0e4dbae163dd98989464c23dd503161b338790640e11537686f2ef0f25c791"
"checksum tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "281285b717926caa919ad905ef89c63d75805c7d89437fb873100925a53f2b1b"
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
"checksum term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d168af3930b369cfe245132550579d47dfd873d69470755a19c2c6568dbbd989"
continue
}
- println!("doc tests for: {}", p.display());
markdown_test(build, compiler, &p);
}
}
return;
}
+ println!("doc tests for: {}", markdown.display());
let mut cmd = Command::new(build.rustdoc(compiler));
build.add_rustc_lib_path(compiler, &mut cmd);
build.add_rust_test_threads(&mut cmd);
if build.is_rust_llvm(target) {
cargo.env("LLVM_RUSTLLVM", "1");
}
+ if let Some(ref cfg_file) = build.flags.config {
+ let cfg_path = t!(PathBuf::from(cfg_file).canonicalize());
+ cargo.env("CFG_LLVM_TOML", cfg_path.into_os_string());
+ }
cargo.env("LLVM_CONFIG", build.llvm_config(target));
let target_config = build.config.target_config.get(target);
if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) {
./x.py build
./x.py build --stage 1
- For a quick build with a usable compile, you can pass:
+ For a quick build of a usable compiler, you can pass:
- ./x.py build --stage 1 src/libtest");
+ ./x.py build --stage 1 src/libtest
+
+ This will first build everything once (like --stage 0 without further
+ arguments would), and then use the compiler built in stage 0 to build
+ src/libtest and its dependencies.
+ Once this is done, build/$ARCH/stage1 contains a usable compiler.");
}
"test" => {
subcommand_help.push_str("\n
--- /dev/null
+# The Rustdoc Book
+
+- [What is rustdoc?](what-is-rustdoc.md)
+- [Command-line arguments](command-line-arguments.md)
+- [In-source directives](in-source-directives.md)
+- [Documentation tests](documentation-tests.md)
+- [Plugins](plugins.md)
+- [Passes](passes.md)
\ No newline at end of file
--- /dev/null
+# Command-line arguments
+
+Here's the list of arguments you can pass to `rustdoc`:
+
+## `-h`/`--help`: help
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc -h
+$ rustdoc --help
+```
+
+This will show `rustdoc`'s built-in help, which largely consists of
+a list of possible command-line flags.
+
+Some of `rustdoc`'s flags are unstable; this page only shows stable
+options, `--help` will show them all.
+
+## `-V`/`--version`: version information
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc -V
+$ rustdoc --version
+```
+
+This will show `rustdoc`'s version, which will look something
+like this:
+
+```text
+rustdoc 1.17.0 (56124baa9 2017-04-24)
+```
+
+## `-v`/`--verbose`: more verbose output
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc -v src/lib.rs
+$ rustdoc --verbose src/lib.rs
+```
+
+This enables "verbose mode", which means that more information will be written
+to standard out. What is written depends on the other flags you've passed in.
+For example, with `--version`:
+
+```text
+$ rustdoc --verbose --version
+rustdoc 1.17.0 (56124baa9 2017-04-24)
+binary: rustdoc
+commit-hash: hash
+commit-date: date
+host: host-triple
+release: 1.17.0
+LLVM version: 3.9
+```
+
+## `-r`/`--input-format`: input format
+
+This flag is currently ignored; the idea is that `rustdoc` would support various
+input formats, and you could specify them via this flag.
+
+Rustdoc only supports Rust source code and Markdown input formats. If the
+file ends in `.md` or `.markdown`, `rustdoc` treats it as a Markdown file.
+Otherwise, it assumes that the input file is Rust.
+
+
+## `-w`/`--output-format`: output format
+
+This flag is currently ignored; the idea is that `rustdoc` would support
+various output formats, and you could specify them via this flag.
+
+Rustdoc only supports HTML output, and so this flag is redundant today.
+
+## `-o`/`--output`: output path
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -o target/doc
+$ rustdoc src/lib.rs --output target/doc
+```
+
+By default, `rustdoc`'s output appears in a directory named `doc` in
+the current working directory. With this flag, it will place all output
+into the directory you specify.
+
+
+## `--crate-name`: controlling the name of the crate
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --crate-name mycrate
+```
+
+By default, `rustdoc` assumes that the name of your crate is the same name
+as the `.rs` file. `--crate-name` lets you override this assumption with
+whatever name you choose.
+
+## `-L`/`--library-path`:
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -L target/debug/deps
+$ rustdoc src/lib.rs --library-path target/debug/deps
+```
+
+If your crate has dependencies, `rustdoc` needs to know where to find them.
+Passing `--library-path` gives `rustdoc` a list of places to look for these
+dependencies.
+
+This flag takes any number of directories as its argument, and will use all of
+them when searching.
+
+
+## `--cfg`: passing configuration flags
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --cfg feature="foo"
+```
+
+This flag accepts the same values as `rustc --cfg`, and uses it to configure
+compilation. The example above uses `feature`, but any of the `cfg` values
+are acceptable.
+
+## `--extern`: specify a dependency's location
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --extern lazy-static=/path/to/lazy-static
+```
+
+Similar to `--library-path`, `--extern` is about specifying the location
+of a dependency. `--library-path` provides directories to search in, `--extern`
+instead lets you specify exactly which dependency is located where.
+
+
+## `--plugin-path`: loading plugins
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --plugin-path=/path/to/plugins
+```
+
+Similar to `--library-path`, but for plugins. For more, see
+the [chapter on plugins](plugins.html).
+
+See also: `--plugins`.
+
+## `--passes`: add more rustdoc passes
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc --passes list
+$ rustdoc src/lib.rs --passes strip-priv-imports
+```
+
+An argument of "list" will print a list of possible "rustdoc passes", and other
+arguments are the names of passes to run in addition to the defaults.
+
+For more details on passes, see [the chapter on them](passes.html).
+
+See also `--no-defaults`.
+
+## `--plugins`:
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --plugins foo bar
+```
+
+For more, see the [chapter on plugins](plugins.html).
+
+See also: `--plugin-path`.
+
+## `--no-defaults`: don't run default passes
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --no-defaults
+```
+
+By default, `rustdoc` will run several passes over your code. This
+removes those defaults, allowing you to use `--passes` to specify
+exactly which passes you want.
+
+For more details on passes, see [the chapter on them](passes.html).
+
+See also `--passes`.
+
+## `--test`: run code examples as tests
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --test
+```
+
+This flag will run your code examples as tests. For more, see [the chapter
+on documentation tests](documentation-tests.html).
+
+See also `--test-args`.
+
+## `--test-args`:
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --test --test-args ignored
+```
+
+This flag will pass options to the test runner when running documentation tests.
+For more, see [the chapter on documentation tests](documentation-tests.html).
+
+See also `--test`.
+
+## `--target`:
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --target x86_64-pc-windows-gnu
+```
+
+Similar to the `--target` flag for `rustc`, this generates documentation
+for a target triple that's different than your host triple.
+
+All of the usual caveats of cross-compiling code apply.
+
+## `--markdown-css`: include more CSS files when rendering markdown
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc README.md --markdown-css foo.css
+```
+
+When rendering Markdown files, this will create a `<link>` element in the
+`<head>` section of the generated HTML. For example, with the invocation above,
+
+```html
+<link rel="stylesheet" type="text/css" href="foo.css">
+```
+
+will be added.
+
+When rendering Rust files, this flag is ignored.
+
+## `--html-in-header`: include more HTML in <head>
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --html-in-header header.html
+$ rustdoc README.md --html-in-header header.html
+```
+
+This flag takes a list of files, and inserts them into the `<head>` section of
+the rendered documentation.
+
+## `--html-before-content`: include more HTML before the content
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --html-before-content extra.html
+$ rustdoc README.md --html-before-content extra.html
+```
+
+This flag takes a list of files, and inserts them inside the `<body>` tag but
+before the other content `rustdoc` would normally produce in the rendered
+documentation.
+
+## `--html-after-content`: include more HTML after the content
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --html-after-content extra.html
+$ rustdoc README.md --html-after-content extra.html
+```
+
+This flag takes a list of files, and inserts them before the `</body>` tag but
+after the other content `rustdoc` would normally produce in the rendered
+documentation.
+
+
+## `--markdown-playground-url`: control the location of the playground
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc README.md --markdown-playground-url https://play.rust-lang.org/
+```
+
+When rendering a Markdown file, this flag gives the base URL of the Rust
+Playground, to use for generating `Run` buttons.
+
+
+## `--markdown-no-toc`: don't generate a table of contents
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc README.md --markdown-no-toc
+```
+
+When generating documentation from a Markdown file, by default, `rustdoc` will
+generate a table of contents. This flag suppresses that, and no TOC will be
+generated.
+
+
+## `-e`/`--extend-css`: extend rustdoc's CSS
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -e extra.css
+$ rustdoc src/lib.rs --extend-css extra.css
+```
+
+With this flag, the contents of the files you pass are included at the bottom
+of Rustdoc's `theme.css` file.
+
+While this flag is stable, the contents of `theme.css` are not, so be careful!
+Updates may break your theme extensions.
+
+## `--sysroot`: override the system root
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs --sysroot /path/to/sysroot
+```
+
+Similar to `rustc --sysroot`, this lets you change the sysroot `rustdoc` uses
+when compiling your code.
\ No newline at end of file
--- /dev/null
+# Documentation tests
+
+Coming soon!
\ No newline at end of file
--- /dev/null
+# In-source directives
+
+Coming soon!
\ No newline at end of file
--- /dev/null
+# Passes
+
+Coming soon!
\ No newline at end of file
--- /dev/null
+# Plugins
+
+Coming soon!
\ No newline at end of file
--- /dev/null
+# What is rustdoc?
+
+The standard Rust distribution ships with a tool called `rustdoc`. Its job is
+to generate documentation for Rust projects. On a fundamental level, Rustdoc
+takes as an argument either a crate root or a Markdown file, and produces HTML,
+CSS, and JavaScript.
+
+## Basic usage
+
+Let's give it a try! Let's create a new project with Cargo:
+
+```bash
+$ cargo new docs
+$ cd docs
+```
+
+In `src/lib.rs`, you'll find that Cargo has generated some sample code. Delete
+it and replace it with this:
+
+```rust
+/// foo is a function
+fn foo() {}
+```
+
+Let's run `rustdoc` on our code. To do so, we can call it with the path to
+our crate root like this:
+
+```bash
+$ rustdoc src/lib.rs
+```
+
+This will create a new directory, `doc`, with a website inside! In our case,
+the main page is located in `doc/lib/index.html`. If you open that up in
+a web browser, you'll see a page with a search bar, and "Crate lib" at the
+top, with no contents. There are two problems with this: first, why does it
+think that our package is named "lib"? Second, why does it not have any
+contents?
+
+The first problem is due to `rustdoc` trying to be helpful; like `rustc`,
+it assumes that our crate's name is the name of the file for the crate
+root. To fix this, we can pass in a command-line flag:
+
+```bash
+$ rustdoc src/lib.rs --crate-name docs
+```
+
+Now, `doc/docs/index.html` will be generated, and the page says "Crate docs."
+
+For the second issue, it's because our function `foo` is not public; `rustdoc`
+defaults to generating documentation for only public functions. If we change
+our code...
+
+```rust
+/// foo is a function
+pub fn foo() {}
+```
+
+... and then re-run `rustdoc`:
+
+```bash
+$ rustdoc src/lib.rs --crate-name docs
+```
+
+We'll have some generated documentation. Open up `doc/docs/index.html` and
+check it out! It should show a link to the `foo` function's page, which
+is located at `doc/docs/fn.foo.html`. On that page, you'll see the "foo is
+a function" we put inside the documentation comment in our crate.
+
+## Using rustdoc with Cargo
+
+Cargo also has integration with `rustdoc` to make it easier to generate
+docs. Instead of the `rustdoc` command, we could have done this:
+
+```bash
+$ cargo doc
+```
+
+Internally, this calls out to `rustdoc` like this:
+
+```bash
+$ rustdoc --crate-name docs src\lib.rs -o <path>\docs\target\doc -L
+dependency=<path>\docs\target\debug\deps
+```
+
+You can see this with `cargo doc --verbose`.
+
+It generates the correct `--crate-name` for us, as well as pointing to
+`src/lib.rs`. But what about those other arguments? `-o` controls the
+*o*utput of our docs. Instead of a top-level `doc` directory, you'll
+notice that Cargo puts generated documentation under `target`. That's
+the idiomatic place for generated files in Cargo projects. Also, it
+passes `-L`, a flag that helps rustdoc find the dependencies
+your code relies on. If our project used dependencies, we'd get
+documentation for them as well!
+
+## Using standalone Markdown files
+
+`rustdoc` can also generate HTML from standalone Markdown files. Let's
+give it a try: create a `README.md` file with these contents:
+
+```text
+ # Docs
+
+ This is a project to test out `rustdoc`.
+
+ [Here is a link!](https://www.rust-lang.org)
+
+ ## Subheading
+
+ ```rust
+ fn foo() -> i32 {
+ 1 + 1
+ }
+ ```
+```
+
+And call `rustdoc` on it:
+
+```bash
+$ rustdoc README.md
+```
+
+You'll find an HTML file in `docs/doc/README.html` generated from its
+Markdown contents.
+
+Cargo currently does not understand standalone Markdown files, unfortunately.
+
+## Summary
+
+This covers the simplest use-cases of `rustdoc`. The rest of this book will
+explain all of the options that `rustdoc` has, and how to use them.
\ No newline at end of file
TYPE_KIND_PTR = 15
TYPE_KIND_FIXED_SIZE_VEC = 16
TYPE_KIND_REGULAR_UNION = 17
+TYPE_KIND_OS_STRING = 18
ENCODED_ENUM_PREFIX = "RUST$ENCODED$ENUM$"
ENUM_DISR_FIELD_NAME = "RUST$ENUM$DISR"
# std::String related constants
STD_STRING_FIELD_NAMES = ["vec"]
+# std::ffi::OsString related constants
+OS_STRING_FIELD_NAMES = ["inner"]
+
class Type(object):
"""
self.__conforms_to_field_layout(STD_STRING_FIELD_NAMES)):
return TYPE_KIND_STD_STRING
+ # OS STRING
+ if (unqualified_type_name == "OsString" and
+ self.__conforms_to_field_layout(OS_STRING_FIELD_NAMES)):
+ return TYPE_KIND_OS_STRING
+
# ENUM VARIANTS
if fields[0].name == ENUM_DISR_FIELD_NAME:
if field_count == 1:
return qualified_type_name
else:
return qualified_type_name[index + 2:]
+
+try:
+ compat_str = unicode # Python 2
+except NameError:
+ compat_str = str
def as_integer(self):
if self.gdb_val.type.code == gdb.TYPE_CODE_PTR:
- return int(str(self.gdb_val), 0)
+ as_str = rustpp.compat_str(self.gdb_val).split()[0]
+ return int(as_str, 0)
return int(self.gdb_val)
def get_wrapped_value(self):
val = GdbValue(gdb_val)
type_kind = val.type.get_type_kind()
- if (type_kind == rustpp.TYPE_KIND_REGULAR_STRUCT or
- type_kind == rustpp.TYPE_KIND_EMPTY):
+ if type_kind == rustpp.TYPE_KIND_EMPTY:
+ return RustEmptyPrinter(val)
+
+ if type_kind == rustpp.TYPE_KIND_REGULAR_STRUCT:
return RustStructPrinter(val,
omit_first_field = False,
omit_type_name = False,
if type_kind == rustpp.TYPE_KIND_STD_STRING:
return RustStdStringPrinter(val)
+ if type_kind == rustpp.TYPE_KIND_OS_STRING:
+ return RustOsStringPrinter(val)
+
if type_kind == rustpp.TYPE_KIND_TUPLE:
return RustStructPrinter(val,
omit_first_field = False,
#=------------------------------------------------------------------------------
# Pretty Printer Classes
#=------------------------------------------------------------------------------
+class RustEmptyPrinter(object):
+ def __init__(self, val):
+ self.__val = val
+
+ def to_string(self):
+ return self.__val.type.get_unqualified_type_name()
+
+
class RustStructPrinter(object):
def __init__(self, val, omit_first_field, omit_type_name, is_tuple_like):
self.__val = val
cs = []
wrapped_value = self.__val.get_wrapped_value()
- for field in self.__val.type.get_fields():
+ for number, field in enumerate(self.__val.type.get_fields()):
field_value = wrapped_value[field.name]
if self.__is_tuple_like:
- cs.append(("", field_value))
+ cs.append((str(number), field_value))
else:
cs.append((field.name, field_value))
length=length)
+class RustOsStringPrinter(object):
+ def __init__(self, val):
+ self.__val = val
+
+ def to_string(self):
+ buf = self.__val.get_child_at_index(0)
+ vec = buf.get_child_at_index(0)
+ if vec.type.get_unqualified_type_name() == "Wtf8Buf":
+ vec = vec.get_child_at_index(0)
+
+ (length, data_ptr, cap) = rustpp.extract_length_ptr_and_cap_from_std_vec(
+ vec)
+ return '"%s"' % data_ptr.get_wrapped_value().string(length=length)
+
+
class RustCStyleVariantPrinter(object):
def __init__(self, val):
assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_ENUM
{
"platform": "arm_v",
"intrinsic_prefix": "",
- "llvm_prefix": "llvm.neon.v",
+ "llvm_prefix": "llvm.arm.neon.v",
"number_info": {
"signed": {
"kind": "s",
-Subproject commit 3288e0659c08fb5006f6d6dd4b5675ed0c2c432a
+Subproject commit 11bfb0dcf85f7aa92abd30524bb1e42e18d108c6
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
-/// A thread-safe reference-counting pointer.
+/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
+/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
#![allow(deprecated)]
-//! Single-threaded reference-counting pointers.
+//! Single-threaded reference-counting pointers. 'Rc' stands for 'Reference
+//! Counted'.
//!
//! The type [`Rc<T>`][`Rc`] provides shared ownership of a value of type `T`,
//! allocated in the heap. Invoking [`clone`][clone] on [`Rc`] produces a new
value: T,
}
-/// A single-threaded reference-counting pointer.
+/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
+/// Counted'.
///
/// See the [module-level documentation](./index.html) for more details.
///
.env("AR", &ar)
.env("RANLIB", format!("{} s", ar.display()));
- if target.contains("ios") {
+ if target.contains("windows") {
+ // A bit of history here, this used to be --enable-lazy-lock added in
+ // #14006 which was filed with jemalloc in jemalloc/jemalloc#83 which
+ // was also reported to MinGW:
+ //
+ // http://sourceforge.net/p/mingw-w64/bugs/395/
+ //
+ // When updating jemalloc to 4.0, however, it was found that binaries
+ // would exit with the status code STATUS_RESOURCE_NOT_OWNED indicating
+ // that a thread was unlocking a mutex it never locked. Disabling this
+ // "lazy lock" option seems to fix the issue, but it was enabled by
+ // default for MinGW targets in 13473c7 for jemalloc.
+ //
+ // As a result of all that, force disabling lazy lock on Windows, and
+ // after reading some code it at least *appears* that the initialization
+ // of mutexes is otherwise ok in jemalloc, so shouldn't cause problems
+ // hopefully...
+ //
+ // tl;dr: make windows behave like other platforms by disabling lazy
+ // locking, but requires passing an option due to a historical
+ // default with jemalloc.
+ cmd.arg("--disable-lazy-lock");
+ } else if target.contains("ios") {
cmd.arg("--disable-tls");
} else if target.contains("android") {
// We force android to have prefixed symbols because apparently
core_str::StrExt::rsplitn(self, n, pat)
}
- /// An iterator over the matches of a pattern within the given string
+ /// An iterator over the disjoint matches of a pattern within the given string
/// slice.
///
/// The pattern can be a `&str`, [`char`], or a closure that
core_str::StrExt::matches(self, pat)
}
- /// An iterator over the matches of a pattern within this string slice,
+ /// An iterator over the disjoint matches of a pattern within this string slice,
/// yielded in reverse order.
///
/// The pattern can be a `&str`, [`char`], or a closure that determines if
#![feature(repr_align)]
#![feature(slice_rotate)]
#![feature(splice)]
+#![feature(str_checked_slicing)]
#![feature(str_escape)]
#![feature(test)]
#![feature(unboxed_closures)]
&"中华Việt Nam"[0..2];
}
+#[test]
+#[should_panic]
+fn test_str_slice_rangetoinclusive_max_panics() {
+ &"hello"[...usize::max_value()];
+}
+
+#[test]
+#[should_panic]
+fn test_str_slice_rangeinclusive_max_panics() {
+ &"hello"[1...usize::max_value()];
+}
+
+#[test]
+#[should_panic]
+fn test_str_slicemut_rangetoinclusive_max_panics() {
+ let mut s = "hello".to_owned();
+ let s: &mut str = &mut s;
+ &mut s[...usize::max_value()];
+}
+
+#[test]
+#[should_panic]
+fn test_str_slicemut_rangeinclusive_max_panics() {
+ let mut s = "hello".to_owned();
+ let s: &mut str = &mut s;
+ &mut s[1...usize::max_value()];
+}
+
+#[test]
+fn test_str_get_maxinclusive() {
+ let mut s = "hello".to_owned();
+ {
+ let s: &str = &s;
+ assert_eq!(s.get(...usize::max_value()), None);
+ assert_eq!(s.get(1...usize::max_value()), None);
+ }
+ {
+ let s: &mut str = &mut s;
+ assert_eq!(s.get(...usize::max_value()), None);
+ assert_eq!(s.get(1...usize::max_value()), None);
+ }
+}
#[test]
fn test_is_char_boundary() {
// except according to those terms.
//! A contiguous growable array type with heap-allocated contents, written
-//! `Vec<T>` but pronounced 'vector.'
+//! `Vec<T>`.
//!
//! Vectors have `O(1)` indexing, amortized `O(1)` push (to the end) and
//! `O(1)` pop (from the end).
}
}
- /// Replaces the contained value.
+ /// Replaces the contained value, and returns it.
///
/// # Examples
///
/// ```
/// use std::cell::Cell;
///
- /// let c = Cell::new(5);
- /// let old = c.replace(10);
- ///
- /// assert_eq!(5, old);
+ /// let cell = Cell::new(5);
+ /// assert_eq!(cell.get(), 5);
+ /// assert_eq!(cell.replace(10), 5);
+ /// assert_eq!(cell.get(), 10);
/// ```
#[stable(feature = "move_cell", since = "1.17.0")]
pub fn replace(&self, val: T) -> T {
/// ```
pub fn ctlz<T>(x: T) -> T;
+ /// Like `ctlz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz_nonzero;
+ ///
+ /// let x = 0b0001_1100_u8;
+ /// let num_leading = unsafe { ctlz_nonzero(x) };
+ /// assert_eq!(num_leading, 3);
+ /// ```
+ #[cfg(not(stage0))]
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+
/// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
///
/// # Examples
/// ```
pub fn cttz<T>(x: T) -> T;
+ /// Like `cttz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz_nonzero;
+ ///
+ /// let x = 0b0011_1000_u8;
+ /// let num_trailing = unsafe { cttz_nonzero(x) };
+ /// assert_eq!(num_trailing, 3);
+ /// ```
+ #[cfg(not(stage0))]
+ pub fn cttz_nonzero<T>(x: T) -> T;
+
/// Reverses the bytes in an integer type `T`.
pub fn bswap<T>(x: T) -> T;
/// Creates an iterator starting at the same point, but stepping by
/// the given amount at each iteration.
///
- /// Note that it will always return the first element of the range,
+ /// Note that it will always return the first element of the iterator,
/// regardless of the step given.
///
/// # Panics
impl<T, U, E> Sum<Result<U, E>> for Result<T, E>
where T: Sum<U>,
{
+ /// Takes each element in the `Iterator`: if it is an `Err`, no further
+ /// elements are taken, and the `Err` is returned. Should no `Err` occur,
+ /// the sum of all elements is returned.
+ ///
+ /// # Examples
+ ///
+ /// This sums up every integer in a vector, rejecting the sum if a negative
+ /// element is encountered:
+ ///
+ /// ```
+ /// let v = vec![1, 2];
+ /// let res: Result<i32, &'static str> = v.iter().map(|&x: &i32|
+ /// if x < 0 { Err("Negative element found") }
+ /// else { Ok(x) }
+ /// ).sum();
+ /// assert_eq!(res, Ok(3));
+ /// ```
fn sum<I>(iter: I) -> Result<T, E>
where I: Iterator<Item = Result<U, E>>,
{
impl<T, U, E> Product<Result<U, E>> for Result<T, E>
where T: Product<U>,
{
+ /// Takes each element in the `Iterator`: if it is an `Err`, no further
+ /// elements are taken, and the `Err` is returned. Should no `Err` occur,
+ /// the product of all elements is returned.
fn product<I>(iter: I) -> Result<T, E>
where I: Iterator<Item = Result<U, E>>,
{
/// An iterator that always continues to yield `None` when exhausted.
///
/// Calling next on a fused iterator that has returned `None` once is guaranteed
-/// to return [`None`] again. This trait is should be implemented by all iterators
+/// to return [`None`] again. This trait should be implemented by all iterators
/// that behave this way because it allows for some significant optimizations.
///
/// Note: In general, you should not use `FusedIterator` in generic bounds if
/// This will invoke the [`panic!`] macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
+/// # Uses
+///
/// Assertions are always checked in both debug and release builds, and cannot
/// be disabled. See [`debug_assert!`] for assertions that are not enabled in
/// release builds by default.
/// Other use-cases of `assert!` include [testing] and enforcing run-time
/// invariants in safe code (whose violation cannot result in unsafety).
///
-/// This macro has a second version, where a custom panic message can
+/// # Custom Messages
+///
+/// This macro has a second form, where a custom panic message can
/// be provided with or without arguments for formatting.
///
/// [`panic!`]: macro.panic.html
);
}
-/// Asserts that two expressions are equal to each other.
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
-/// Like [`assert!`], this macro has a second version, where a custom
+/// Like [`assert!`], this macro has a second form, where a custom
/// panic message can be provided.
///
+/// [`PartialEq`]: cmp/trait.PartialEq.html
/// [`assert!`]: macro.assert.html
///
/// # Examples
});
}
-/// Asserts that two expressions are not equal to each other.
+/// Asserts that two expressions are not equal to each other (using [`PartialEq`]).
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
-/// Like `assert!()`, this macro has a second version, where a custom
+/// Like [`assert!`], this macro has a second form, where a custom
/// panic message can be provided.
///
+/// [`PartialEq`]: cmp/trait.PartialEq.html
/// [`assert!`]: macro.assert.html
///
/// # Examples
/// Like [`assert!`], this macro also has a second version, where a custom panic
/// message can be provided.
///
+/// # Uses
+///
/// Unlike [`assert!`], `debug_assert!` statements are only enabled in non
/// optimized builds by default. An optimized build will omit all
/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! unimplemented {
- () => (panic!("not yet implemented"))
+ () => (panic!("not yet implemented"));
+ ($($arg:tt)+) => (panic!("not yet implemented: {}", format_args!($($arg)*)));
}
/// Built-in macros to the compiler itself.
/// but not `Copy`.
///
/// [`Clone`] is a supertrait of `Copy`, so everything which is `Copy` must also implement
-/// [`Clone`]. If a type is `Copy` then its [`Clone`] implementation need only return `*self`
+/// [`Clone`]. If a type is `Copy` then its [`Clone`] implementation only needs to return `*self`
/// (see the example above).
///
/// ## When can my type be `Copy`?
/// [`Clone`][clone]. You need the value's destructor to run only once,
/// because a double `free` is undefined behavior.
///
-/// An example is the definition of [`mem::swap`][swap] in this module:
+/// An example is a possible implementation of [`mem::swap`][swap]:
///
/// ```
/// use std::mem;
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
- // Give ourselves some scratch space to work with
- let mut t: T = uninitialized();
-
- // Perform the swap, `&mut` pointers never alias
- ptr::copy_nonoverlapping(&*x, &mut t, 1);
- ptr::copy_nonoverlapping(&*y, x, 1);
- ptr::copy_nonoverlapping(&t, y, 1);
-
- // y and t now point to the same thing, but we need to completely
- // forget `t` because we do not want to run the destructor for `T`
- // on its value, which is still owned somewhere outside this function.
- forget(t);
+ // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
+ // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
+ // Haswell E processors. LLVM is more able to optimize if we give a struct a
+ // #[repr(simd)], even if we don't actually use this struct directly.
+ //
+ // FIXME repr(simd) broken on emscripten
+ #[cfg_attr(not(target_os = "emscripten"), repr(simd))]
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = size_of::<Block>();
+
+ // Get raw pointers to the bytes of x & y for easier manipulation
+ let x = x as *mut T as *mut u8;
+ let y = y as *mut T as *mut u8;
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let len = size_of::<T>();
+ let mut i = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t: Block = uninitialized();
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ ptr::copy_nonoverlapping(x, t, block_size);
+ ptr::copy_nonoverlapping(y, x, block_size);
+ ptr::copy_nonoverlapping(t, y, block_size);
+ i += block_size;
+ }
+
+
+ if i < len {
+ // Swap any remaining bytes, using aligned types to copy
+ // where appropriate (this information is lost by conversion
+ // to *mut u8, so restore it manually here)
+ let mut t: UnalignedBlock = uninitialized();
+ let rem = len - i;
+
+ let t = &mut t as *mut _ as *mut u8;
+ let x = x.offset(i as isize);
+ let y = y.offset(i as isize);
+
+ ptr::copy_nonoverlapping(x, t, rem);
+ ptr::copy_nonoverlapping(y, x, rem);
+ ptr::copy_nonoverlapping(t, y, rem);
+ }
}
}
use convert::TryFrom;
use fmt;
use intrinsics;
-use mem::size_of;
use str::FromStr;
/// Provides intentionally-wrapped arithmetic on `T`.
($SelfT:ty, $ActualT:ty, $BITS:expr,
$ctpop:path,
$ctlz:path,
+ $ctlz_nonzero:path,
$cttz:path,
$bswap:path,
$add_with_overflow:path,
(self.wrapping_sub(1)) & self == 0 && !(self == 0)
}
+ // Returns one less than next power of two.
+ // (For 8u8 next power of two is 8u8 and for 6u8 it is 8u8)
+ //
+ // 8u8.one_less_than_next_power_of_two() == 7
+ // 6u8.one_less_than_next_power_of_two() == 7
+ //
+ // This method cannot overflow, as in the `next_power_of_two`
+ // overflow cases it instead ends up returning the maximum value
+ // of the type, and can return 0 for 0.
+ #[inline]
+ fn one_less_than_next_power_of_two(self) -> Self {
+ if self <= 1 { return 0; }
+
+ // Because `p > 0`, it cannot consist entirely of leading zeros.
+ // That means the shift is always in-bounds, and some processors
+ // (such as intel pre-haswell) have more efficient ctlz
+ // intrinsics when the argument is non-zero.
+ let p = self - 1;
+ let z = unsafe { $ctlz_nonzero(p) };
+ <$SelfT>::max_value() >> z
+ }
+
/// Returns the smallest power of two greater than or equal to `self`.
- /// Unspecified behavior on overflow.
+ ///
+ /// When return value overflows (i.e. `self > (1 << (N-1))` for type
+ /// `uN`), it panics in debug mode and return value is wrapped to 0 in
+ /// release mode (the only situation in which method can return 0).
///
/// # Examples
///
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn next_power_of_two(self) -> Self {
- let bits = size_of::<Self>() * 8;
- let one: Self = 1;
- one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits)
+ self.one_less_than_next_power_of_two() + 1
}
/// Returns the smallest power of two greater than or equal to `n`. If
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn checked_next_power_of_two(self) -> Option<Self> {
- let npot = self.next_power_of_two();
- if npot >= self {
- Some(npot)
- } else {
- None
- }
+ self.one_less_than_next_power_of_two().checked_add(1)
}
}
}
+#[cfg(stage0)]
+unsafe fn ctlz_nonzero<T>(x: T) -> T { intrinsics::ctlz(x) }
+#[cfg(not(stage0))]
+unsafe fn ctlz_nonzero<T>(x: T) -> T { intrinsics::ctlz_nonzero(x) }
+
#[lang = "u8"]
impl u8 {
uint_impl! { u8, u8, 8,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { u16, u16, 16,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { u32, u32, 32,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { u64, u64, 64,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { u128, u128, 128,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { usize, u16, 16,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { usize, u32, 32,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
uint_impl! { usize, u64, 64,
intrinsics::ctpop,
intrinsics::ctlz,
+ ctlz_nonzero,
intrinsics::cttz,
intrinsics::bswap,
intrinsics::add_with_overflow,
/// checking for overflow:
///
/// ```
- /// use std::u32;
- ///
/// let v = vec![1, 2];
- /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|&x: &u32|
- /// if x == u32::MAX { Err("Overflow!") }
- /// else { Ok(x + 1) }
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
+ /// x.checked_add(1).ok_or("Overflow!")
/// ).collect();
/// assert!(res == Ok(vec![2, 3]));
/// ```
fn from_error(v: E) -> Self {
Err(v)
}
-}
\ No newline at end of file
+}
#[inline]
fn index(&self, index: ops::RangeTo<usize>) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(index.end) {
- unsafe { self.slice_unchecked(0, index.end) }
- } else {
- super::slice_error_fail(self, 0, index.end)
- }
+ index.index(self)
}
}
impl ops::IndexMut<ops::RangeTo<usize>> for str {
#[inline]
fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(index.end) {
- unsafe { self.slice_mut_unchecked(0, index.end) }
- } else {
- super::slice_error_fail(self, 0, index.end)
- }
+ index.index_mut(self)
}
}
#[inline]
fn index(&self, index: ops::RangeFrom<usize>) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(index.start) {
- unsafe { self.slice_unchecked(index.start, self.len()) }
- } else {
- super::slice_error_fail(self, index.start, self.len())
- }
+ index.index(self)
}
}
impl ops::IndexMut<ops::RangeFrom<usize>> for str {
#[inline]
fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(index.start) {
- let len = self.len();
- unsafe { self.slice_mut_unchecked(index.start, len) }
- } else {
- super::slice_error_fail(self, index.start, self.len())
- }
+ index.index_mut(self)
}
}
#[inline]
fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
- assert!(index.end != usize::max_value(),
- "attempted to index str up to maximum usize");
- self.index(index.start .. index.end+1)
+ index.index(self)
}
}
#[inline]
fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
- assert!(index.end != usize::max_value(),
- "attempted to index str up to maximum usize");
- self.index(.. index.end+1)
+ index.index(self)
}
}
impl ops::IndexMut<ops::RangeInclusive<usize>> for str {
#[inline]
fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
- assert!(index.end != usize::max_value(),
- "attempted to index str up to maximum usize");
- self.index_mut(index.start .. index.end+1)
+ index.index_mut(self)
}
}
#[unstable(feature = "inclusive_range",
impl ops::IndexMut<ops::RangeToInclusive<usize>> for str {
#[inline]
fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
- assert!(index.end != usize::max_value(),
- "attempted to index str up to maximum usize");
- self.index_mut(.. index.end+1)
+ index.index_mut(self)
}
}
}
#[inline]
fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ // is_char_boundary checks that the index is in [0, .len()]
if slice.is_char_boundary(self.end) {
unsafe { self.get_unchecked_mut(slice) }
} else {
}
#[inline]
fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ // is_char_boundary checks that the index is in [0, .len()]
if slice.is_char_boundary(self.start) {
unsafe { self.get_unchecked_mut(slice) }
} else {
type Output = str;
#[inline]
fn get(self, slice: &str) -> Option<&Self::Output> {
- (self.start..self.end+1).get(slice)
+ if let Some(end) = self.end.checked_add(1) {
+ (self.start..end).get(slice)
+ } else {
+ None
+ }
}
#[inline]
fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
- (self.start..self.end+1).get_mut(slice)
+ if let Some(end) = self.end.checked_add(1) {
+ (self.start..end).get_mut(slice)
+ } else {
+ None
+ }
}
#[inline]
unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
}
#[inline]
fn index(self, slice: &str) -> &Self::Output {
+ assert!(self.end != usize::max_value(),
+ "attempted to index str up to maximum usize");
(self.start..self.end+1).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ assert!(self.end != usize::max_value(),
+ "attempted to index str up to maximum usize");
(self.start..self.end+1).index_mut(slice)
}
}
type Output = str;
#[inline]
fn get(self, slice: &str) -> Option<&Self::Output> {
- if slice.is_char_boundary(self.end + 1) {
+ if self.end < usize::max_value() && slice.is_char_boundary(self.end + 1) {
Some(unsafe { self.get_unchecked(slice) })
} else {
None
}
#[inline]
fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
- if slice.is_char_boundary(self.end + 1) {
+ if self.end < usize::max_value() && slice.is_char_boundary(self.end + 1) {
Some(unsafe { self.get_unchecked_mut(slice) })
} else {
None
}
#[inline]
fn index(self, slice: &str) -> &Self::Output {
+ assert!(self.end != usize::max_value(),
+ "attempted to index str up to maximum usize");
let end = self.end + 1;
self.get(slice).unwrap_or_else(|| super::slice_error_fail(slice, 0, end))
}
#[inline]
fn index_mut(self, slice: &mut str) -> &mut Self::Output {
+ assert!(self.end != usize::max_value(),
+ "attempted to index str up to maximum usize");
if slice.is_char_boundary(self.end) {
unsafe { self.get_unchecked_mut(slice) }
} else {
pub const USING_SJLJ_EXCEPTIONS: bool = cfg!(all(target_os = "ios", target_arch = "arm"));
-pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext) -> EHAction {
+pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext)
+ -> Result<EHAction, ()>
+{
if lsda.is_null() {
- return EHAction::None;
+ return Ok(EHAction::None)
}
let func_start = context.func_start;
let start_encoding = reader.read::<u8>();
// base address for landing pad offsets
let lpad_base = if start_encoding != DW_EH_PE_omit {
- read_encoded_pointer(&mut reader, context, start_encoding)
+ read_encoded_pointer(&mut reader, context, start_encoding)?
} else {
func_start
};
if !USING_SJLJ_EXCEPTIONS {
while reader.ptr < action_table {
- let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding);
- let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding);
- let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding);
+ let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
+ let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
+ let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
let cs_action = reader.read_uleb128();
// Callsite table is sorted by cs_start, so if we've passed the ip, we
// may stop searching.
}
if ip < func_start + cs_start + cs_len {
if cs_lpad == 0 {
- return EHAction::None;
+ return Ok(EHAction::None)
} else {
let lpad = lpad_base + cs_lpad;
- return interpret_cs_action(cs_action, lpad);
+ return Ok(interpret_cs_action(cs_action, lpad))
}
}
}
// Ip is not present in the table. This should not happen... but it does: issue #35011.
// So rather than returning EHAction::Terminate, we do this.
- EHAction::None
+ Ok(EHAction::None)
} else {
// SjLj version:
// The "IP" is an index into the call-site table, with two exceptions:
// -1 means 'no-action', and 0 means 'terminate'.
match ip as isize {
- -1 => return EHAction::None,
- 0 => return EHAction::Terminate,
+ -1 => return Ok(EHAction::None),
+ 0 => return Ok(EHAction::Terminate),
_ => (),
}
let mut idx = ip;
// Can never have null landing pad for sjlj -- that would have
// been indicated by a -1 call site index.
let lpad = (cs_lpad + 1) as usize;
- return interpret_cs_action(cs_action, lpad);
+ return Ok(interpret_cs_action(cs_action, lpad))
}
}
}
}
#[inline]
-fn round_up(unrounded: usize, align: usize) -> usize {
- assert!(align.is_power_of_two());
- (unrounded + align - 1) & !(align - 1)
+fn round_up(unrounded: usize, align: usize) -> Result<usize, ()> {
+ if align.is_power_of_two() {
+ Ok((unrounded + align - 1) & !(align - 1))
+ } else {
+ Err(())
+ }
}
unsafe fn read_encoded_pointer(reader: &mut DwarfReader,
context: &EHContext,
encoding: u8)
- -> usize {
- assert!(encoding != DW_EH_PE_omit);
+ -> Result<usize, ()> {
+ if encoding == DW_EH_PE_omit {
+ return Err(())
+ }
// DW_EH_PE_aligned implies it's an absolute pointer value
if encoding == DW_EH_PE_aligned {
- reader.ptr = round_up(reader.ptr as usize, mem::size_of::<usize>()) as *const u8;
- return reader.read::<usize>();
+ reader.ptr = round_up(reader.ptr as usize, mem::size_of::<usize>())? as *const u8;
+ return Ok(reader.read::<usize>())
}
let mut result = match encoding & 0x0F {
DW_EH_PE_sdata2 => reader.read::<i16>() as usize,
DW_EH_PE_sdata4 => reader.read::<i32>() as usize,
DW_EH_PE_sdata8 => reader.read::<i64>() as usize,
- _ => panic!(),
+ _ => return Err(()),
};
result += match encoding & 0x70 {
// relative to address of the encoded value, despite the name
DW_EH_PE_pcrel => reader.ptr as usize,
DW_EH_PE_funcrel => {
- assert!(context.func_start != 0);
+ if context.func_start == 0 {
+ return Err(())
+ }
context.func_start
}
DW_EH_PE_textrel => (*context.get_text_start)(),
DW_EH_PE_datarel => (*context.get_data_start)(),
- _ => panic!(),
+ _ => return Err(()),
};
if encoding & DW_EH_PE_indirect != 0 {
result = *(result as *const usize);
}
- result
+ Ok(result)
}
if version != 1 {
return uw::_URC_FATAL_PHASE1_ERROR;
}
- let eh_action = find_eh_action(context);
+ let eh_action = match find_eh_action(context) {
+ Ok(action) => action,
+ Err(_) => return uw::_URC_FATAL_PHASE1_ERROR,
+ };
if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 {
match eh_action {
EHAction::None |
// _Unwind_Context in our libunwind bindings and fetch the required data from there directly,
// bypassing DWARF compatibility functions.
- let eh_action = find_eh_action(context);
+ let eh_action = match find_eh_action(context) {
+ Ok(action) => action,
+ Err(_) => return uw::_URC_FAILURE,
+ };
if search_phase {
match eh_action {
EHAction::None |
}
}
-unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> EHAction {
+unsafe fn find_eh_action(context: *mut uw::_Unwind_Context)
+ -> Result<EHAction, ()>
+{
let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8;
let mut ip_before_instr: c_int = 0;
let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr);
get_data_start: &|| unimplemented!(),
};
match find_eh_action(dc.HandlerData, &eh_ctx) {
- EHAction::None => None,
- EHAction::Cleanup(lpad) |
- EHAction::Catch(lpad) => Some(lpad),
- EHAction::Terminate => intrinsics::abort(),
+ Err(_) |
+ Ok(EHAction::None) => None,
+ Ok(EHAction::Cleanup(lpad)) |
+ Ok(EHAction::Catch(lpad)) => Some(lpad),
+ Ok(EHAction::Terminate) => intrinsics::abort(),
}
}
1. HIR nodes (like `Hir(DefId)`) represent the HIR input itself.
2. Data nodes (like `ItemSignature(DefId)`) represent some computed
information about a particular item.
-3. Procedure notes (like `CoherenceCheckImpl(DefId)`) represent some
+3. Procedure nodes (like `CoherenceCheckTrait(DefId)`) represent some
procedure that is executing. Usually this procedure is
performing some kind of check for errors. You can think of them as
computed values where the value being computed is `()` (and the
us what *else* we have to recompile. Shared state is anything that is
used to communicate results from one item to another.
-### Identifying the current task
+### Identifying the current task, tracking reads/writes, etc
-The dep graph always tracks a current task: this is basically the
-`DepNode` that the compiler is computing right now. Typically it would
-be a procedure node, but it can also be a data node (as noted above,
-the two are kind of equivalent).
-
-You set the current task by calling `dep_graph.in_task(node)`. For example:
-
-```rust
-let _task = tcx.dep_graph.in_task(DepNode::Privacy);
-```
-
-Now all the code until `_task` goes out of scope will be considered
-part of the "privacy task".
-
-The tasks are maintained in a stack, so it is perfectly fine to nest
-one task within another. Because pushing a task is considered to be
-computing a value, when you nest a task `N2` inside of a task `N1`, we
-automatically add an edge `N2 -> N1` (since `N1` presumably needed the
-result of `N2` to complete):
-
-```rust
-let _n1 = tcx.dep_graph.in_task(DepNode::N1);
-let _n2 = tcx.dep_graph.in_task(DepNode::N2);
-// this will result in an edge N1 -> n2
-```
-
-### Ignore tasks
-
-Although it is rarely needed, you can also push a special "ignore"
-task:
-
-```rust
-let _ignore = tc.dep_graph.in_ignore();
-```
-
-This will cause all read/write edges to be ignored until it goes out
-of scope or until something else is pushed. For example, we could
-suppress the edge between nested tasks like so:
-
-```rust
-let _n1 = tcx.dep_graph.in_task(DepNode::N1);
-let _ignore = tcx.dep_graph.in_ignore();
-let _n2 = tcx.dep_graph.in_task(DepNode::N2);
-// now no edge is added
-```
-
-### Tracking reads and writes
-
-We need to identify what shared state is read/written by the current
-task as it executes. The most fundamental way of doing that is to invoke
-the `read` and `write` methods on `DepGraph`:
-
-```rust
-// Adds an edge from DepNode::Hir(some_def_id) to the current task
-tcx.dep_graph.read(DepNode::Hir(some_def_id))
-
-// Adds an edge from the current task to DepNode::ItemSignature(some_def_id)
-tcx.dep_graph.write(DepNode::ItemSignature(some_def_id))
-```
-
-However, you should rarely need to invoke those methods directly.
-Instead, the idea is to *encapsulate* shared state into some API that
-will invoke `read` and `write` automatically. The most common way to
-do this is to use a `DepTrackingMap`, described in the next section,
-but any sort of abstraction barrier will do. In general, the strategy
-is that getting access to information implicitly adds an appropriate
-`read`. So, for example, when you use the
-`dep_graph::visit_all_items_in_krate` helper method, it will visit
-each item `X`, start a task `Foo(X)` for that item, and automatically
-add an edge `Hir(X) -> Foo(X)`. This edge is added because the code is
-being given access to the HIR node for `X`, and hence it is expected
-to read from it. Similarly, reading from the `tcache` map for item `X`
-(which is a `DepTrackingMap`, described below) automatically invokes
-`dep_graph.read(ItemSignature(X))`.
-
-**Note:** adding `Hir` nodes requires a bit of caution due to the
-"inlining" that old trans and constant evaluation still use. See the
-section on inlining below.
-
-To make this strategy work, a certain amount of indirection is
-required. For example, modules in the HIR do not have direct pointers
-to the items that they contain. Rather, they contain node-ids -- one
-can then ask the HIR map for the item with a given node-id. This gives
-us an opportunity to add an appropriate read edge.
-
-#### Explicit calls to read and write when starting a new subtask
-
-One time when you *may* need to call `read` and `write` directly is
-when you push a new task onto the stack, either by calling `in_task`
-as shown above or indirectly, such as with the `memoize` pattern
-described below. In that case, any data that the task has access to
-from the surrounding environment must be explicitly "read". For
-example, in `librustc_typeck`, the collection code visits all items
-and, among other things, starts a subtask producing its signature
-(what follows is simplified pseudocode, of course):
-
-```rust
-fn visit_item(item: &hir::Item) {
- // Here, current subtask is "Collect(X)", and an edge Hir(X) -> Collect(X)
- // has automatically been added by `visit_all_items_in_krate`.
- let sig = signature_of_item(item);
-}
-
-fn signature_of_item(item: &hir::Item) {
- let def_id = tcx.map.local_def_id(item.id);
- let task = tcx.dep_graph.in_task(DepNode::ItemSignature(def_id));
- tcx.dep_graph.read(DepNode::Hir(def_id)); // <-- the interesting line
- ...
-}
-```
-
-Here you can see that, in `signature_of_item`, we started a subtask
-corresponding to producing the `ItemSignature`. This subtask will read from
-`item` -- but it gained access to `item` implicitly. This means that if it just
-reads from `item`, there would be missing edges in the graph:
-
- Hir(X) --+ // added by the explicit call to `read`
- | |
- | +---> ItemSignature(X) -> Collect(X)
- | ^
- | |
- +---------------------------------+ // added by `visit_all_items_in_krate`
-
-In particular, the edge from `Hir(X)` to `ItemSignature(X)` is only
-present because we called `read` ourselves when entering the `ItemSignature(X)`
-task.
-
-So, the rule of thumb: when entering a new task yourself, register
-reads on any shared state that you inherit. (This actually comes up
-fairly infrequently though: the main place you need caution is around
-memoization.)
+FIXME(#42293). This text needs to be rewritten for the new red-green
+system, which doesn't fully exist yet.
#### Dependency tracking map
use super::dep_node::DepNode;
use std::error::Error;
-use std::fmt::Debug;
/// A dep-node filter goes from a user-defined string to a query over
/// nodes. Right now the format is like this:
}
/// Tests whether `node` meets the filter, returning true if so.
- pub fn test<D: Clone + Debug>(&self, node: &DepNode<D>) -> bool {
+ pub fn test(&self, node: &DepNode) -> bool {
let debug_str = format!("{:?}", node);
self.text.split("&")
.map(|s| s.trim())
}
}
- pub fn test<D: Clone + Debug>(&self,
- source: &DepNode<D>,
- target: &DepNode<D>)
- -> bool {
+ pub fn test(&self,
+ source: &DepNode,
+ target: &DepNode)
+ -> bool {
self.source.test(source) && self.target.test(target)
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use hir::def_id::CrateNum;
-use std::fmt::Debug;
-use std::sync::Arc;
-
-macro_rules! try_opt {
- ($e:expr) => (
- match $e {
- Some(r) => r,
- None => return None,
+
+//! This module defines the `DepNode` type which the compiler uses to represent
+//! nodes in the dependency graph. A `DepNode` consists of a `DepKind` (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc)
+//! and a `Fingerprint`, a 128 bit hash value the exact meaning of which
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g. NodeId, DefId, Symbol) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit, and it has a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//!   without the need to do any "rebasing" (like we have to do for Spans and
+//! NodeIds) or "retracing" like we had to do for `DefId` in earlier
+//! implementations of the dependency graph.
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//! memory without any post-processing (e.g. "abomination-style" pointer
+//! reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//! refer to things that do not exist anymore. In previous implementations
+//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//! had been removed between the previous and the current compilation session
+//! could not be instantiated because the current compilation session
+//!   contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
+//! defines the `DepKind` enum and a corresponding `DepConstructor` enum. The
+//! `DepConstructor` enum links a `DepKind` to the parameters that are needed at
+//! runtime in order to construct a valid `DepNode` fingerprint.
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//! `DepNode`s, like `Krate`, represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//! in which case it is possible to map the node's fingerprint back to the
+//! `DefId` it was computed from. In other cases, too much information gets
+//! lost during fingerprint computation.
+//!
+//! The `DepConstructor` enum, together with `DepNode::new()` ensures that only
+//! valid `DepNode` instances can be constructed. For example, the API does not
+//! allow for constructing parameterless `DepNode`s with anything other
+//! than a zeroed out fingerprint. More generally speaking, it relieves the
+//! user of the `DepNode` API of having to know how to compute the expected
+//! fingerprint for a given set of node parameters.
+
+use hir::def_id::{CrateNum, DefId};
+use hir::map::DefPathHash;
+
+use ich::Fingerprint;
+use ty::TyCtxt;
+use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
+use ich::StableHashingContext;
+use std::hash::Hash;
+
+// erase!() just makes tokens go away. It's used to specify which macro argument
+// is repeated (i.e. which sub-expression of the macro we are in) but don't need
+// to actually use any of the arguments.
+macro_rules! erase {
+ ($x:tt) => ({})
+}
+
+macro_rules! define_dep_nodes {
+ ($(
+ $variant:ident $(( $($tuple_arg:tt),* ))*
+ $({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })*
+ ),*
+ ) => (
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+ pub enum DepKind {
+ $($variant),*
+ }
+
+ impl DepKind {
+ #[allow(unreachable_code)]
+ #[inline]
+ pub fn can_reconstruct_query_key(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => {
+ // tuple args
+ $({
+ return <( $($tuple_arg,)* ) as DepNodeParams>
+ ::CAN_RECONSTRUCT_QUERY_KEY;
+ })*
+
+ // struct args
+ $({
+ return <( $($struct_arg_ty,)* ) as DepNodeParams>
+ ::CAN_RECONSTRUCT_QUERY_KEY;
+ })*
+
+ true
+ }
+ )*
+ }
+ }
+
+ #[allow(unreachable_code)]
+ #[inline]
+ pub fn has_params(&self) -> bool {
+ match *self {
+ $(
+ DepKind :: $variant => {
+ // tuple args
+ $({
+ $(erase!($tuple_arg);)*
+ return true;
+ })*
+
+ // struct args
+ $({
+ $(erase!($struct_arg_name);)*
+ return true;
+ })*
+
+ false
+ }
+ )*
+ }
+ }
+ }
+
+ pub enum DepConstructor {
+ $(
+ $variant $(( $($tuple_arg),* ))*
+ $({ $($struct_arg_name : $struct_arg_ty),* })*
+ ),*
+ }
+
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+ pub struct DepNode {
+ pub kind: DepKind,
+ pub hash: Fingerprint,
}
- )
+
+ impl DepNode {
+ #[allow(unreachable_code, non_snake_case)]
+ pub fn new(tcx: TyCtxt, dep: DepConstructor) -> DepNode {
+ match dep {
+ $(
+ DepConstructor :: $variant $(( $($tuple_arg),* ))*
+ $({ $($struct_arg_name),* })*
+ =>
+ {
+ // tuple args
+ $({
+ let tupled_args = ( $($tuple_arg,)* );
+ let hash = DepNodeParams::to_fingerprint(&tupled_args,
+ tcx);
+ return DepNode {
+ kind: DepKind::$variant,
+ hash
+ };
+ })*
+
+ // struct args
+ $({
+ let tupled_args = ( $($struct_arg_name,)* );
+ let hash = DepNodeParams::to_fingerprint(&tupled_args,
+ tcx);
+ return DepNode {
+ kind: DepKind::$variant,
+ hash
+ };
+ })*
+
+ DepNode {
+ kind: DepKind::$variant,
+ hash: Fingerprint::zero(),
+ }
+ }
+ )*
+ }
+ }
+
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ #[inline]
+ pub fn from_def_path_hash(kind: DepKind,
+ def_path_hash: DefPathHash)
+ -> DepNode {
+ assert!(kind.can_reconstruct_query_key() && kind.has_params());
+ DepNode {
+ kind,
+ hash: def_path_hash.0,
+ }
+ }
+
+ /// Create a new, parameterless DepNode. This method will assert
+ /// that the DepNode corresponding to the given DepKind actually
+ /// does not require any parameters.
+ #[inline]
+ pub fn new_no_params(kind: DepKind) -> DepNode {
+ assert!(!kind.has_params());
+ DepNode {
+ kind,
+ hash: Fingerprint::zero(),
+ }
+ }
+
+ /// Extract the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ #[inline]
+ pub fn extract_def_id(&self, tcx: TyCtxt) -> Option<DefId> {
+ if self.kind.can_reconstruct_query_key() {
+ let def_path_hash = DefPathHash(self.hash);
+ tcx.def_path_hash_to_def_id
+ .as_ref()
+ .unwrap()
+ .get(&def_path_hash)
+ .cloned()
+ } else {
+ None
+ }
+ }
+
+ /// Used in testing
+ pub fn from_label_string(label: &str,
+ def_path_hash: DefPathHash)
+ -> Result<DepNode, ()> {
+ let kind = match label {
+ $(
+ stringify!($variant) => DepKind::$variant,
+ )*
+ _ => return Err(()),
+ };
+
+ if !kind.can_reconstruct_query_key() {
+ return Err(());
+ }
+
+ if kind.has_params() {
+ Ok(def_path_hash.to_dep_node(kind))
+ } else {
+ Ok(DepNode::new_no_params(kind))
+ }
+ }
+ }
+ );
}
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
-pub enum DepNode<D: Clone + Debug> {
- // The `D` type is "how definitions are identified".
- // During compilation, it is always `DefId`, but when serializing
- // it is mapped to `DefPath`.
-
- /// Represents the `Krate` as a whole (the `hir::Krate` value) (as
- /// distinct from the krate module). This is basically a hash of
- /// the entire krate, so if you read from `Krate` (e.g., by calling
- /// `tcx.hir.krate()`), we will have to assume that any change
- /// means that you need to be recompiled. This is because the
- /// `Krate` value gives you access to all other items. To avoid
- /// this fate, do not call `tcx.hir.krate()`; instead, prefer
- /// wrappers like `tcx.visit_all_items_in_krate()`. If there is no
- /// suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
- /// access to the krate, but you must remember to add suitable
- /// edges yourself for the individual items that you read.
- Krate,
+impl DefPathHash {
+ #[inline]
+ pub fn to_dep_node(self, kind: DepKind) -> DepNode {
+ DepNode::from_def_path_hash(kind, self)
+ }
+}
- /// Represents the HIR node with the given node-id
- Hir(D),
+impl DefId {
+ #[inline]
+ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
+ DepNode::from_def_path_hash(kind, tcx.def_path_hash(self))
+ }
+}
- /// Represents the body of a function or method. The def-id is that of the
- /// function/method.
- HirBody(D),
+define_dep_nodes!(
+ // Represents the `Krate` as a whole (the `hir::Krate` value) (as
+ // distinct from the krate module). This is basically a hash of
+ // the entire krate, so if you read from `Krate` (e.g., by calling
+ // `tcx.hir.krate()`), we will have to assume that any change
+ // means that you need to be recompiled. This is because the
+ // `Krate` value gives you access to all other items. To avoid
+ // this fate, do not call `tcx.hir.krate()`; instead, prefer
+ // wrappers like `tcx.visit_all_items_in_krate()`. If there is no
+ // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
+ // access to the krate, but you must remember to add suitable
+ // edges yourself for the individual items that you read.
+ Krate,
- /// Represents the metadata for a given HIR node, typically found
- /// in an extern crate.
- MetaData(D),
+ // Represents the HIR node with the given node-id
+ Hir(DefId),
- /// Represents some piece of metadata global to its crate.
- GlobalMetaData(D, GlobalMetaDataKind),
+ // Represents the body of a function or method. The def-id is that of the
+ // function/method.
+ HirBody(DefId),
- /// Represents some artifact that we save to disk. Note that these
- /// do not have a def-id as part of their identifier.
- WorkProduct(Arc<WorkProductId>),
+ // Represents the metadata for a given HIR node, typically found
+ // in an extern crate.
+ MetaData(DefId),
+
+ // Represents some artifact that we save to disk. Note that these
+ // do not have a def-id as part of their identifier.
+ WorkProduct(WorkProductId),
// Represents different phases in the compiler.
- RegionMaps(D),
+ RegionMaps(DefId),
Coherence,
Resolve,
- CoherenceCheckTrait(D),
- CoherenceCheckImpl(D),
- CoherenceOverlapCheck(D),
- CoherenceOverlapCheckSpecial(D),
- Variance,
+ CoherenceCheckTrait(DefId),
PrivacyAccessLevels(CrateNum),
// Represents the MIR for a fn; also used as the task node for
// things read/modify that MIR.
- MirKrate,
- Mir(D),
- MirShim(Vec<D>),
+ Mir(DefId),
+ MirShim(DefIdList),
BorrowCheckKrate,
- BorrowCheck(D),
- RvalueCheck(D),
+ BorrowCheck(DefId),
+ RvalueCheck(DefId),
Reachability,
MirKeys,
- LateLintCheck,
- TransCrateItem(D),
TransWriteMetadata,
CrateVariances,
// nodes. Often we map multiple tables to the same node if there
// is no point in distinguishing them (e.g., both the type and
// predicates for an item wind up in `ItemSignature`).
- AssociatedItems(D),
- ItemSignature(D),
- ItemVarianceConstraints(D),
- ItemVariances(D),
- IsForeignItem(D),
- TypeParamPredicates((D, D)),
- SizedConstraint(D),
- DtorckConstraint(D),
- AdtDestructor(D),
- AssociatedItemDefIds(D),
- InherentImpls(D),
+ AssociatedItems(DefId),
+ ItemSignature(DefId),
+ ItemVarianceConstraints(DefId),
+ ItemVariances(DefId),
+ IsForeignItem(DefId),
+ TypeParamPredicates { item_id: DefId, param_id: DefId },
+ SizedConstraint(DefId),
+ DtorckConstraint(DefId),
+ AdtDestructor(DefId),
+ AssociatedItemDefIds(DefId),
+ InherentImpls(DefId),
TypeckBodiesKrate,
- TypeckTables(D),
- UsedTraitImports(D),
- ConstEval(D),
- SymbolName(D),
- SpecializationGraph(D),
- ObjectSafety(D),
- IsCopy(D),
- IsSized(D),
- IsFreeze(D),
- NeedsDrop(D),
- Layout(D),
-
- /// The set of impls for a given trait. Ultimately, it would be
- /// nice to get more fine-grained here (e.g., to include a
- /// simplified type), but we can't do that until we restructure the
- /// HIR to distinguish the *header* of an impl from its body. This
- /// is because changes to the header may change the self-type of
- /// the impl and hence would require us to be more conservative
- /// than changes in the impl body.
- TraitImpls(D),
+ TypeckTables(DefId),
+ ConstEval(DefId),
+ SymbolName(DefId),
+ SpecializationGraph(DefId),
+ ObjectSafety(DefId),
+ IsCopy(DefId),
+ IsSized(DefId),
+ IsFreeze(DefId),
+ NeedsDrop(DefId),
+ Layout(DefId),
+
+ // The set of impls for a given trait. Ultimately, it would be
+ // nice to get more fine-grained here (e.g., to include a
+ // simplified type), but we can't do that until we restructure the
+ // HIR to distinguish the *header* of an impl from its body. This
+ // is because changes to the header may change the self-type of
+ // the impl and hence would require us to be more conservative
+ // than changes in the impl body.
+ TraitImpls(DefId),
AllLocalTraitImpls,
// Otherwise the write into the map would be incorrectly
// attributed to the first task that happened to fill the cache,
// which would yield an overly conservative dep-graph.
- TraitItems(D),
- ReprHints(D),
-
- /// Trait selection cache is a little funny. Given a trait
- /// reference like `Foo: SomeTrait<Bar>`, there could be
- /// arbitrarily many def-ids to map on in there (e.g., `Foo`,
- /// `SomeTrait`, `Bar`). We could have a vector of them, but it
- /// requires heap-allocation, and trait sel in general can be a
- /// surprisingly hot path. So instead we pick two def-ids: the
- /// trait def-id, and the first def-id in the input types. If there
- /// is no def-id in the input types, then we use the trait def-id
- /// again. So for example:
- ///
- /// - `i32: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
- /// - `u32: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
- /// - `Clone: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
- /// - `Vec<i32>: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Vec }`
- /// - `String: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: String }`
- /// - `Foo: Trait<Bar>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
- /// - `Foo: Trait<i32>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
- /// - `(Foo, Bar): Trait` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
- /// - `i32: Trait<Foo>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
- ///
- /// You can see that we map many trait refs to the same
- /// trait-select node. This is not a problem, it just means
- /// imprecision in our dep-graph tracking. The important thing is
- /// that for any given trait-ref, we always map to the **same**
- /// trait-select node.
- TraitSelect { trait_def_id: D, input_def_id: D },
-
- /// For proj. cache, we just keep a list of all def-ids, since it is
- /// not a hotspot.
- ProjectionCache { def_ids: Vec<D> },
-
- ParamEnv(D),
- DescribeDef(D),
- DefSpan(D),
- Stability(D),
- Deprecation(D),
- ItemBodyNestedBodies(D),
- ConstIsRvaluePromotableToStatic(D),
- ImplParent(D),
- TraitOfItem(D),
- IsExportedSymbol(D),
- IsMirAvailable(D),
- ItemAttrs(D),
- FnArgNames(D),
+ TraitItems(DefId),
+ ReprHints(DefId),
+
+ // Trait selection cache is a little funny. Given a trait
+ // reference like `Foo: SomeTrait<Bar>`, there could be
+ // arbitrarily many def-ids to map on in there (e.g., `Foo`,
+ // `SomeTrait`, `Bar`). We could have a vector of them, but it
+ // requires heap-allocation, and trait sel in general can be a
+ // surprisingly hot path. So instead we pick two def-ids: the
+ // trait def-id, and the first def-id in the input types. If there
+ // is no def-id in the input types, then we use the trait def-id
+ // again. So for example:
+ //
+ // - `i32: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
+ // - `u32: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
+ // - `Clone: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Clone }`
+ // - `Vec<i32>: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: Vec }`
+ // - `String: Clone` -> `TraitSelect { trait_def_id: Clone, self_def_id: String }`
+ // - `Foo: Trait<Bar>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
+ // - `Foo: Trait<i32>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
+ // - `(Foo, Bar): Trait` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
+ // - `i32: Trait<Foo>` -> `TraitSelect { trait_def_id: Trait, self_def_id: Foo }`
+ //
+ // You can see that we map many trait refs to the same
+ // trait-select node. This is not a problem, it just means
+ // imprecision in our dep-graph tracking. The important thing is
+ // that for any given trait-ref, we always map to the **same**
+ // trait-select node.
+ TraitSelect { trait_def_id: DefId, input_def_id: DefId },
+
+ // For proj. cache, we just keep a list of all def-ids, since it is
+ // not a hotspot.
+ ProjectionCache { def_ids: DefIdList },
+
+ ParamEnv(DefId),
+ DescribeDef(DefId),
+ DefSpan(DefId),
+ Stability(DefId),
+ Deprecation(DefId),
+ ItemBodyNestedBodies(DefId),
+ ConstIsRvaluePromotableToStatic(DefId),
+ ImplParent(DefId),
+ TraitOfItem(DefId),
+ IsExportedSymbol(DefId),
+ IsMirAvailable(DefId),
+ ItemAttrs(DefId),
+ FnArgNames(DefId)
+);
+
+trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool;
+ fn to_fingerprint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Fingerprint;
}
-impl<D: Clone + Debug> DepNode<D> {
- /// Used in testing
- pub fn from_label_string(label: &str, data: D) -> Result<DepNode<D>, ()> {
- macro_rules! check {
- ($($name:ident,)*) => {
- match label {
- $(stringify!($name) => Ok(DepNode::$name(data)),)*
- _ => Err(())
- }
- }
- }
+impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a, T> DepNodeParams<'a, 'gcx, 'tcx> for T
+ where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+{
+ default const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
- if label == "Krate" {
- // special case
- return Ok(DepNode::Krate);
- }
+ default fn to_fingerprint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Fingerprint {
+ let mut hcx = StableHashingContext::new(tcx);
+ let mut hasher = StableHasher::new();
- check! {
- BorrowCheck,
- Hir,
- HirBody,
- TransCrateItem,
- AssociatedItems,
- ItemSignature,
- ItemVariances,
- IsForeignItem,
- AssociatedItemDefIds,
- InherentImpls,
- TypeckTables,
- UsedTraitImports,
- TraitImpls,
- ReprHints,
- }
+ self.hash_stable(&mut hcx, &mut hasher);
+
+ hasher.finish()
}
+}
- pub fn map_def<E, OP>(&self, mut op: OP) -> Option<DepNode<E>>
- where OP: FnMut(&D) -> Option<E>, E: Clone + Debug
- {
- use self::DepNode::*;
-
- match *self {
- Krate => Some(Krate),
- BorrowCheckKrate => Some(BorrowCheckKrate),
- MirKrate => Some(MirKrate),
- TypeckBodiesKrate => Some(TypeckBodiesKrate),
- Coherence => Some(Coherence),
- CrateVariances => Some(CrateVariances),
- Resolve => Some(Resolve),
- Variance => Some(Variance),
- PrivacyAccessLevels(k) => Some(PrivacyAccessLevels(k)),
- Reachability => Some(Reachability),
- MirKeys => Some(MirKeys),
- LateLintCheck => Some(LateLintCheck),
- TransWriteMetadata => Some(TransWriteMetadata),
-
- // work product names do not need to be mapped, because
- // they are always absolute.
- WorkProduct(ref id) => Some(WorkProduct(id.clone())),
-
- IsCopy(ref d) => op(d).map(IsCopy),
- IsSized(ref d) => op(d).map(IsSized),
- IsFreeze(ref d) => op(d).map(IsFreeze),
- NeedsDrop(ref d) => op(d).map(NeedsDrop),
- Layout(ref d) => op(d).map(Layout),
- Hir(ref d) => op(d).map(Hir),
- HirBody(ref d) => op(d).map(HirBody),
- MetaData(ref d) => op(d).map(MetaData),
- CoherenceCheckTrait(ref d) => op(d).map(CoherenceCheckTrait),
- CoherenceCheckImpl(ref d) => op(d).map(CoherenceCheckImpl),
- CoherenceOverlapCheck(ref d) => op(d).map(CoherenceOverlapCheck),
- CoherenceOverlapCheckSpecial(ref d) => op(d).map(CoherenceOverlapCheckSpecial),
- Mir(ref d) => op(d).map(Mir),
- MirShim(ref def_ids) => {
- let def_ids: Option<Vec<E>> = def_ids.iter().map(op).collect();
- def_ids.map(MirShim)
- }
- BorrowCheck(ref d) => op(d).map(BorrowCheck),
- RegionMaps(ref d) => op(d).map(RegionMaps),
- RvalueCheck(ref d) => op(d).map(RvalueCheck),
- TransCrateItem(ref d) => op(d).map(TransCrateItem),
- AssociatedItems(ref d) => op(d).map(AssociatedItems),
- ItemSignature(ref d) => op(d).map(ItemSignature),
- ItemVariances(ref d) => op(d).map(ItemVariances),
- ItemVarianceConstraints(ref d) => op(d).map(ItemVarianceConstraints),
- IsForeignItem(ref d) => op(d).map(IsForeignItem),
- TypeParamPredicates((ref item, ref param)) => {
- Some(TypeParamPredicates((try_opt!(op(item)), try_opt!(op(param)))))
- }
- SizedConstraint(ref d) => op(d).map(SizedConstraint),
- DtorckConstraint(ref d) => op(d).map(DtorckConstraint),
- AdtDestructor(ref d) => op(d).map(AdtDestructor),
- AssociatedItemDefIds(ref d) => op(d).map(AssociatedItemDefIds),
- InherentImpls(ref d) => op(d).map(InherentImpls),
- TypeckTables(ref d) => op(d).map(TypeckTables),
- UsedTraitImports(ref d) => op(d).map(UsedTraitImports),
- ConstEval(ref d) => op(d).map(ConstEval),
- SymbolName(ref d) => op(d).map(SymbolName),
- SpecializationGraph(ref d) => op(d).map(SpecializationGraph),
- ObjectSafety(ref d) => op(d).map(ObjectSafety),
- TraitImpls(ref d) => op(d).map(TraitImpls),
- AllLocalTraitImpls => Some(AllLocalTraitImpls),
- TraitItems(ref d) => op(d).map(TraitItems),
- ReprHints(ref d) => op(d).map(ReprHints),
- TraitSelect { ref trait_def_id, ref input_def_id } => {
- op(trait_def_id).and_then(|trait_def_id| {
- op(input_def_id).and_then(|input_def_id| {
- Some(TraitSelect { trait_def_id: trait_def_id,
- input_def_id: input_def_id })
- })
- })
- }
- ProjectionCache { ref def_ids } => {
- let def_ids: Option<Vec<E>> = def_ids.iter().map(op).collect();
- def_ids.map(|d| ProjectionCache { def_ids: d })
- }
- ParamEnv(ref d) => op(d).map(ParamEnv),
- DescribeDef(ref d) => op(d).map(DescribeDef),
- DefSpan(ref d) => op(d).map(DefSpan),
- Stability(ref d) => op(d).map(Stability),
- Deprecation(ref d) => op(d).map(Deprecation),
- ItemAttrs(ref d) => op(d).map(ItemAttrs),
- FnArgNames(ref d) => op(d).map(FnArgNames),
- ImplParent(ref d) => op(d).map(ImplParent),
- TraitOfItem(ref d) => op(d).map(TraitOfItem),
- IsExportedSymbol(ref d) => op(d).map(IsExportedSymbol),
- ItemBodyNestedBodies(ref d) => op(d).map(ItemBodyNestedBodies),
- ConstIsRvaluePromotableToStatic(ref d) => op(d).map(ConstIsRvaluePromotableToStatic),
- IsMirAvailable(ref d) => op(d).map(IsMirAvailable),
- GlobalMetaData(ref d, kind) => op(d).map(|d| GlobalMetaData(d, kind)),
- }
+impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId,) {
+ const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
+
+ fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint {
+ tcx.def_path_hash(self.0).0
}
}
/// some independent path or string that persists between runs without
/// the need to be mapped or unmapped. (This ensures we can serialize
/// them even in the absence of a tcx.)
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
-pub struct WorkProductId(pub String);
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+pub struct WorkProductId {
+ hash: Fingerprint
+}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
-pub enum GlobalMetaDataKind {
- Krate,
- CrateDeps,
- DylibDependencyFormats,
- LangItems,
- LangItemsMissing,
- NativeLibraries,
- CodeMap,
- Impls,
- ExportedSymbols,
+impl WorkProductId {
+ pub fn from_cgu_name(cgu_name: &str) -> WorkProductId {
+ let mut hasher = StableHasher::new();
+ cgu_name.len().hash(&mut hasher);
+ cgu_name.hash(&mut hasher);
+ WorkProductId {
+ hash: hasher.finish()
+ }
+ }
+
+ pub fn from_fingerprint(fingerprint: Fingerprint) -> WorkProductId {
+ WorkProductId {
+ hash: fingerprint
+ }
+ }
+
+ pub fn to_dep_node(self) -> DepNode {
+ DepNode {
+ kind: DepKind::WorkProduct,
+ hash: self.hash,
+ }
+ }
}
+
+impl_stable_hash_for!(struct ::dep_graph::WorkProductId {
+ hash
+});
+
+type DefIdList = Vec<DefId>;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use hir::def_id::DefId;
use rustc_data_structures::fx::FxHashMap;
use std::cell::RefCell;
-use std::ops::Index;
use std::hash::Hash;
use std::marker::PhantomData;
+use ty::TyCtxt;
use util::common::MemoizationMap;
use super::{DepNode, DepGraph};
pub trait DepTrackingMapConfig {
type Key: Eq + Hash + Clone;
type Value: Clone;
- fn to_dep_node(key: &Self::Key) -> DepNode<DefId>;
+ fn to_dep_node(tcx: TyCtxt, key: &Self::Key) -> DepNode;
}
impl<M: DepTrackingMapConfig> DepTrackingMap<M> {
/// Registers a (synthetic) read from the key `k`. Usually this
/// is invoked automatically by `get`.
- fn read(&self, k: &M::Key) {
- let dep_node = M::to_dep_node(k);
+ fn read(&self, tcx: TyCtxt, k: &M::Key) {
+ let dep_node = M::to_dep_node(tcx, k);
self.graph.read(dep_node);
}
- pub fn get(&self, k: &M::Key) -> Option<&M::Value> {
- self.read(k);
+ pub fn get(&self, tcx: TyCtxt, k: &M::Key) -> Option<&M::Value> {
+ self.read(tcx, k);
self.map.get(k)
}
- pub fn contains_key(&self, k: &M::Key) -> bool {
- self.read(k);
+ pub fn contains_key(&self, tcx: TyCtxt, k: &M::Key) -> bool {
+ self.read(tcx, k);
self.map.contains_key(k)
}
/// The key is the line marked `(*)`: the closure implicitly
/// accesses the body of the item `item`, so we register a read
/// from `Hir(item_def_id)`.
- fn memoize<OP>(&self, key: M::Key, op: OP) -> M::Value
+ fn memoize<OP>(&self, tcx: TyCtxt, key: M::Key, op: OP) -> M::Value
where OP: FnOnce() -> M::Value
{
let graph;
{
let this = self.borrow();
if let Some(result) = this.map.get(&key) {
- this.read(&key);
+ this.read(tcx, &key);
return result.clone();
}
graph = this.graph.clone();
}
- let _task = graph.in_task(M::to_dep_node(&key));
+ let _task = graph.in_task(M::to_dep_node(tcx, &key));
let result = op();
self.borrow_mut().map.insert(key, result.clone());
result
}
}
-
-impl<'k, M: DepTrackingMapConfig> Index<&'k M::Key> for DepTrackingMap<M> {
- type Output = M::Value;
-
- #[inline]
- fn index(&self, k: &'k M::Key) -> &M::Value {
- self.get(k).unwrap()
- }
-}
-
// except according to those terms.
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use std::fmt::Debug;
-use std::hash::Hash;
use super::{DepGraphQuery, DepNode};
-pub struct DepGraphEdges<D: Clone + Debug + Eq + Hash> {
- nodes: Vec<DepNode<D>>,
- indices: FxHashMap<DepNode<D>, IdIndex>,
+pub struct DepGraphEdges {
+ nodes: Vec<DepNode>,
+ indices: FxHashMap<DepNode, IdIndex>,
edges: FxHashSet<(IdIndex, IdIndex)>,
open_nodes: Vec<OpenNode>,
}
Ignore,
}
-impl<D: Clone + Debug + Eq + Hash> DepGraphEdges<D> {
- pub fn new() -> DepGraphEdges<D> {
+impl DepGraphEdges {
+ pub fn new() -> DepGraphEdges {
DepGraphEdges {
nodes: vec![],
indices: FxHashMap(),
}
}
- fn id(&self, index: IdIndex) -> DepNode<D> {
+ fn id(&self, index: IdIndex) -> DepNode {
self.nodes[index.index()].clone()
}
/// Creates a node for `id` in the graph.
- fn make_node(&mut self, id: DepNode<D>) -> IdIndex {
+ fn make_node(&mut self, id: DepNode) -> IdIndex {
if let Some(&i) = self.indices.get(&id) {
return i;
}
assert_eq!(popped_node, OpenNode::Ignore);
}
- pub fn push_task(&mut self, key: DepNode<D>) {
+ pub fn push_task(&mut self, key: DepNode) {
let top_node = self.current_node();
let new_node = self.make_node(key);
}
}
- pub fn pop_task(&mut self, key: DepNode<D>) {
+ pub fn pop_task(&mut self, key: DepNode) {
let popped_node = self.open_nodes.pop().unwrap();
assert_eq!(OpenNode::Node(self.indices[&key]), popped_node);
}
/// effect. Note that *reading* from tracked state is harmless if
/// you are not in a task; what is bad is *writing* to tracked
/// state (and leaking data that you read into a tracked task).
- pub fn read(&mut self, v: DepNode<D>) {
+ pub fn read(&mut self, v: DepNode) {
if self.current_node().is_some() {
let source = self.make_node(v);
self.add_edge_from_current_node(|current| (source, current))
/// Indicates that the current task `C` writes `v` by adding an
/// edge from `C` to `v`. If there is no current task, panics. If
/// you want to suppress this edge, use `ignore`.
- pub fn write(&mut self, v: DepNode<D>) {
+ pub fn write(&mut self, v: DepNode) {
let target = self.make_node(v);
self.add_edge_from_current_node(|current| (current, target))
}
}
}
- pub fn query(&self) -> DepGraphQuery<D> {
+ pub fn query(&self) -> DepGraphQuery {
let edges: Vec<_> = self.edges.iter()
.map(|&(i, j)| (self.id(i), self.id(j)))
.collect();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use hir::def_id::DefId;
use rustc_data_structures::fx::FxHashMap;
use session::config::OutputType;
use std::cell::{Ref, RefCell};
use std::rc::Rc;
-use std::sync::Arc;
use super::dep_node::{DepNode, WorkProductId};
use super::query::DepGraphQuery;
/// things available to us. If we find that they are not dirty, we
/// load the path to the file storing those work-products here into
/// this map. We can later look for and extract that data.
- previous_work_products: RefCell<FxHashMap<Arc<WorkProductId>, WorkProduct>>,
+ previous_work_products: RefCell<FxHashMap<WorkProductId, WorkProduct>>,
/// Work-products that we generate in this run.
- work_products: RefCell<FxHashMap<Arc<WorkProductId>, WorkProduct>>,
+ work_products: RefCell<FxHashMap<WorkProductId, WorkProduct>>,
}
impl DepGraph {
self.data.thread.is_fully_enabled()
}
- pub fn query(&self) -> DepGraphQuery<DefId> {
+ pub fn query(&self) -> DepGraphQuery {
self.data.thread.query()
}
raii::IgnoreTask::new(&self.data.thread)
}
- pub fn in_task<'graph>(&'graph self, key: DepNode<DefId>) -> Option<raii::DepTask<'graph>> {
+ pub fn in_task<'graph>(&'graph self, key: DepNode) -> Option<raii::DepTask<'graph>> {
raii::DepTask::new(&self.data.thread, key)
}
/// `arg` parameter.
///
/// [README]: README.md
- pub fn with_task<C, A, R>(&self, key: DepNode<DefId>, cx: C, arg: A, task: fn(C, A) -> R) -> R
+ pub fn with_task<C, A, R>(&self, key: DepNode, cx: C, arg: A, task: fn(C, A) -> R) -> R
where C: DepGraphSafe, A: DepGraphSafe
{
let _task = self.in_task(key);
task(cx, arg)
}
- pub fn read(&self, v: DepNode<DefId>) {
+ pub fn read(&self, v: DepNode) {
if self.data.thread.is_enqueue_enabled() {
self.data.thread.enqueue(DepMessage::Read(v));
}
/// Indicates that a previous work product exists for `v`. This is
/// invoked during initial start-up based on what nodes are clean
/// (and what files exist in the incr. directory).
- pub fn insert_previous_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct) {
debug!("insert_previous_work_product({:?}, {:?})", v, data);
self.data.previous_work_products.borrow_mut()
.insert(v.clone(), data);
/// Indicates that we created the given work-product in this run
/// for `v`. This record will be preserved and loaded in the next
/// run.
- pub fn insert_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ pub fn insert_work_product(&self, v: &WorkProductId, data: WorkProduct) {
debug!("insert_work_product({:?}, {:?})", v, data);
self.data.work_products.borrow_mut()
.insert(v.clone(), data);
/// Check whether a previous work product exists for `v` and, if
/// so, return the path that leads to it. Used to skip doing work.
- pub fn previous_work_product(&self, v: &Arc<WorkProductId>) -> Option<WorkProduct> {
+ pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
self.data.previous_work_products.borrow()
.get(v)
.cloned()
/// Access the map of work-products created during this run. Only
/// used during saving of the dep-graph.
- pub fn work_products(&self) -> Ref<FxHashMap<Arc<WorkProductId>, WorkProduct>> {
+ pub fn work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
self.data.work_products.borrow()
}
/// Access the map of work-products created during the cached run. Only
/// used during saving of the dep-graph.
- pub fn previous_work_products(&self) -> Ref<FxHashMap<Arc<WorkProductId>, WorkProduct>> {
+ pub fn previous_work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
self.data.previous_work_products.borrow()
}
}
pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
pub use self::dep_node::DepNode;
pub use self::dep_node::WorkProductId;
-pub use self::dep_node::GlobalMetaDataKind;
pub use self::graph::DepGraph;
pub use self::graph::WorkProduct;
pub use self::query::DepGraphQuery;
pub use self::safe::AssertDepGraphSafe;
pub use self::safe::DepGraphSafe;
pub use self::raii::DepTask;
+
+pub use self::dep_node::{DepKind, DepConstructor};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::{Direction, INCOMING, Graph, NodeIndex, OUTGOING};
-use std::fmt::Debug;
-use std::hash::Hash;
use super::DepNode;
-pub struct DepGraphQuery<D: Clone + Debug + Hash + Eq> {
- pub graph: Graph<DepNode<D>, ()>,
- pub indices: FxHashMap<DepNode<D>, NodeIndex>,
+pub struct DepGraphQuery {
+ pub graph: Graph<DepNode, ()>,
+ pub indices: FxHashMap<DepNode, NodeIndex>,
}
-impl<D: Clone + Debug + Hash + Eq> DepGraphQuery<D> {
- pub fn new(nodes: &[DepNode<D>],
- edges: &[(DepNode<D>, DepNode<D>)])
- -> DepGraphQuery<D> {
+impl DepGraphQuery {
+ pub fn new(nodes: &[DepNode],
+ edges: &[(DepNode, DepNode)])
+ -> DepGraphQuery {
let mut graph = Graph::new();
let mut indices = FxHashMap();
for node in nodes {
}
}
- pub fn contains_node(&self, node: &DepNode<D>) -> bool {
+ pub fn contains_node(&self, node: &DepNode) -> bool {
self.indices.contains_key(&node)
}
- pub fn nodes(&self) -> Vec<&DepNode<D>> {
+ pub fn nodes(&self) -> Vec<&DepNode> {
self.graph.all_nodes()
.iter()
.map(|n| &n.data)
.collect()
}
- pub fn edges(&self) -> Vec<(&DepNode<D>,&DepNode<D>)> {
+ pub fn edges(&self) -> Vec<(&DepNode,&DepNode)> {
self.graph.all_edges()
.iter()
.map(|edge| (edge.source(), edge.target()))
.collect()
}
- fn reachable_nodes(&self, node: &DepNode<D>, direction: Direction) -> Vec<&DepNode<D>> {
+ fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
if let Some(&index) = self.indices.get(node) {
self.graph.depth_traverse(index, direction)
.map(|s| self.graph.node_data(s))
/// All nodes reachable from `node`. In other words, things that
/// will have to be recomputed if `node` changes.
- pub fn transitive_successors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
+ pub fn transitive_successors(&self, node: &DepNode) -> Vec<&DepNode> {
self.reachable_nodes(node, OUTGOING)
}
/// All nodes that can reach `node`.
- pub fn transitive_predecessors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
+ pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
self.reachable_nodes(node, INCOMING)
}
/// Just the outgoing edges from `node`.
- pub fn immediate_successors(&self, node: &DepNode<D>) -> Vec<&DepNode<D>> {
+ pub fn immediate_successors(&self, node: &DepNode) -> Vec<&DepNode> {
if let Some(&index) = self.indices.get(&node) {
self.graph.successor_nodes(index)
.map(|s| self.graph.node_data(s))
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use hir::def_id::DefId;
use super::DepNode;
use super::thread::{DepGraphThreadData, DepMessage};
pub struct DepTask<'graph> {
data: &'graph DepGraphThreadData,
- key: Option<DepNode<DefId>>,
+ key: Option<DepNode>,
}
impl<'graph> DepTask<'graph> {
- pub fn new(data: &'graph DepGraphThreadData, key: DepNode<DefId>)
+ pub fn new(data: &'graph DepGraphThreadData, key: DepNode)
-> Option<DepTask<'graph>> {
if data.is_enqueue_enabled() {
data.enqueue(DepMessage::PushTask(key.clone()));
//! specify an edge filter to be applied to each edge as it is
//! created. See `./README.md` for details.
-use hir::def_id::DefId;
use std::cell::RefCell;
use std::env;
pub struct ShadowGraph {
// if you push None onto the stack, that corresponds to an Ignore
- stack: RefCell<Vec<Option<DepNode<DefId>>>>,
+ stack: RefCell<Vec<Option<DepNode>>>,
forbidden_edge: Option<EdgeFilter>,
}
}
fn check_edge(&self,
- source: Option<Option<&DepNode<DefId>>>,
- target: Option<Option<&DepNode<DefId>>>) {
+ source: Option<Option<&DepNode>>,
+ target: Option<Option<&DepNode>>) {
assert!(ENABLED);
match (source, target) {
// cannot happen, one side is always Some(Some(_))
// Do a little juggling: we get back a reference to an option at the
// top of the stack, convert it to an optional reference.
-fn top<'s>(stack: &'s Vec<Option<DepNode<DefId>>>) -> Option<Option<&'s DepNode<DefId>>> {
+fn top<'s>(stack: &'s Vec<Option<DepNode>>) -> Option<Option<&'s DepNode>> {
stack.last()
- .map(|n: &'s Option<DepNode<DefId>>| -> Option<&'s DepNode<DefId>> {
+ .map(|n: &'s Option<DepNode>| -> Option<&'s DepNode> {
// (*)
// (*) type annotation just there to clarify what would
// otherwise be some *really* obscure code
//! to accumulate more messages. This way we only ever have two vectors
//! allocated (and both have a fairly large capacity).
-use hir::def_id::DefId;
use rustc_data_structures::veccell::VecCell;
use std::sync::mpsc::{self, Sender, Receiver};
use std::thread;
#[derive(Debug)]
pub enum DepMessage {
- Read(DepNode<DefId>),
- Write(DepNode<DefId>),
- PushTask(DepNode<DefId>),
- PopTask(DepNode<DefId>),
+ Read(DepNode),
+ Write(DepNode),
+ PushTask(DepNode),
+ PopTask(DepNode),
PushIgnore,
PopIgnore,
Query,
swap_out: Sender<Vec<DepMessage>>,
// where to receive query results
- query_in: Receiver<DepGraphQuery<DefId>>,
+ query_in: Receiver<DepGraphQuery>,
}
const INITIAL_CAPACITY: usize = 2048;
self.swap_out.send(old_messages).unwrap();
}
- pub fn query(&self) -> DepGraphQuery<DefId> {
+ pub fn query(&self) -> DepGraphQuery {
assert!(self.is_fully_enabled(), "should never query if not fully enabled");
self.enqueue(DepMessage::Query);
self.swap();
/// Definition of the depgraph thread.
pub fn main(swap_in: Receiver<Vec<DepMessage>>,
swap_out: Sender<Vec<DepMessage>>,
- query_out: Sender<DepGraphQuery<DefId>>) {
+ query_out: Sender<DepGraphQuery>) {
let mut edges = DepGraphEdges::new();
// the compiler thread always expects a fresh buffer to be
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! This module implements some validity checks for attributes.
+//! In particular it verifies that `#[inline]` and `#[repr]` attributes are
+//! attached to items that actually support them and if there are
+//! conflicts between multiple such attributes attached to the same
+//! item.
+
use session::Session;
use syntax::ast;
}
impl<'a> CheckAttrVisitor<'a> {
+ /// Check any attribute.
+ fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
+ if let Some(name) = attr.name() {
+ match &*name.as_str() {
+ "inline" => self.check_inline(attr, target),
+ "repr" => self.check_repr(attr, target),
+ _ => (),
+ }
+ }
+ }
+
+ /// Check if an `#[inline]` is applied to a function.
fn check_inline(&self, attr: &ast::Attribute, target: Target) {
if target != Target::Fn {
struct_span_err!(self.sess, attr.span, E0518, "attribute should be applied to function")
}
}
+ /// Check if an `#[repr]` attr is valid.
fn check_repr(&self, attr: &ast::Attribute, target: Target) {
let words = match attr.meta_item_list() {
Some(words) => words,
"conflicting packed and align representation hints").emit();
}
}
-
- fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
- if let Some(name) = attr.name() {
- match &*name.as_str() {
- "inline" => self.check_inline(attr, target),
- "repr" => self.check_repr(attr, target),
- _ => (),
- }
- }
- }
}
impl<'a> Visitor<'a> for CheckAttrVisitor<'a> {
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum CtorKind {
- // Constructor function automatically created by a tuple struct/variant.
+ /// Constructor function automatically created by a tuple struct/variant.
Fn,
- // Constructor constant automatically created by a unit struct/variant.
+ /// Constructor constant automatically created by a unit struct/variant.
Const,
- // Unusable name in value namespace created by a struct variant.
+ /// Unusable name in value namespace created by a struct variant.
Fictive,
}
}
}
-// Definition mapping
+/// Definition mapping
pub type DefMap = NodeMap<PathResolution>;
-// This is the replacement export map. It maps a module to all of the exports
-// within.
+
+/// This is the replacement export map. It maps a module to all of the exports
+/// within.
pub type ExportMap = NodeMap<Vec<Export>>;
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct Export {
- pub ident: ast::Ident, // The name of the target.
- pub def: Def, // The definition of the target.
- pub span: Span, // The span of the target definition.
+ /// The name of the target.
+ pub ident: ast::Ident,
+ /// The definition of the target.
+ pub def: Def,
+ /// The span of the target definition.
+ pub span: Span,
}
impl CtorKind {
}
}
+ /// A human readable kind name
pub fn kind_name(&self) -> &'static str {
match *self {
Def::Fn(..) => "function",
impl DefId {
+ /// Make a local `DefId` with the given index.
pub fn local(index: DefIndex) -> DefId {
DefId { krate: LOCAL_CRATE, index: index }
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Lowers the AST to the HIR.
-//
-// Since the AST and HIR are fairly similar, this is mostly a simple procedure,
-// much like a fold. Where lowering involves a bit more work things get more
-// interesting and there are some invariants you should know about. These mostly
-// concern spans and ids.
-//
-// Spans are assigned to AST nodes during parsing and then are modified during
-// expansion to indicate the origin of a node and the process it went through
-// being expanded. Ids are assigned to AST nodes just before lowering.
-//
-// For the simpler lowering steps, ids and spans should be preserved. Unlike
-// expansion we do not preserve the process of lowering in the spans, so spans
-// should not be modified here. When creating a new node (as opposed to
-// 'folding' an existing one), then you create a new id using `next_id()`.
-//
-// You must ensure that ids are unique. That means that you should only use the
-// id from an AST node in a single HIR node (you can assume that AST node ids
-// are unique). Every new node must have a unique id. Avoid cloning HIR nodes.
-// If you do, you must then set the new node's id to a fresh one.
-//
-// Spans are used for error messages and for tools to map semantics back to
-// source code. It is therefore not as important with spans as ids to be strict
-// about use (you can't break the compiler by screwing up a span). Obviously, a
-// HIR node can only have a single span. But multiple nodes can have the same
-// span and spans don't need to be kept in order, etc. Where code is preserved
-// by lowering, it should have the same span as in the AST. Where HIR nodes are
-// new it is probably best to give a span for the whole AST node being lowered.
-// All nodes should have real spans, don't use dummy spans. Tools are likely to
-// get confused if the spans from leaf AST nodes occur in multiple places
-// in the HIR, especially for multiple identifiers.
+//! Lowers the AST to the HIR.
+//!
+//! Since the AST and HIR are fairly similar, this is mostly a simple procedure,
+//! much like a fold. Where lowering involves a bit more work things get more
+//! interesting and there are some invariants you should know about. These mostly
+//! concern spans and ids.
+//!
+//! Spans are assigned to AST nodes during parsing and then are modified during
+//! expansion to indicate the origin of a node and the process it went through
+//! being expanded. Ids are assigned to AST nodes just before lowering.
+//!
+//! For the simpler lowering steps, ids and spans should be preserved. Unlike
+//! expansion we do not preserve the process of lowering in the spans, so spans
+//! should not be modified here. When creating a new node (as opposed to
+//! 'folding' an existing one), then you create a new id using `next_id()`.
+//!
+//! You must ensure that ids are unique. That means that you should only use the
+//! id from an AST node in a single HIR node (you can assume that AST node ids
+//! are unique). Every new node must have a unique id. Avoid cloning HIR nodes.
+//! If you do, you must then set the new node's id to a fresh one.
+//!
+//! Spans are used for error messages and for tools to map semantics back to
+//! source code. It is therefore not as important with spans as ids to be strict
+//! about use (you can't break the compiler by screwing up a span). Obviously, a
+//! HIR node can only have a single span. But multiple nodes can have the same
+//! span and spans don't need to be kept in order, etc. Where code is preserved
+//! by lowering, it should have the same span as in the AST. Where HIR nodes are
+//! new it is probably best to give a span for the whole AST node being lowered.
+//! All nodes should have real spans, don't use dummy spans. Tools are likely to
+//! get confused if the spans from leaf AST nodes occur in multiple places
+//! in the HIR, especially for multiple identifiers.
use hir;
use hir::map::{Definitions, DefKey, REGULAR_SPACE};
pub struct LoweringContext<'a> {
crate_root: Option<&'static str>,
+
// Use to assign ids to hir nodes that do not directly correspond to an ast node
sess: &'a Session,
+
// As we walk the AST we must keep track of the current 'parent' def id (in
// the form of a DefIndex) so that if we create a new node which introduces
// a definition, then we can properly create the def id.
}
pub trait Resolver {
- // Resolve a hir path generated by the lowerer when expanding `for`, `if let`, etc.
+ /// Resolve a hir path generated by the lowerer when expanding `for`, `if let`, etc.
fn resolve_hir_path(&mut self, path: &mut hir::Path, is_value: bool);
- // Obtain the resolution for a node id
+ /// Obtain the resolution for a node id
fn get_resolution(&mut self, id: NodeId) -> Option<PathResolution>;
- // We must keep the set of definitions up to date as we add nodes that weren't in the AST.
- // This should only return `None` during testing.
+ /// We must keep the set of definitions up to date as we add nodes that weren't in the AST.
+ /// This should only return `None` during testing.
fn definitions(&mut self) -> &mut Definitions;
}
let parent_def = self.parent_def.unwrap();
let def_id = {
let defs = self.resolver.definitions();
- let def_path_data = DefPathData::Binding(Ident::with_empty_ctxt(name));
+ let def_path_data = DefPathData::Binding(name);
let def_index = defs
.create_def_with_parent(parent_def, id, def_path_data, REGULAR_SPACE, Mark::root());
DefId::local(def_index)
use syntax::ext::hygiene::Mark;
use syntax::visit;
use syntax::symbol::keywords;
+use syntax::symbol::Symbol;
use hir::map::{ITEM_LIKE_SPACE, REGULAR_SPACE};
DefPathData::Impl,
ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Trait(..) |
ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) =>
- DefPathData::TypeNs(i.ident.modern()),
+ DefPathData::TypeNs(i.ident.name),
ItemKind::Mod(..) if i.ident == keywords::Invalid.ident() => {
return visit::walk_item(self, i);
}
- ItemKind::Mod(..) => DefPathData::Module(i.ident.modern()),
+ ItemKind::Mod(..) => DefPathData::Module(i.ident.name),
ItemKind::Static(..) | ItemKind::Const(..) | ItemKind::Fn(..) =>
- DefPathData::ValueNs(i.ident.modern()),
- ItemKind::MacroDef(..) => DefPathData::MacroDef(i.ident.modern()),
+ DefPathData::ValueNs(i.ident.name),
+ ItemKind::MacroDef(..) => DefPathData::MacroDef(i.ident.name),
ItemKind::Mac(..) => return self.visit_macro_invoc(i.id, false),
ItemKind::GlobalAsm(..) => DefPathData::Misc,
ItemKind::Use(ref view_path) => {
for v in &enum_definition.variants {
let variant_def_index =
this.create_def(v.node.data.id(),
- DefPathData::EnumVariant(v.node.name.modern()),
+ DefPathData::EnumVariant(v.node.name.name),
REGULAR_SPACE);
this.with_parent(variant_def_index, |this| {
for (index, field) in v.node.data.fields().iter().enumerate() {
- let ident = field.ident.map(Ident::modern)
- .unwrap_or_else(|| Ident::from_str(&index.to_string()));
- this.create_def(field.id, DefPathData::Field(ident), REGULAR_SPACE);
+ let name = field.ident.map(|ident| ident.name)
+ .unwrap_or_else(|| Symbol::intern(&index.to_string()));
+ this.create_def(field.id, DefPathData::Field(name), REGULAR_SPACE);
}
if let Some(ref expr) = v.node.disr_expr {
}
for (index, field) in struct_def.fields().iter().enumerate() {
- let ident = field.ident.map(Ident::modern)
- .unwrap_or_else(|| Ident::from_str(&index.to_string()));
- this.create_def(field.id, DefPathData::Field(ident), REGULAR_SPACE);
+ let name = field.ident.map(|ident| ident.name)
+ .unwrap_or_else(|| Symbol::intern(&index.to_string()));
+ this.create_def(field.id, DefPathData::Field(name), REGULAR_SPACE);
}
}
_ => {}
fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) {
let def = self.create_def(foreign_item.id,
- DefPathData::ValueNs(foreign_item.ident.modern()),
+ DefPathData::ValueNs(foreign_item.ident.name),
REGULAR_SPACE);
self.with_parent(def, |this| {
fn visit_generics(&mut self, generics: &'a Generics) {
for ty_param in generics.ty_params.iter() {
self.create_def(ty_param.id,
- DefPathData::TypeParam(ty_param.ident.modern()),
+ DefPathData::TypeParam(ty_param.ident.name),
REGULAR_SPACE);
}
fn visit_trait_item(&mut self, ti: &'a TraitItem) {
let def_data = match ti.node {
TraitItemKind::Method(..) | TraitItemKind::Const(..) =>
- DefPathData::ValueNs(ti.ident.modern()),
- TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.modern()),
+ DefPathData::ValueNs(ti.ident.name),
+ TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.name),
TraitItemKind::Macro(..) => return self.visit_macro_invoc(ti.id, false),
};
fn visit_impl_item(&mut self, ii: &'a ImplItem) {
let def_data = match ii.node {
ImplItemKind::Method(..) | ImplItemKind::Const(..) =>
- DefPathData::ValueNs(ii.ident.modern()),
- ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.modern()),
+ DefPathData::ValueNs(ii.ident.name),
+ ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.name),
ImplItemKind::Macro(..) => return self.visit_macro_invoc(ii.id, false),
};
PatKind::Mac(..) => return self.visit_macro_invoc(pat.id, false),
PatKind::Ident(_, id, _) => {
let def = self.create_def(pat.id,
- DefPathData::Binding(id.node.modern()),
+ DefPathData::Binding(id.node.name),
REGULAR_SPACE);
self.parent_def = Some(def);
}
fn visit_lifetime_def(&mut self, def: &'a LifetimeDef) {
self.create_def(def.lifetime.id,
- DefPathData::LifetimeDef(def.lifetime.ident.modern()),
+ DefPathData::LifetimeDef(def.lifetime.ident.name),
REGULAR_SPACE);
}
//! expressions) that are mostly just leftovers.
use hir;
-use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace};
+use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace,
+ CRATE_DEF_INDEX};
use ich::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::IndexVec;
use serialize::{Encodable, Decodable, Encoder, Decoder};
use std::fmt::Write;
use std::hash::Hash;
-use syntax::ast::{self, Ident};
-use syntax::ext::hygiene::{Mark, SyntaxContext};
+use syntax::ast;
+use syntax::ext::hygiene::Mark;
use syntax::symbol::{Symbol, InternedString};
use ty::TyCtxt;
use util::nodemap::NodeMap;
// and the special "root_parent" below.
0u8.hash(&mut hasher);
parent_hash.hash(&mut hasher);
- self.disambiguated_data.hash(&mut hasher);
+
+ let DisambiguatedDefPathData {
+ ref data,
+ disambiguator,
+ } = self.disambiguated_data;
+
+ ::std::mem::discriminant(data).hash(&mut hasher);
+ match *data {
+ DefPathData::TypeNs(name) |
+ DefPathData::ValueNs(name) |
+ DefPathData::Module(name) |
+ DefPathData::MacroDef(name) |
+ DefPathData::TypeParam(name) |
+ DefPathData::LifetimeDef(name) |
+ DefPathData::EnumVariant(name) |
+ DefPathData::Binding(name) |
+ DefPathData::Field(name) |
+ DefPathData::GlobalMetaData(name) => {
+ (*name.as_str()).hash(&mut hasher);
+ }
+
+ DefPathData::Impl |
+ DefPathData::CrateRoot |
+ DefPathData::Misc |
+ DefPathData::ClosureExpr |
+ DefPathData::StructCtor |
+ DefPathData::Initializer |
+ DefPathData::ImplTrait |
+ DefPathData::Typeof => {}
+ };
+
+ disambiguator.hash(&mut hasher);
+
DefPathHash(hasher.finish())
}
}
}
-#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub enum DefPathData {
// Root: these should only be used for the root nodes, because
// they are treated specially by the `def_path` function.
/// An impl
Impl,
/// Something in the type NS
- TypeNs(Ident),
+ TypeNs(Symbol),
/// Something in the value NS
- ValueNs(Ident),
+ ValueNs(Symbol),
/// A module declaration
- Module(Ident),
+ Module(Symbol),
/// A macro rule
- MacroDef(Ident),
+ MacroDef(Symbol),
/// A closure expression
ClosureExpr,
// Subportions of items
/// A type parameter (generic parameter)
- TypeParam(Ident),
+ TypeParam(Symbol),
/// A lifetime definition
- LifetimeDef(Ident),
+ LifetimeDef(Symbol),
/// A variant of a enum
- EnumVariant(Ident),
+ EnumVariant(Symbol),
/// A struct field
- Field(Ident),
+ Field(Symbol),
/// Implicit ctor for a tuple-like struct
StructCtor,
/// Initializer for a const
Initializer,
/// Pattern binding
- Binding(Ident),
+ Binding(Symbol),
/// An `impl Trait` type node.
ImplTrait,
/// A `typeof` type node.
Typeof,
+
+ /// GlobalMetaData identifies a piece of crate metadata that is global to
+ /// a whole crate (as opposed to just one item). GlobalMetaData components
+ /// are only supposed to show up right below the crate root.
+ GlobalMetaData(Symbol)
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug,
/// Get the number of definitions.
pub fn def_index_counts_lo_hi(&self) -> (usize, usize) {
- (self.def_index_to_node[DefIndexAddressSpace::Low.index()].len(),
- self.def_index_to_node[DefIndexAddressSpace::High.index()].len())
+ (self.table.index_to_key[DefIndexAddressSpace::Low.index()].len(),
+ self.table.index_to_key[DefIndexAddressSpace::High.index()].len())
}
pub fn def_key(&self, index: DefIndex) -> DefKey {
if def_id.krate == LOCAL_CRATE {
let space_index = def_id.index.address_space().index();
let array_index = def_id.index.as_array_index();
- Some(self.def_index_to_node[space_index][array_index])
+ let node_id = self.def_index_to_node[space_index][array_index];
+ if node_id != ast::DUMMY_NODE_ID {
+ Some(node_id)
+ } else {
+ None
+ }
} else {
None
}
// Create the definition.
let address_space = super::ITEM_LIKE_SPACE;
- let index = self.table.allocate(key, def_path_hash, address_space);
+ let root_index = self.table.allocate(key, def_path_hash, address_space);
+ assert_eq!(root_index, CRATE_DEF_INDEX);
assert!(self.def_index_to_node[address_space.index()].is_empty());
self.def_index_to_node[address_space.index()].push(ast::CRATE_NODE_ID);
- self.node_to_def_index.insert(ast::CRATE_NODE_ID, index);
+ self.node_to_def_index.insert(ast::CRATE_NODE_ID, root_index);
- index
+ // Allocate some other DefIndices that always must exist.
+ GlobalMetaDataKind::allocate_def_indices(self);
+
+ root_index
}
/// Add a definition with a parent definition.
assert_eq!(index.as_array_index(),
self.def_index_to_node[address_space.index()].len());
self.def_index_to_node[address_space.index()].push(node_id);
+
+ // Some things for which we allocate DefIndices don't correspond to
+ // anything in the AST, so they don't have a NodeId. For these cases
+ // we don't need a mapping from NodeId to DefIndex.
+ if node_id != ast::DUMMY_NODE_ID {
+ debug!("create_def_with_parent: def_index_to_node[{:?} <-> {:?}", index, node_id);
+ self.node_to_def_index.insert(node_id, index);
+ }
+
if expansion.is_modern() {
self.expansions.insert(index, expansion);
}
- debug!("create_def_with_parent: def_index_to_node[{:?} <-> {:?}", index, node_id);
- self.node_to_def_index.insert(node_id, index);
-
index
}
}
impl DefPathData {
- pub fn get_opt_ident(&self) -> Option<Ident> {
+ pub fn get_opt_name(&self) -> Option<Symbol> {
use self::DefPathData::*;
match *self {
- TypeNs(ident) |
- ValueNs(ident) |
- Module(ident) |
- MacroDef(ident) |
- TypeParam(ident) |
- LifetimeDef(ident) |
- EnumVariant(ident) |
- Binding(ident) |
- Field(ident) => Some(ident),
+ TypeNs(name) |
+ ValueNs(name) |
+ Module(name) |
+ MacroDef(name) |
+ TypeParam(name) |
+ LifetimeDef(name) |
+ EnumVariant(name) |
+ Binding(name) |
+ Field(name) |
+ GlobalMetaData(name) => Some(name),
Impl |
CrateRoot |
}
}
- pub fn get_opt_name(&self) -> Option<ast::Name> {
- self.get_opt_ident().map(|ident| ident.name)
- }
-
pub fn as_interned_str(&self) -> InternedString {
use self::DefPathData::*;
let s = match *self {
- TypeNs(ident) |
- ValueNs(ident) |
- Module(ident) |
- MacroDef(ident) |
- TypeParam(ident) |
- LifetimeDef(ident) |
- EnumVariant(ident) |
- Binding(ident) |
- Field(ident) => {
- return ident.name.as_str();
+ TypeNs(name) |
+ ValueNs(name) |
+ Module(name) |
+ MacroDef(name) |
+ TypeParam(name) |
+ LifetimeDef(name) |
+ EnumVariant(name) |
+ Binding(name) |
+ Field(name) |
+ GlobalMetaData(name) => {
+ return name.as_str();
}
// note that this does not show up in user printouts
}
}
-impl Eq for DefPathData {}
-impl PartialEq for DefPathData {
- fn eq(&self, other: &DefPathData) -> bool {
- ::std::mem::discriminant(self) == ::std::mem::discriminant(other) &&
- self.get_opt_ident() == other.get_opt_ident()
- }
-}
+// We define the GlobalMetaDataKind enum with this macro because we want to
+// make sure that we exhaustively iterate over all variants when registering
+// the corresponding DefIndices in the DefTable.
+macro_rules! define_global_metadata_kind {
+ (pub enum GlobalMetaDataKind {
+ $($variant:ident),*
+ }) => (
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash,
+ RustcEncodable, RustcDecodable)]
+ pub enum GlobalMetaDataKind {
+ $($variant),*
+ }
-impl ::std::hash::Hash for DefPathData {
- fn hash<H: ::std::hash::Hasher>(&self, hasher: &mut H) {
- ::std::mem::discriminant(self).hash(hasher);
- if let Some(ident) = self.get_opt_ident() {
- if ident.ctxt == SyntaxContext::empty() && ident.name == ident.name.interned() {
- ident.name.as_str().hash(hasher)
- } else {
- // FIXME(jseyfried) implement stable hashing for idents with macros 2.0 hygiene info
- ident.hash(hasher)
+ impl GlobalMetaDataKind {
+ fn allocate_def_indices(definitions: &mut Definitions) {
+ $({
+ let instance = GlobalMetaDataKind::$variant;
+ definitions.create_def_with_parent(
+ CRATE_DEF_INDEX,
+ ast::DUMMY_NODE_ID,
+ DefPathData::GlobalMetaData(instance.name()),
+ DefIndexAddressSpace::High,
+ Mark::root()
+ );
+
+ // Make sure calling def_index does not crash.
+ instance.def_index(&definitions.table);
+ })*
+ }
+
+ pub fn def_index(&self, def_path_table: &DefPathTable) -> DefIndex {
+ let def_key = DefKey {
+ parent: Some(CRATE_DEF_INDEX),
+ disambiguated_data: DisambiguatedDefPathData {
+ data: DefPathData::GlobalMetaData(self.name()),
+ disambiguator: 0,
+ }
+ };
+
+ def_path_table.key_to_index[&def_key]
+ }
+
+ fn name(&self) -> Symbol {
+
+ let string = match *self {
+ $(
+ GlobalMetaDataKind::$variant => {
+ concat!("{{GlobalMetaData::", stringify!($variant), "}}")
+ }
+ )*
+ };
+
+ Symbol::intern(string)
}
}
- }
+ )
}
+
+define_global_metadata_kind!(pub enum GlobalMetaDataKind {
+ Krate,
+ CrateDeps,
+ DylibDependencyFormats,
+ LangItems,
+ LangItemsMissing,
+ NativeLibraries,
+ CodeMap,
+ Impls,
+ ExportedSymbols
+});
pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData,
DisambiguatedDefPathData, DefPathHash};
-use dep_graph::{DepGraph, DepNode};
+use dep_graph::{DepGraph, DepNode, DepKind};
use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex, DefIndexAddressSpace};
}
pub fn krate<'hir>(&'hir self) -> &'hir Crate {
- self.dep_graph.read(DepNode::Krate);
+ self.dep_graph.read(DepNode::new_no_params(DepKind::Krate));
&self.krate
}
}
self.dep_graph.read(self.dep_node(id));
}
- fn dep_node(&self, id0: NodeId) -> DepNode<DefId> {
+ fn dep_node(&self, id0: NodeId) -> DepNode {
let mut id = id0;
let mut last_expr = None;
loop {
EntryItem(..) |
EntryTraitItem(..) |
EntryImplItem(..) => {
+ let def_index = self.definitions.opt_def_index(id).unwrap();
+ let def_path_hash = self.definitions.def_path_hash(def_index);
+
if let Some(last_id) = last_expr {
// The body may have a separate dep node
if entry.is_body_owner(last_id) {
- let def_id = self.local_def_id(id);
- return DepNode::HirBody(def_id);
+ return def_path_hash.to_dep_node(DepKind::HirBody);
}
}
- return DepNode::Hir(self.local_def_id(id));
+ return def_path_hash.to_dep_node(DepKind::Hir);
}
EntryVariant(p, v) => {
if last_expr.is_some() {
if v.node.disr_expr.map(|e| e.node_id) == last_expr {
// The enum parent holds both Hir and HirBody nodes.
- let def_id = self.local_def_id(id);
- return DepNode::HirBody(def_id);
+ let def_index = self.definitions.opt_def_index(id).unwrap();
+ let def_path_hash = self.definitions.def_path_hash(def_index);
+ return def_path_hash.to_dep_node(DepKind::HirBody);
}
}
}
}
RootCrate => {
- return DepNode::Hir(DefId::local(CRATE_DEF_INDEX));
+ let def_path_hash = self.definitions.def_path_hash(CRATE_DEF_INDEX);
+ return def_path_hash.to_dep_node(DepKind::Hir);
}
NotPresent =>
// present in the map for whatever reason, but
// they *do* have def-ids. So if we encounter an
// empty hole, check for that case.
- return self.opt_local_def_id(id)
- .map(|def_id| DepNode::Hir(def_id))
+ return self.definitions.opt_def_index(id)
+ .map(|def_index| {
+ let def_path_hash = self.definitions.def_path_hash(def_index);
+ def_path_hash.to_dep_node(DepKind::Hir)
+ })
.unwrap_or_else(|| {
bug!("Walking parents from `{}` \
led to `NotPresent` at `{}`",
}
pub fn trait_impls(&self, trait_did: DefId) -> &'hir [NodeId] {
- self.dep_graph.read(DepNode::AllLocalTraitImpls);
+ self.dep_graph.read(DepNode::new_no_params(DepKind::AllLocalTraitImpls));
// NB: intentionally bypass `self.forest.krate()` so that we
// do not trigger a read of the whole krate here
}
pub fn trait_default_impl(&self, trait_did: DefId) -> Option<NodeId> {
- self.dep_graph.read(DepNode::AllLocalTraitImpls);
+ self.dep_graph.read(DepNode::new_no_params(DepKind::AllLocalTraitImpls));
// NB: intentionally bypass `self.forest.krate()` so that we
// do not trigger a read of the whole krate here
/// invoking `krate.attrs` because it registers a tighter
/// dep-graph access.
pub fn krate_attrs(&self) -> &'hir [ast::Attribute] {
- let crate_root_def_id = DefId::local(CRATE_DEF_INDEX);
- self.dep_graph.read(DepNode::Hir(crate_root_def_id));
+ let def_path_hash = self.definitions.def_path_hash(CRATE_DEF_INDEX);
+
+ self.dep_graph.read(def_path_hash.to_dep_node(DepKind::Hir));
&self.forest.krate.attrs
}
}
}
- pub fn get_inlined_body(&self, def_id: DefId) -> Option<&'hir Body> {
- self.inlined_bodies.borrow().get(&def_id).map(|&body| {
- self.dep_graph.read(DepNode::MetaData(def_id));
- body
- })
+ pub fn get_inlined_body_untracked(&self, def_id: DefId) -> Option<&'hir Body> {
+ self.inlined_bodies.borrow().get(&def_id).cloned()
}
pub fn intern_inlined_body(&self, def_id: DefId, body: Body) -> &'hir Body {
time_stamp: usize,
}
-impl<'tcx> CachingCodemapView<'tcx> {
- pub fn new<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> CachingCodemapView<'tcx> {
+impl<'gcx> CachingCodemapView<'gcx> {
+ pub fn new<'a, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> CachingCodemapView<'gcx> {
let codemap = tcx.sess.codemap();
let files = codemap.files();
let first_file = files[0].clone();
/// enough information to transform DefIds and HirIds into stable DefPaths (i.e.
/// a reference to the TyCtxt) and it holds a few caches for speeding up various
/// things (e.g. each DefId/DefPath is only hashed once).
-pub struct StableHashingContext<'a, 'tcx: 'a> {
- tcx: ty::TyCtxt<'a, 'tcx, 'tcx>,
- codemap: CachingCodemapView<'tcx>,
+pub struct StableHashingContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ tcx: ty::TyCtxt<'a, 'gcx, 'tcx>,
+ codemap: CachingCodemapView<'gcx>,
hash_spans: bool,
hash_bodies: bool,
overflow_checks_enabled: bool,
HashTraitsInScope,
}
-impl<'a, 'tcx: 'a> StableHashingContext<'a, 'tcx> {
+impl<'a, 'gcx, 'tcx> StableHashingContext<'a, 'gcx, 'tcx> {
- pub fn new(tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+ pub fn new(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>) -> Self {
let hash_spans_initial = tcx.sess.opts.debuginfo != NoDebugInfo;
let check_overflow_initial = tcx.sess.overflow_checks();
}
#[inline]
- pub fn tcx(&self) -> ty::TyCtxt<'a, 'tcx, 'tcx> {
+ pub fn tcx(&self) -> ty::TyCtxt<'a, 'gcx, 'tcx> {
self.tcx
}
}
#[inline]
- pub fn codemap(&mut self) -> &mut CachingCodemapView<'tcx> {
+ pub fn codemap(&mut self) -> &mut CachingCodemapView<'gcx> {
&mut self.codemap
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ast::NodeId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ast::NodeId {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
match hcx.node_id_hashing_mode {
NodeIdHashingMode::Ignore => {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for Span {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for Span {
// Hash a span in a stable way. We can't directly hash the span's BytePos
// fields (that would be similar to hashing pointers, since those are just
// Also, hashing filenames is expensive so we avoid doing it twice when the
// span starts and ends in the same file, which is almost always the case.
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use syntax_pos::Pos;
}
}
-pub fn hash_stable_hashmap<'a, 'tcx, K, V, R, SK, F, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
- hasher: &mut StableHasher<W>,
- map: &HashMap<K, V, R>,
- extract_stable_key: F)
+pub fn hash_stable_hashmap<'a, 'gcx, 'tcx, K, V, R, SK, F, W>(
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ map: &HashMap<K, V, R>,
+ extract_stable_key: F)
where K: Eq + std_hash::Hash,
- V: HashStable<StableHashingContext<'a, 'tcx>>,
+ V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
R: std_hash::BuildHasher,
- SK: HashStable<StableHashingContext<'a, 'tcx>> + Ord + Clone,
- F: Fn(&mut StableHashingContext<'a, 'tcx>, &K) -> SK,
+ SK: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + Ord + Clone,
+ F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK,
W: StableHasherResult,
{
let mut keys: Vec<_> = map.keys()
}
}
-pub fn hash_stable_hashset<'a, 'tcx, K, R, SK, F, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
- hasher: &mut StableHasher<W>,
- set: &HashSet<K, R>,
- extract_stable_key: F)
+pub fn hash_stable_hashset<'a, 'tcx, 'gcx, K, R, SK, F, W>(
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ set: &HashSet<K, R>,
+ extract_stable_key: F)
where K: Eq + std_hash::Hash,
R: std_hash::BuildHasher,
- SK: HashStable<StableHashingContext<'a, 'tcx>> + Ord + Clone,
- F: Fn(&mut StableHashingContext<'a, 'tcx>, &K) -> SK,
+ SK: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + Ord + Clone,
+ F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK,
W: StableHasherResult,
{
let mut keys: Vec<_> = set.iter()
keys.hash_stable(hcx, hasher);
}
-pub fn hash_stable_nodemap<'a, 'tcx, V, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
- hasher: &mut StableHasher<W>,
- map: &NodeMap<V>)
- where V: HashStable<StableHashingContext<'a, 'tcx>>,
+pub fn hash_stable_nodemap<'a, 'tcx, 'gcx, V, W>(
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ map: &NodeMap<V>)
+ where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
W: StableHasherResult,
{
hash_stable_hashmap(hcx, hasher, map, |hcx, node_id| {
}
-pub fn hash_stable_btreemap<'a, 'tcx, K, V, SK, F, W>(hcx: &mut StableHashingContext<'a, 'tcx>,
- hasher: &mut StableHasher<W>,
- map: &BTreeMap<K, V>,
- extract_stable_key: F)
+pub fn hash_stable_btreemap<'a, 'tcx, 'gcx, K, V, SK, F, W>(
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
+ hasher: &mut StableHasher<W>,
+ map: &BTreeMap<K, V>,
+ extract_stable_key: F)
where K: Eq + Ord,
- V: HashStable<StableHashingContext<'a, 'tcx>>,
- SK: HashStable<StableHashingContext<'a, 'tcx>> + Ord + Clone,
- F: Fn(&mut StableHashingContext<'a, 'tcx>, &K) -> SK,
+ V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
+ SK: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + Ord + Clone,
+ F: Fn(&mut StableHashingContext<'a, 'gcx, 'tcx>, &K) -> SK,
W: StableHasherResult,
{
let mut keys: Vec<_> = map.keys()
//! types in no particular order.
use hir;
-use hir::def_id::DefId;
+use hir::def_id::{DefId, CrateNum, CRATE_DEF_INDEX};
use ich::{StableHashingContext, NodeIdHashingMode};
use std::mem;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for DefId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for DefId {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
hcx.def_path_hash(*self).hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::HirId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::HirId {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::HirId {
owner,
}
}
+
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for CrateNum {
+ #[inline]
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
+ hasher: &mut StableHasher<W>) {
+ hcx.def_path_hash(DefId {
+ krate: *self,
+ index: CRATE_DEF_INDEX
+ }).hash_stable(hcx, hasher);
+ }
+}
+
impl_stable_hash_for!(tuple_struct hir::ItemLocalId { index });
// The following implementations of HashStable for ItemId, TraitItemId, and
// want to pick up on a reference changing its target, so we hash the NodeIds
// in "DefPath Mode".
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::ItemId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::ItemId {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::ItemId {
id
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::TraitItemId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::TraitItemId {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::TraitItemId {
node_id
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::ImplItemId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::ImplItemId {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::ImplItemId {
node_id
span
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Ty {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Ty {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let node_id_hashing_mode = match self.node {
hir::TySlice(..) |
Return(t)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::TraitRef {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::TraitRef {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::TraitRef {
ref path,
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Block {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Block {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::Block {
ref stmts,
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Pat {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Pat {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let node_id_hashing_mode = match self.node {
hir::PatKind::Wild |
UserProvided
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Expr {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Expr {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
hcx.while_hashing_hir_bodies(true, |hcx| {
let hir::Expr {
ForLoop
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::MatchSource {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::MatchSource {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use hir::MatchSource;
Loop(loop_id_result)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ast::Ident {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ast::Ident {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ast::Ident {
ref name,
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::TraitItem {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::TraitItem {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::TraitItem {
id,
Type(bounds, rhs)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::ImplItem {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::ImplItem {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::ImplItem {
id,
Type(t)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Visibility {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Visibility {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Defaultness {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Defaultness {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
Negative
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Mod {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Mod {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::Mod {
inner,
Unit(id)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::Item {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::Item {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let node_id_hashing_mode = match self.node {
hir::ItemExternCrate(..) |
defaultness
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::AssociatedItemKind {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for hir::AssociatedItemKind {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
value
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::BodyId {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::BodyId {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
if hcx.hash_bodies() {
hcx.tcx().hir.body(*self).hash_stable(hcx, hasher);
is_indirect
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::GlobalAsm {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::GlobalAsm {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::GlobalAsm {
asm,
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::InlineAsm {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for hir::InlineAsm {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let hir::InlineAsm {
asm,
NotConst
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for hir::def_id::DefIndex {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for hir::def_id::DefIndex {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
DefId::local(*self).hash_stable(hcx, hasher);
}
span
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::middle::lang_items::LangItem {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ::middle::lang_items::LangItem {
fn hash_stable<W: StableHasherResult>(&self,
- _: &mut StableHashingContext<'a, 'tcx>,
+ _: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
::std::hash::Hash::hash(self, hasher);
}
impl_stable_hash_for!(struct mir::UpvarDecl { debug_name, by_ref });
impl_stable_hash_for!(struct mir::BasicBlockData<'tcx> { statements, terminator, is_cleanup });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Terminator<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::Terminator<'tcx> {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let mir::Terminator {
ref kind,
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Local {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Local {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use rustc_data_structures::indexed_vec::Idx;
self.index().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::BasicBlock {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::BasicBlock {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use rustc_data_structures::indexed_vec::Idx;
self.index().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Field {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Field {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use rustc_data_structures::indexed_vec::Idx;
self.index().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::VisibilityScope {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::VisibilityScope {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use rustc_data_structures::indexed_vec::Idx;
self.index().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Promoted {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Promoted {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use rustc_data_structures::indexed_vec::Idx;
self.index().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::TerminatorKind<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::TerminatorKind<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::AssertMessage<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::AssertMessage<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
impl_stable_hash_for!(struct mir::Statement<'tcx> { source_info, kind });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::StatementKind<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::StatementKind<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Lvalue<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Lvalue<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx, B, V> HashStable<StableHashingContext<'a, 'tcx>> for mir::Projection<'tcx, B, V>
- where B: HashStable<StableHashingContext<'a, 'tcx>>,
- V: HashStable<StableHashingContext<'a, 'tcx>>
+impl<'a, 'gcx, 'tcx, B, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::Projection<'tcx, B, V>
+ where B: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
+ V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let mir::Projection {
ref base,
}
}
-impl<'a, 'tcx, V> HashStable<StableHashingContext<'a, 'tcx>> for mir::ProjectionElem<'tcx, V>
- where V: HashStable<StableHashingContext<'a, 'tcx>>
+impl<'a, 'gcx, 'tcx, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::ProjectionElem<'tcx, V>
+ where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
impl_stable_hash_for!(struct mir::VisibilityScopeData { span, parent_scope });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Operand<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Operand<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Rvalue<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Rvalue<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
Unsize
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::AggregateKind<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for mir::AggregateKind<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
impl_stable_hash_for!(struct mir::Constant<'tcx> { span, ty, literal });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for mir::Literal<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Literal<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
StableHasherResult};
use rustc_data_structures::accumulate_vec::AccumulateVec;
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::syntax::symbol::InternedString {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ::syntax::symbol::InternedString {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let s: &str = &**self;
s.hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ast::Name {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ast::Name {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
self.as_str().hash_stable(hcx, hasher);
}
impl_stable_hash_for!(struct ::syntax::attr::Deprecation { since, note });
impl_stable_hash_for!(struct ::syntax::attr::Stability { level, feature, rustc_depr });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::syntax::attr::StabilityLevel {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ::syntax::attr::StabilityLevel {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
impl_stable_hash_for!(enum ::syntax::ast::StrStyle { Cooked, Raw(pounds) });
impl_stable_hash_for!(enum ::syntax::ast::AttrStyle { Outer, Inner });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for [ast::Attribute] {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for [ast::Attribute] {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
// Some attributes are always ignored during hashing.
let filtered: AccumulateVec<[&ast::Attribute; 8]> = self
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ast::Attribute {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ast::Attribute {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
// Make sure that these have been filtered out.
debug_assert!(self.name().map(|name| !hcx.is_ignored_attr(name)).unwrap_or(true));
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for tokenstream::TokenTree {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for tokenstream::TokenTree {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for tokenstream::TokenStream {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for tokenstream::TokenStream {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
for sub_tt in self.trees() {
sub_tt.hash_stable(hcx, hasher);
}
}
-fn hash_token<'a, 'tcx, W: StableHasherResult>(token: &token::Token,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+fn hash_token<'a, 'gcx, 'tcx, W: StableHasherResult>(token: &token::Token,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>,
error_reporting_span: Span) {
mem::discriminant(token).hash_stable(hcx, hasher);
NameValue(lit)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for FileMap {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for FileMap {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let FileMap {
ref name,
use syntax_pos::symbol::InternedString;
use ty;
-impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>> for &'tcx ty::Slice<T>
- where T: HashStable<StableHashingContext<'a, 'tcx>> {
+impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for &'tcx ty::Slice<T>
+ where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
(&self[..]).hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::subst::Kind<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::subst::Kind<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
self.as_type().hash_stable(hcx, hasher);
self.as_region().hash_stable(hcx, hasher);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::RegionKind {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::RegionKind {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::adjustment::AutoBorrow<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::adjustment::AutoBorrow<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::adjustment::Adjust<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::adjustment::Adjust<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
MutBorrow
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::UpvarCapture<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::UpvarCapture<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
abi
});
-impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>> for ty::Binder<T>
- where T: HashStable<StableHashingContext<'a, 'tcx>> + ty::fold::TypeFoldable<'tcx>
+impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::Binder<T>
+ where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>> + ty::fold::TypeFoldable<'tcx>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
hcx.tcx().anonymize_late_bound_regions(self).0.hash_stable(hcx, hasher);
}
impl_stable_hash_for!(tuple_struct ty::EquatePredicate<'tcx> { t1, t2 });
impl_stable_hash_for!(struct ty::SubtypePredicate<'tcx> { a_is_expected, a, b });
-impl<'a, 'tcx, A, B> HashStable<StableHashingContext<'a, 'tcx>> for ty::OutlivesPredicate<A, B>
- where A: HashStable<StableHashingContext<'a, 'tcx>>,
- B: HashStable<StableHashingContext<'a, 'tcx>>,
+impl<'a, 'gcx, 'tcx, A, B> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::OutlivesPredicate<A, B>
+ where A: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
+ B: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::OutlivesPredicate(ref a, ref b) = *self;
a.hash_stable(hcx, hasher);
impl_stable_hash_for!(struct ty::ProjectionTy<'tcx> { trait_ref, item_def_id });
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::Predicate<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::Predicate<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::AdtFlags {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::AdtFlags {
fn hash_stable<W: StableHasherResult>(&self,
- _: &mut StableHashingContext<'a, 'tcx>,
+ _: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
std_hash::Hash::hash(self, hasher);
}
vis
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>>
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for ::middle::const_val::ConstVal<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use middle::const_val::ConstVal;
Struct(index)
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::Generics {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::Generics {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::Generics {
parent,
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::RegionParameterDef {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::RegionParameterDef {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::RegionParameterDef {
name,
});
-impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>>
+impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for ::middle::resolve_lifetime::Set1<T>
- where T: HashStable<StableHashingContext<'a, 'tcx>>
+ where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use middle::resolve_lifetime::Set1;
FnPtrAddrCast
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::middle::region::CodeExtent
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ::middle::region::CodeExtent
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use middle::region::CodeExtent;
BrEnv
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TypeVariants<'tcx>
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::TypeVariants<'tcx>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
use ty::TypeVariants::*;
mutbl
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::ExistentialPredicate<'tcx>
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::ExistentialPredicate<'tcx>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
});
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TypeckTables<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
+for ty::TypeckTables<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::TypeckTables {
ref type_dependent_defs,
pub use self::region_inference::{GenericKind, VerifyBound};
use hir::def_id::DefId;
-use hir;
use middle::free_region::{FreeRegionMap, RegionRelations};
use middle::region::RegionMaps;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::McResult;
use middle::lang_items;
use mir::tcx::LvalueTy;
use ty::subst::{Kind, Subst, Substs};
use ty::relate::RelateResult;
use traits::{self, ObligationCause, PredicateObligations, Reveal};
use rustc_data_structures::unify::{self, UnificationTable};
-use std::cell::{Cell, RefCell, Ref, RefMut};
+use std::cell::{Cell, RefCell, Ref};
use std::fmt;
-use std::ops::Deref;
use syntax::ast;
use errors::DiagnosticBuilder;
use syntax_pos::{self, Span, DUMMY_SP};
pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
pub type FixupResult<T> = Result<T, FixupError>; // "fixup result"
-/// A version of &ty::TypeckTables which can be `Missing` (not needed),
-/// `InProgress` (during typeck) or `Interned` (result of typeck).
-/// Only the `InProgress` version supports `borrow_mut`.
-#[derive(Copy, Clone)]
-pub enum InferTables<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- Interned(&'a ty::TypeckTables<'gcx>),
- InProgress(&'a RefCell<ty::TypeckTables<'tcx>>),
- Missing
-}
-
-pub enum InferTablesRef<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- Interned(&'a ty::TypeckTables<'gcx>),
- InProgress(Ref<'a, ty::TypeckTables<'tcx>>)
-}
-
-impl<'a, 'gcx, 'tcx> Deref for InferTablesRef<'a, 'gcx, 'tcx> {
- type Target = ty::TypeckTables<'tcx>;
- fn deref(&self) -> &Self::Target {
- match *self {
- InferTablesRef::Interned(tables) => tables,
- InferTablesRef::InProgress(ref tables) => tables
- }
- }
-}
-
-impl<'a, 'gcx, 'tcx> InferTables<'a, 'gcx, 'tcx> {
- pub fn borrow(self) -> InferTablesRef<'a, 'gcx, 'tcx> {
- match self {
- InferTables::Interned(tables) => InferTablesRef::Interned(tables),
- InferTables::InProgress(tables) => InferTablesRef::InProgress(tables.borrow()),
- InferTables::Missing => {
- bug!("InferTables: infcx.tables.borrow() with no tables")
- }
- }
- }
-
- pub fn expect_interned(self) -> &'a ty::TypeckTables<'gcx> {
- match self {
- InferTables::Interned(tables) => tables,
- InferTables::InProgress(_) => {
- bug!("InferTables: infcx.tables.expect_interned() during type-checking");
- }
- InferTables::Missing => {
- bug!("InferTables: infcx.tables.expect_interned() with no tables")
- }
- }
- }
-
- pub fn borrow_mut(self) -> RefMut<'a, ty::TypeckTables<'tcx>> {
- match self {
- InferTables::Interned(_) => {
- bug!("InferTables: infcx.tables.borrow_mut() outside of type-checking");
- }
- InferTables::InProgress(tables) => tables.borrow_mut(),
- InferTables::Missing => {
- bug!("InferTables: infcx.tables.borrow_mut() with no tables")
- }
- }
- }
-}
-
pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'gcx, 'tcx>,
- pub tables: InferTables<'a, 'gcx, 'tcx>,
+ /// During type-checking/inference of a body, `in_progress_tables`
+ /// contains a reference to the tables being built up, which are
+ /// used for reading closure kinds/signatures as they are inferred,
+ /// and for error reporting logic to read arbitrary node types.
+ pub in_progress_tables: Option<&'a RefCell<ty::TypeckTables<'tcx>>>,
// Cache for projections. This cache is snapshotted along with the
// infcx.
}
}
-pub trait InferEnv<'a, 'tcx> {
- fn to_parts(self, tcx: TyCtxt<'a, 'tcx, 'tcx>)
- -> (Option<&'a ty::TypeckTables<'tcx>>,
- Option<ty::TypeckTables<'tcx>>);
-}
-
-impl<'a, 'tcx> InferEnv<'a, 'tcx> for () {
- fn to_parts(self, _: TyCtxt<'a, 'tcx, 'tcx>)
- -> (Option<&'a ty::TypeckTables<'tcx>>,
- Option<ty::TypeckTables<'tcx>>) {
- (None, None)
- }
-}
-
-impl<'a, 'tcx> InferEnv<'a, 'tcx> for &'a ty::TypeckTables<'tcx> {
- fn to_parts(self, _: TyCtxt<'a, 'tcx, 'tcx>)
- -> (Option<&'a ty::TypeckTables<'tcx>>,
- Option<ty::TypeckTables<'tcx>>) {
- (Some(self), None)
- }
-}
-
-impl<'a, 'tcx> InferEnv<'a, 'tcx> for ty::TypeckTables<'tcx> {
- fn to_parts(self, _: TyCtxt<'a, 'tcx, 'tcx>)
- -> (Option<&'a ty::TypeckTables<'tcx>>,
- Option<ty::TypeckTables<'tcx>>) {
- (None, Some(self))
- }
-}
-
-impl<'a, 'tcx> InferEnv<'a, 'tcx> for hir::BodyId {
- fn to_parts(self, tcx: TyCtxt<'a, 'tcx, 'tcx>)
- -> (Option<&'a ty::TypeckTables<'tcx>>,
- Option<ty::TypeckTables<'tcx>>) {
- let def_id = tcx.hir.body_owner_def_id(self);
- (Some(tcx.typeck_tables_of(def_id)), None)
- }
-}
-
-/// Helper type of a temporary returned by tcx.infer_ctxt(...).
+/// Helper type of a temporary returned by tcx.infer_ctxt().
/// Necessary because we can't write the following bound:
/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(InferCtxt<'b, 'gcx, 'tcx>).
pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
global_tcx: TyCtxt<'a, 'gcx, 'gcx>,
arena: DroplessArena,
fresh_tables: Option<RefCell<ty::TypeckTables<'tcx>>>,
- tables: Option<&'a ty::TypeckTables<'gcx>>,
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> {
- pub fn infer_ctxt<E: InferEnv<'a, 'gcx>>(self, env: E) -> InferCtxtBuilder<'a, 'gcx, 'tcx> {
- let (tables, fresh_tables) = env.to_parts(self);
+ pub fn infer_ctxt(self) -> InferCtxtBuilder<'a, 'gcx, 'tcx> {
InferCtxtBuilder {
global_tcx: self,
arena: DroplessArena::new(),
- fresh_tables: fresh_tables.map(RefCell::new),
- tables: tables,
- }
- }
-
- /// Fake InferCtxt with the global tcx. Used by pre-MIR borrowck
- /// for MemCategorizationContext/ExprUseVisitor.
- /// If any inference functionality is used, ICEs will occur.
- pub fn borrowck_fake_infer_ctxt(self, body: hir::BodyId)
- -> InferCtxt<'a, 'gcx, 'gcx> {
- let (tables, _) = body.to_parts(self);
- InferCtxt {
- tcx: self,
- tables: InferTables::Interned(tables.unwrap()),
- type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
- int_unification_table: RefCell::new(UnificationTable::new()),
- float_unification_table: RefCell::new(UnificationTable::new()),
- region_vars: RegionVarBindings::new(self),
- selection_cache: traits::SelectionCache::new(),
- evaluation_cache: traits::EvaluationCache::new(),
- projection_cache: RefCell::new(traits::ProjectionCache::new()),
- reported_trait_errors: RefCell::new(FxHashSet()),
- tainted_by_errors_flag: Cell::new(false),
- err_count_on_creation: self.sess.err_count(),
- in_snapshot: Cell::new(false),
+ fresh_tables: None,
}
}
}
impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> {
+ /// Used only by `rustc_typeck` during body type-checking/inference,
+ /// will initialize `in_progress_tables` with fresh `TypeckTables`.
+ pub fn with_fresh_in_progress_tables(mut self) -> Self {
+ self.fresh_tables = Some(RefCell::new(ty::TypeckTables::empty()));
+ self
+ }
+
pub fn enter<F, R>(&'tcx mut self, f: F) -> R
where F: for<'b> FnOnce(InferCtxt<'b, 'gcx, 'tcx>) -> R
{
global_tcx,
ref arena,
ref fresh_tables,
- tables,
} = *self;
- let tables = tables.map(InferTables::Interned).unwrap_or_else(|| {
- fresh_tables.as_ref().map_or(InferTables::Missing, InferTables::InProgress)
- });
+ let in_progress_tables = fresh_tables.as_ref();
global_tcx.enter_local(arena, |tcx| f(InferCtxt {
- tcx: tcx,
- tables: tables,
+ tcx,
+ in_progress_tables,
projection_cache: RefCell::new(traits::ProjectionCache::new()),
type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
int_unification_table: RefCell::new(UnificationTable::new()),
return value;
}
- self.infer_ctxt(()).enter(|infcx| {
+ self.infer_ctxt().enter(|infcx| {
value.trans_normalize(&infcx, param_env)
})
}
return value;
}
- self.infer_ctxt(()).enter(|infcx| {
+ self.infer_ctxt().enter(|infcx| {
value.trans_normalize(&infcx, env.reveal_all())
})
}
was_in_snapshot: in_snapshot,
// Borrow tables "in progress" (i.e. during typeck)
// to ban writes from within a snapshot to them.
- _in_progress_tables: match self.tables {
- InferTables::InProgress(ref tables) => tables.try_borrow().ok(),
- _ => None
- }
+ _in_progress_tables: self.in_progress_tables.map(|tables| {
+ tables.borrow()
+ })
}
}
self.tainted_by_errors_flag.set(true)
}
- pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> {
- match self.tables.borrow().node_types.get(&id) {
- Some(&t) => t,
- // FIXME
- None if self.is_tainted_by_errors() =>
- self.tcx.types.err,
- None => {
- bug!("no type for node {}: {} in fcx",
- id, self.tcx.hir.node_to_string(id));
- }
- }
- }
-
- pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> {
- match self.tables.borrow().node_types.get(&ex.id) {
- Some(&t) => t,
- None => {
- bug!("no type for expr in fcx");
- }
- }
- }
-
pub fn resolve_regions_and_report_errors(&self,
region_context: DefId,
region_map: &RegionMaps,
value.fold_with(&mut r)
}
- /// Resolves all type variables in `t` and then, if any were left
- /// unresolved, substitutes an error type. This is used after the
- /// main checking when doing a second pass before writeback. The
- /// justification is that writeback will produce an error for
- /// these unconstrained type variables.
- fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult<Ty<'tcx>> {
- let ty = self.resolve_type_vars_if_possible(t);
- if ty.references_error() || ty.is_ty_var() {
- debug!("resolve_type_vars_or_error: error from {:?}", ty);
- Err(())
- } else {
- Ok(ty)
- }
- }
-
pub fn fully_resolve<T:TypeFoldable<'tcx>>(&self, value: &T) -> FixupResult<T> {
/*!
* Attempts to resolve all type/region variables in
self.region_vars.verify_generic_bound(origin, kind, a, bound);
}
- pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
- let ty = self.node_type(id);
- self.resolve_type_vars_or_error(&ty)
- }
-
- pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
- let ty = self.tables.borrow().expr_ty_adjusted(expr);
- self.resolve_type_vars_or_error(&ty)
- }
-
pub fn type_moves_by_default(&self,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>,
span: Span)
-> bool {
let ty = self.resolve_type_vars_if_possible(&ty);
- if let Some((param_env, ty)) = self.tcx.lift_to_global(&(param_env, ty)) {
- // Even if the type may have no inference variables, during
- // type-checking closure types are in local tables only.
- let local_closures = match self.tables {
- InferTables::InProgress(_) => ty.has_closure_types(),
- _ => false
- };
- if !local_closures {
+ // Even if the type may have no inference variables, during
+ // type-checking closure types are in local tables only.
+ if !self.in_progress_tables.is_some() || !ty.has_closure_types() {
+ if let Some((param_env, ty)) = self.tcx.lift_to_global(&(param_env, ty)) {
return ty.moves_by_default(self.tcx.global_tcx(), param_env, span);
}
}
!traits::type_known_to_meet_bound(self, param_env, ty, copy_def_id, span)
}
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture<'tcx>> {
- self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
- }
-
pub fn closure_kind(&self,
def_id: DefId)
-> Option<ty::ClosureKind>
{
- if let InferTables::InProgress(tables) = self.tables {
+ if let Some(tables) = self.in_progress_tables {
if let Some(id) = self.tcx.hir.as_local_node_id(def_id) {
return tables.borrow()
.closure_kinds
}
pub fn closure_type(&self, def_id: DefId) -> ty::PolyFnSig<'tcx> {
- if let InferTables::InProgress(tables) = self.tables {
+ if let Some(tables) = self.in_progress_tables {
if let Some(id) = self.tcx.hir.as_local_node_id(def_id) {
if let Some(&ty) = tables.borrow().closure_tys.get(&id) {
return ty;
Region(ty::RegionKind),
}
-// type Edge = Constraint;
#[derive(Clone, PartialEq, Eq, Debug, Copy)]
enum Edge<'tcx> {
Constraint(Constraint<'tcx>),
mod graphviz;
-// A constraint that influences the inference process.
+/// A constraint that influences the inference process.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum Constraint<'tcx> {
- // One region variable is subregion of another
+ /// One region variable is subregion of another
ConstrainVarSubVar(RegionVid, RegionVid),
- // Concrete region is subregion of region variable
+ /// Concrete region is subregion of region variable
ConstrainRegSubVar(Region<'tcx>, RegionVid),
- // Region variable is subregion of concrete region. This does not
- // directly affect inference, but instead is checked after
- // inference is complete.
+ /// Region variable is subregion of concrete region. This does not
+ /// directly affect inference, but instead is checked after
+ /// inference is complete.
ConstrainVarSubReg(RegionVid, Region<'tcx>),
- // A constraint where neither side is a variable. This does not
- // directly affect inference, but instead is checked after
- // inference is complete.
+ /// A constraint where neither side is a variable. This does not
+ /// directly affect inference, but instead is checked after
+ /// inference is complete.
ConstrainRegSubReg(Region<'tcx>, Region<'tcx>),
}
-// VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
-// associated type) must outlive the region `R`. `T` is known to
-// outlive `RS`. Therefore verify that `R <= RS[i]` for some
-// `i`. Inference variables may be involved (but this verification
-// step doesn't influence inference).
+/// VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
+/// associated type) must outlive the region `R`. `T` is known to
+/// outlive `RS`. Therefore verify that `R <= RS[i]` for some
+/// `i`. Inference variables may be involved (but this verification
+/// step doesn't influence inference).
#[derive(Debug)]
pub struct Verify<'tcx> {
kind: GenericKind<'tcx>,
Projection(ty::ProjectionTy<'tcx>),
}
-// When we introduce a verification step, we wish to test that a
-// particular region (let's call it `'min`) meets some bound.
-// The bound is described the by the following grammar:
+/// When we introduce a verification step, we wish to test that a
+/// particular region (let's call it `'min`) meets some bound.
+/// The bound is described by the following grammar:
#[derive(Debug)]
pub enum VerifyBound<'tcx> {
- // B = exists {R} --> some 'r in {R} must outlive 'min
- //
- // Put another way, the subject value is known to outlive all
- // regions in {R}, so if any of those outlives 'min, then the
- // bound is met.
+ /// B = exists {R} --> some 'r in {R} must outlive 'min
+ ///
+ /// Put another way, the subject value is known to outlive all
+ /// regions in {R}, so if any of those outlives 'min, then the
+ /// bound is met.
AnyRegion(Vec<Region<'tcx>>),
- // B = forall {R} --> all 'r in {R} must outlive 'min
- //
- // Put another way, the subject value is known to outlive some
- // region in {R}, so if all of those outlives 'min, then the bound
- // is met.
+ /// B = forall {R} --> all 'r in {R} must outlive 'min
+ ///
+ /// Put another way, the subject value is known to outlive some
+ /// region in {R}, so if all of those outlives 'min, then the bound
+ /// is met.
AllRegions(Vec<Region<'tcx>>),
- // B = exists {B} --> 'min must meet some bound b in {B}
+ /// B = exists {B} --> 'min must meet some bound b in {B}
AnyBound(Vec<VerifyBound<'tcx>>),
- // B = forall {B} --> 'min must meet all bounds b in {B}
+ /// B = forall {B} --> 'min must meet all bounds b in {B}
AllBounds(Vec<VerifyBound<'tcx>>),
}
tcx: TyCtxt<'a, 'gcx, 'tcx>,
var_origins: RefCell<Vec<RegionVariableOrigin>>,
- // Constraints of the form `A <= B` introduced by the region
- // checker. Here at least one of `A` and `B` must be a region
- // variable.
+ /// Constraints of the form `A <= B` introduced by the region
+ /// checker. Here at least one of `A` and `B` must be a region
+ /// variable.
constraints: RefCell<FxHashMap<Constraint<'tcx>, SubregionOrigin<'tcx>>>,
- // A "verify" is something that we need to verify after inference is
- // done, but which does not directly affect inference in any way.
- //
- // An example is a `A <= B` where neither `A` nor `B` are
- // inference variables.
+ /// A "verify" is something that we need to verify after inference is
+ /// done, but which does not directly affect inference in any way.
+ ///
+ /// An example is a `A <= B` where neither `A` nor `B` are
+ /// inference variables.
verifys: RefCell<Vec<Verify<'tcx>>>,
- // A "given" is a relationship that is known to hold. In particular,
- // we often know from closure fn signatures that a particular free
- // region must be a subregion of a region variable:
- //
- // foo.iter().filter(<'a> |x: &'a &'b T| ...)
- //
- // In situations like this, `'b` is in fact a region variable
- // introduced by the call to `iter()`, and `'a` is a bound region
- // on the closure (as indicated by the `<'a>` prefix). If we are
- // naive, we wind up inferring that `'b` must be `'static`,
- // because we require that it be greater than `'a` and we do not
- // know what `'a` is precisely.
- //
- // This hashmap is used to avoid that naive scenario. Basically we
- // record the fact that `'a <= 'b` is implied by the fn signature,
- // and then ignore the constraint when solving equations. This is
- // a bit of a hack but seems to work.
+ /// A "given" is a relationship that is known to hold. In particular,
+ /// we often know from closure fn signatures that a particular free
+ /// region must be a subregion of a region variable:
+ ///
+ /// foo.iter().filter(<'a> |x: &'a &'b T| ...)
+ ///
+ /// In situations like this, `'b` is in fact a region variable
+ /// introduced by the call to `iter()`, and `'a` is a bound region
+ /// on the closure (as indicated by the `<'a>` prefix). If we are
+ /// naive, we wind up inferring that `'b` must be `'static`,
+ /// because we require that it be greater than `'a` and we do not
+ /// know what `'a` is precisely.
+ ///
+ /// This hashmap is used to avoid that naive scenario. Basically we
+ /// record the fact that `'a <= 'b` is implied by the fn signature,
+ /// and then ignore the constraint when solving equations. This is
+ /// a bit of a hack but seems to work.
givens: RefCell<FxHashSet<(Region<'tcx>, ty::RegionVid)>>,
lubs: RefCell<CombineMap<'tcx>>,
skolemization_count: Cell<u32>,
bound_count: Cell<u32>,
- // The undo log records actions that might later be undone.
- //
- // Note: when the undo_log is empty, we are not actively
- // snapshotting. When the `start_snapshot()` method is called, we
- // push an OpenSnapshot entry onto the list to indicate that we
- // are now actively snapshotting. The reason for this is that
- // otherwise we end up adding entries for things like the lower
- // bound on a variable and so forth, which can never be rolled
- // back.
+ /// The undo log records actions that might later be undone.
+ ///
+ /// Note: when the undo_log is empty, we are not actively
+ /// snapshotting. When the `start_snapshot()` method is called, we
+ /// push an OpenSnapshot entry onto the list to indicate that we
+ /// are now actively snapshotting. The reason for this is that
+ /// otherwise we end up adding entries for things like the lower
+ /// bound on a variable and so forth, which can never be rolled
+ /// back.
undo_log: RefCell<Vec<UndoLogEntry<'tcx>>>,
+
unification_table: RefCell<UnificationTable<ty::RegionVid>>,
- // This contains the results of inference. It begins as an empty
- // option and only acquires a value after inference is complete.
+ /// This contains the results of inference. It begins as an empty
+ /// option and only acquires a value after inference is complete.
values: RefCell<Option<Vec<VarValue<'tcx>>>>,
}
for verify in self.verifys.borrow().iter() {
debug!("collect_errors: verify={:?}", verify);
let sub = normalize(self.tcx, var_data, verify.region);
+
+ // This was an inference variable which didn't get
+ // constrained, therefore it can be assumed to hold.
+ if let ty::ReEmpty = *sub {
+ continue;
+ }
+
if verify.bound.is_met(region_rels, var_data, sub) {
continue;
}
//! for all lint attributes.
use self::TargetLint::*;
-use dep_graph::DepNode;
use middle::privacy::AccessLevels;
+use traits::Reveal;
use ty::{self, TyCtxt};
use session::{config, early_error, Session};
use lint::{Level, LevelSource, Lint, LintId, LintPass, LintSource};
/// Side-tables for the body we are in.
pub tables: &'a ty::TypeckTables<'tcx>,
- /// The crate being checked.
- pub krate: &'a hir::Crate,
+ /// Parameter environment for the item we are in.
+ pub param_env: ty::ParamEnv<'tcx>,
/// Items accessible from the crate being checked.
pub access_levels: &'a AccessLevels,
}
}
+impl<'a, 'tcx> LateContext<'a, 'tcx> {
+ fn with_param_env<F>(&mut self, id: ast::NodeId, f: F)
+ where F: FnOnce(&mut Self),
+ {
+ let old_param_env = self.param_env;
+ self.param_env = self.tcx.param_env(self.tcx.hir.local_def_id(id));
+ f(self);
+ self.param_env = old_param_env;
+ }
+}
+
impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> {
/// Because lints are scoped lexically, we want to walk nested
/// items in the context of the outer item, so enable
fn visit_item(&mut self, it: &'tcx hir::Item) {
self.with_lint_attrs(&it.attrs, |cx| {
- run_lints!(cx, check_item, late_passes, it);
- hir_visit::walk_item(cx, it);
- run_lints!(cx, check_item_post, late_passes, it);
+ cx.with_param_env(it.id, |cx| {
+ run_lints!(cx, check_item, late_passes, it);
+ hir_visit::walk_item(cx, it);
+ run_lints!(cx, check_item_post, late_passes, it);
+ });
})
}
fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem) {
self.with_lint_attrs(&it.attrs, |cx| {
- run_lints!(cx, check_foreign_item, late_passes, it);
- hir_visit::walk_foreign_item(cx, it);
- run_lints!(cx, check_foreign_item_post, late_passes, it);
+ cx.with_param_env(it.id, |cx| {
+ run_lints!(cx, check_foreign_item, late_passes, it);
+ hir_visit::walk_foreign_item(cx, it);
+ run_lints!(cx, check_foreign_item_post, late_passes, it);
+ });
})
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
self.with_lint_attrs(&trait_item.attrs, |cx| {
- run_lints!(cx, check_trait_item, late_passes, trait_item);
- hir_visit::walk_trait_item(cx, trait_item);
- run_lints!(cx, check_trait_item_post, late_passes, trait_item);
+ cx.with_param_env(trait_item.id, |cx| {
+ run_lints!(cx, check_trait_item, late_passes, trait_item);
+ hir_visit::walk_trait_item(cx, trait_item);
+ run_lints!(cx, check_trait_item_post, late_passes, trait_item);
+ });
});
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
self.with_lint_attrs(&impl_item.attrs, |cx| {
- run_lints!(cx, check_impl_item, late_passes, impl_item);
- hir_visit::walk_impl_item(cx, impl_item);
- run_lints!(cx, check_impl_item_post, late_passes, impl_item);
+ cx.with_param_env(impl_item.id, |cx| {
+ run_lints!(cx, check_impl_item, late_passes, impl_item);
+ hir_visit::walk_impl_item(cx, impl_item);
+ run_lints!(cx, check_impl_item_post, late_passes, impl_item);
+ });
});
}
///
/// Consumes the `lint_store` field of the `Session`.
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let _task = tcx.dep_graph.in_task(DepNode::LateLintCheck);
-
let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir.krate();
let mut cx = LateContext {
tcx: tcx,
tables: &ty::TypeckTables::empty(),
- krate: krate,
+ param_env: ty::ParamEnv::empty(Reveal::UserFacing),
access_levels: access_levels,
lint_sess: LintSession::new(&tcx.sess.lint_store),
};
#[macro_export]
macro_rules! impl_stable_hash_for {
(enum $enum_name:path { $( $variant:ident $( ( $($arg:ident),* ) )* ),* }) => {
- impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx>> for $enum_name {
+ impl<'a, 'gcx, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'gcx, 'tcx>> for $enum_name {
#[inline]
fn hash_stable<W: ::rustc_data_structures::stable_hasher::StableHasherResult>(&self,
- __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx>,
+ __ctx: &mut $crate::ich::StableHashingContext<'a, 'gcx, 'tcx>,
__hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher<W>) {
use $enum_name::*;
::std::mem::discriminant(self).hash_stable(__ctx, __hasher);
}
};
(struct $struct_name:path { $($field:ident),* }) => {
- impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx>> for $struct_name {
+ impl<'a, 'gcx, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'gcx, 'tcx>> for $struct_name {
#[inline]
fn hash_stable<W: ::rustc_data_structures::stable_hasher::StableHasherResult>(&self,
- __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx>,
+ __ctx: &mut $crate::ich::StableHashingContext<'a, 'gcx, 'tcx>,
__hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher<W>) {
let $struct_name {
$(ref $field),*
}
};
(tuple_struct $struct_name:path { $($field:ident),* }) => {
- impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'tcx>> for $struct_name {
+ impl<'a, 'gcx, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a, 'gcx, 'tcx>> for $struct_name {
#[inline]
fn hash_stable<W: ::rustc_data_structures::stable_hasher::StableHasherResult>(&self,
- __ctx: &mut $crate::ich::StableHashingContext<'a, 'tcx>,
+ __ctx: &mut $crate::ich::StableHashingContext<'a, 'gcx, 'tcx>,
__hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher<W>) {
let $struct_name (
$(ref $field),*
macro_rules! impl_stable_hash_for_spanned {
($T:path) => (
- impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ::syntax::codemap::Spanned<$T>
+ impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ::syntax::codemap::Spanned<$T>
{
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
self.node.hash_stable(hcx, hasher);
self.span.hash_stable(hcx, hasher);
// probably get a better home if someone can find one.
use hir::def;
-use dep_graph::DepNode;
use hir::def_id::{CrateNum, DefId, DefIndex};
use hir::map as hir_map;
use hir::map::definitions::{Definitions, DefKey, DisambiguatedDefPathData,
/// upstream crate.
#[derive(Debug, RustcEncodable, RustcDecodable, Clone)]
pub struct EncodedMetadataHashes {
- pub entry_hashes: Vec<EncodedMetadataHash>,
- pub global_hashes: Vec<(DepNode<()>, ich::Fingerprint)>,
+ // Stable content hashes for things in crate metadata, indexed by DefIndex.
+ pub hashes: Vec<EncodedMetadataHash>,
}
impl EncodedMetadataHashes {
pub fn new() -> EncodedMetadataHashes {
EncodedMetadataHashes {
- entry_hashes: Vec::new(),
- global_hashes: Vec::new(),
+ hashes: Vec::new(),
}
}
}
// item info
fn visibility(&self, def: DefId) -> ty::Visibility;
- fn visible_parent_map<'a>(&'a self) -> ::std::cell::Ref<'a, DefIdMap<DefId>>;
+ fn visible_parent_map<'a>(&'a self, sess: &Session) -> ::std::cell::Ref<'a, DefIdMap<DefId>>;
fn item_generics_cloned(&self, def: DefId) -> ty::Generics;
// trait info
fn def_path_hash(&self, def: DefId) -> hir_map::DefPathHash;
fn def_path_table(&self, cnum: CrateNum) -> Rc<DefPathTable>;
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name>;
- fn item_children(&self, did: DefId) -> Vec<def::Export>;
+ fn item_children(&self, did: DefId, sess: &Session) -> Vec<def::Export>;
fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro;
// misc. metadata
{ bug!("crate_data_as_rc_any") }
// item info
fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") }
- fn visible_parent_map<'a>(&'a self) -> ::std::cell::Ref<'a, DefIdMap<DefId>> {
+ fn visible_parent_map<'a>(&'a self, session: &Session)
+ -> ::std::cell::Ref<'a, DefIdMap<DefId>>
+ {
bug!("visible_parent_map")
}
fn item_generics_cloned(&self, def: DefId) -> ty::Generics
bug!("def_path_table")
}
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name> { bug!("struct_field_names") }
- fn item_children(&self, did: DefId) -> Vec<def::Export> { bug!("item_children") }
+ fn item_children(&self, did: DefId, sess: &Session) -> Vec<def::Export> {
+ bug!("item_children")
+ }
fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") }
// misc. metadata
///////////////////////////////////////////////////////////////////////////
// The ExprUseVisitor type
//
-// This is the code that actually walks the tree. Like
-// mem_categorization, it requires a TYPER, which is a type that
-// supplies types from the tree. After type checking is complete, you
-// can just use the tcx as the typer.
+// This is the code that actually walks the tree.
pub struct ExprUseVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>,
delegate: &'a mut Delegate<'tcx>,
param_env: ty::ParamEnv<'tcx>,
}
-// If the TYPER results in an error, it's because the type check
+// If the MC results in an error, it's because the type check
// failed (or will fail, when the error is uncovered and reported
// during writeback). In this case, we just ignore this part of the
// code.
)
}
-impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
+impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx, 'tcx> {
pub fn new(delegate: &'a mut (Delegate<'tcx>+'a),
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
region_maps: &'a RegionMaps,
- infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>)
+ tables: &'a ty::TypeckTables<'tcx>)
-> Self
{
- ExprUseVisitor::with_options(delegate,
- infcx,
- param_env,
- region_maps,
- mc::MemCategorizationOptions::default())
+ ExprUseVisitor {
+ mc: mc::MemCategorizationContext::new(tcx, region_maps, tables),
+ delegate,
+ param_env,
+ }
}
+}
- pub fn with_options(delegate: &'a mut (Delegate<'tcx>+'a),
- infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- region_maps: &'a RegionMaps,
- options: mc::MemCategorizationOptions)
- -> Self
+impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> {
+ pub fn with_infer(delegate: &'a mut (Delegate<'tcx>+'a),
+ infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ region_maps: &'a RegionMaps,
+ tables: &'a ty::TypeckTables<'tcx>)
+ -> Self
{
ExprUseVisitor {
- mc: mc::MemCategorizationContext::with_options(infcx, region_maps, options),
+ mc: mc::MemCategorizationContext::with_infer(infcx, region_maps, tables),
delegate,
param_env,
}
debug!("consume_body(body={:?})", body);
for arg in &body.arguments {
- let arg_ty = return_if_err!(self.mc.infcx.node_ty(arg.pat.id));
+ let arg_ty = return_if_err!(self.mc.node_ty(arg.pat.id));
let fn_body_scope_r = self.tcx().node_scope_region(body.value.id);
let arg_cmt = self.mc.cat_rvalue(
}
fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
- self.mc.infcx.tcx
+ self.mc.tcx
}
fn delegate_consume(&mut self,
debug!("delegate_consume(consume_id={}, cmt={:?})",
consume_id, cmt);
- let mode = copy_or_move(self.mc.infcx, self.param_env, &cmt, DirectRefMove);
+ let mode = copy_or_move(&self.mc, self.param_env, &cmt, DirectRefMove);
self.delegate.consume(consume_id, consume_span, cmt, mode);
}
hir::ExprAddrOf(m, ref base) => { // &base
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
- let expr_ty = return_if_err!(self.mc.infcx.node_ty(expr.id));
+ let expr_ty = return_if_err!(self.mc.expr_ty(expr));
if let ty::TyRef(r, _) = expr_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
self.borrow_expr(&base, r, bk, AddrOf);
}
hir::ExprAssignOp(_, ref lhs, ref rhs) => {
- if self.mc.infcx.tables.borrow().is_method_call(expr) {
+ if self.mc.tables.is_method_call(expr) {
self.consume_expr(lhs);
} else {
self.mutate_expr(expr, &lhs, MutateMode::WriteAndRead);
}
fn walk_callee(&mut self, call: &hir::Expr, callee: &hir::Expr) {
- let callee_ty = return_if_err!(self.mc.infcx.expr_ty_adjusted(callee));
+ let callee_ty = return_if_err!(self.mc.expr_ty_adjusted(callee));
debug!("walk_callee: callee={:?} callee_ty={:?}",
callee, callee_ty);
match callee_ty.sty {
}
ty::TyError => { }
_ => {
- let def_id = self.mc.infcx.tables.borrow().type_dependent_defs[&call.id].def_id();
+ let def_id = self.mc.tables.type_dependent_defs[&call.id].def_id();
match OverloadedCallType::from_method_id(self.tcx(), def_id) {
FnMutOverloadedCall => {
let call_scope_r = self.tcx().node_scope_region(call.id);
// consumed or borrowed as part of the automatic adjustment
// process.
fn walk_adjustment(&mut self, expr: &hir::Expr) {
- //NOTE(@jroesch): mixed RefCell borrow causes crash
- let adjustments = self.mc.infcx.tables.borrow().expr_adjustments(expr).to_vec();
+ let adjustments = self.mc.tables.expr_adjustments(expr);
let mut cmt = return_if_err!(self.mc.cat_expr_unadjusted(expr));
for adjustment in adjustments {
debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment);
mode: &mut TrackMatchMode) {
debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr,
pat);
- return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| {
+ return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| {
match pat.node {
PatKind::Binding(hir::BindByRef(..), ..) =>
mode.lub(BorrowingMatch),
PatKind::Binding(hir::BindByValue(..), ..) => {
- match copy_or_move(self.mc.infcx, self.param_env, &cmt_pat, PatBindingMove) {
+ match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) {
Copy => mode.lub(CopyingMatch),
Move(..) => mode.lub(MovingMatch),
}
debug!("walk_pat cmt_discr={:?} pat={:?}", cmt_discr, pat);
let tcx = self.tcx();
- let infcx = self.mc.infcx;
let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self;
- return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| {
+ return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| {
if let PatKind::Binding(bmode, def_id, ..) = pat.node {
debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode);
// pat_ty: the type of the binding being produced.
- let pat_ty = return_if_err!(infcx.node_ty(pat.id));
+ let pat_ty = return_if_err!(mc.node_ty(pat.id));
// Each match binding is effectively an assignment to the
// binding being produced.
}
}
hir::BindByValue(..) => {
- let mode = copy_or_move(infcx, param_env, &cmt_pat, PatBindingMove);
+ let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove);
debug!("walk_pat binding consuming pat");
delegate.consume_pat(pat, cmt_pat, mode);
}
// the interior nodes (enum variants and structs), as opposed
// to the above loop's visit of than the bindings that form
// the leaves of the pattern tree structure.
- return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
+ return_if_err!(mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| {
let qpath = match pat.node {
PatKind::Path(ref qpath) |
PatKind::TupleStruct(ref qpath, ..) |
PatKind::Struct(ref qpath, ..) => qpath,
_ => return
};
- let def = infcx.tables.borrow().qpath_def(qpath, pat.id);
+ let def = mc.tables.qpath_def(qpath, pat.id);
match def {
Def::Variant(variant_did) |
Def::VariantCtor(variant_did, ..) => {
let id_var = self.tcx().hir.as_local_node_id(def_id).unwrap();
let upvar_id = ty::UpvarId { var_id: id_var,
closure_expr_id: closure_expr.id };
- let upvar_capture = self.mc.infcx.upvar_capture(upvar_id).unwrap();
+ let upvar_capture = self.mc.tables.upvar_capture(upvar_id);
let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
fn_decl_span,
freevar.def));
match upvar_capture {
ty::UpvarCapture::ByValue => {
- let mode = copy_or_move(self.mc.infcx,
+ let mode = copy_or_move(&self.mc,
self.param_env,
&cmt_var,
CaptureMove);
// Create the cmt for the variable being borrowed, from the
// caller's perspective
let var_id = self.tcx().hir.as_local_node_id(upvar_def.def_id()).unwrap();
- let var_ty = self.mc.infcx.node_ty(var_id)?;
+ let var_ty = self.mc.node_ty(var_id)?;
self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def)
}
}
-fn copy_or_move<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+fn copy_or_move<'a, 'gcx, 'tcx>(mc: &mc::MemCategorizationContext<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
cmt: &mc::cmt<'tcx>,
move_reason: MoveReason)
-> ConsumeMode
{
- if infcx.type_moves_by_default(param_env, cmt.ty, cmt.span) {
+ if mc.type_moves_by_default(param_env, cmt.ty, cmt.span) {
Move(move_reason)
} else {
Copy
use hir::def::{Def, CtorKind};
use ty::adjustment;
use ty::{self, Ty, TyCtxt};
+use ty::fold::TypeFoldable;
use hir::{MutImmutable, MutMutable, PatKind};
use hir::pat_util::EnumerateAndAdjustIterator;
#[derive(Clone)]
pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ pub tcx: TyCtxt<'a, 'gcx, 'tcx>,
pub region_maps: &'a RegionMaps,
- options: MemCategorizationOptions,
-}
-
-#[derive(Copy, Clone, Default)]
-pub struct MemCategorizationOptions {
- // If true, then when analyzing a closure upvar, if the closure
- // has a missing kind, we treat it like a Fn closure. When false,
- // we ICE if the closure has a missing kind. Should be false
- // except during closure kind inference. It is used by the
- // mem-categorization code to be able to have stricter assertions
- // (which are always true except during upvar inference).
- pub during_closure_kind_inference: bool,
+ pub tables: &'a ty::TypeckTables<'tcx>,
+ infcx: Option<&'a InferCtxt<'a, 'gcx, 'tcx>>,
}
pub type McResult<T> = Result<T, ()>;
}
}
-impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
- /// Context should be the `DefId` we use to fetch region-maps.
- pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- region_maps: &'a RegionMaps)
- -> MemCategorizationContext<'a, 'gcx, 'tcx> {
- MemCategorizationContext::with_options(infcx,
- region_maps,
- MemCategorizationOptions::default())
+impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ region_maps: &'a RegionMaps,
+ tables: &'a ty::TypeckTables<'tcx>)
+ -> MemCategorizationContext<'a, 'tcx, 'tcx> {
+ MemCategorizationContext { tcx, region_maps, tables, infcx: None }
}
+}
- pub fn with_options(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- region_maps: &'a RegionMaps,
- options: MemCategorizationOptions)
- -> MemCategorizationContext<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
+ pub fn with_infer(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
+ region_maps: &'a RegionMaps,
+ tables: &'a ty::TypeckTables<'tcx>)
+ -> MemCategorizationContext<'a, 'gcx, 'tcx> {
MemCategorizationContext {
- infcx: infcx,
- region_maps: region_maps,
- options: options,
+ tcx: infcx.tcx,
+ region_maps,
+ tables,
+ infcx: Some(infcx),
}
}
- fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
- self.infcx.tcx
+ pub fn type_moves_by_default(&self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ span: Span)
+ -> bool {
+ self.infcx.map(|infcx| infcx.type_moves_by_default(param_env, ty, span))
+ .or_else(|| {
+ self.tcx.lift_to_global(&(param_env, ty)).map(|(param_env, ty)| {
+ ty.moves_by_default(self.tcx.global_tcx(), param_env, span)
+ })
+ })
+ .unwrap_or(true)
}
- fn expr_ty(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
- match self.infcx.node_ty(expr.id) {
- Ok(t) => Ok(t),
- Err(()) => {
- debug!("expr_ty({:?}) yielded Err", expr);
- Err(())
+ fn resolve_type_vars_if_possible<T>(&self, value: &T) -> T
+ where T: TypeFoldable<'tcx>
+ {
+ self.infcx.map(|infcx| infcx.resolve_type_vars_if_possible(value))
+ .unwrap_or_else(|| value.clone())
+ }
+
+ fn is_tainted_by_errors(&self) -> bool {
+ self.infcx.map_or(false, |infcx| infcx.is_tainted_by_errors())
+ }
+
+ fn resolve_type_vars_or_error(&self,
+ id: ast::NodeId,
+ ty: Option<Ty<'tcx>>)
+ -> McResult<Ty<'tcx>> {
+ match ty {
+ Some(ty) => {
+ let ty = self.resolve_type_vars_if_possible(&ty);
+ if ty.references_error() || ty.is_ty_var() {
+ debug!("resolve_type_vars_or_error: error from {:?}", ty);
+ Err(())
+ } else {
+ Ok(ty)
+ }
+ }
+ // FIXME
+ None if self.is_tainted_by_errors() => Err(()),
+ None => {
+ bug!("no type for node {}: {} in mem_categorization",
+ id, self.tcx.hir.node_to_string(id));
}
}
}
- fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
- self.infcx.expr_ty_adjusted(expr)
+ pub fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(id, self.tables.node_id_to_type_opt(id))
}
- fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>> {
- self.infcx.node_ty(id)
+ pub fn expr_ty(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.id, self.tables.expr_ty_opt(expr))
+ }
+
+ pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.id, self.tables.expr_ty_adjusted_opt(expr))
}
fn pat_ty(&self, pat: &hir::Pat) -> McResult<Ty<'tcx>> {
- let base_ty = self.infcx.node_ty(pat.id)?;
+ let base_ty = self.node_ty(pat.id)?;
// FIXME (Issue #18207): This code detects whether we are
// looking at a `ref x`, and if so, figures out what the type
// *being borrowed* is. But ideally we would put in a more
}
}
- helper(self, expr, self.infcx.tables.borrow().expr_adjustments(expr))
+ helper(self, expr, self.tables.expr_adjustments(expr))
}
pub fn cat_expr_adjusted(&self, expr: &hir::Expr,
where F: FnOnce() -> McResult<cmt<'tcx>>
{
debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr);
- let target = self.infcx.resolve_type_vars_if_possible(&adjustment.target);
+ let target = self.resolve_type_vars_if_possible(&adjustment.target);
match adjustment.kind {
adjustment::Adjust::Deref(overloaded) => {
// Equivalent to *expr or something similar.
let base = if let Some(deref) = overloaded {
- let ref_ty = self.tcx().mk_ref(deref.region, ty::TypeAndMut {
+ let ref_ty = self.tcx.mk_ref(deref.region, ty::TypeAndMut {
ty: target,
mutbl: deref.mutbl,
});
let expr_ty = self.expr_ty(expr)?;
match expr.node {
hir::ExprUnary(hir::UnDeref, ref e_base) => {
- if self.infcx.tables.borrow().is_method_call(expr) {
+ if self.tables.is_method_call(expr) {
self.cat_overloaded_lvalue(expr, e_base, false)
} else {
let base_cmt = self.cat_expr(&e_base)?;
}
hir::ExprIndex(ref base, _) => {
- if self.infcx.tables.borrow().is_method_call(expr) {
+ if self.tables.is_method_call(expr) {
// If this is an index implemented by a method call, then it
// will include an implicit deref of the result.
// The call to index() returns a `&T` value, which
}
hir::ExprPath(ref qpath) => {
- let def = self.infcx.tables.borrow().qpath_def(qpath, expr.id);
+ let def = self.tables.qpath_def(qpath, expr.id);
self.cat_def(expr.id, expr.span, expr_ty, def)
}
}
Def::Upvar(def_id, _, fn_node_id) => {
- let var_id = self.tcx().hir.as_local_node_id(def_id).unwrap();
- let ty = self.node_ty(fn_node_id)?;
- match ty.sty {
- ty::TyClosure(closure_id, _) => {
- match self.infcx.closure_kind(closure_id) {
- Some(kind) => {
- self.cat_upvar(id, span, var_id, fn_node_id, kind)
- }
- None => {
- if !self.options.during_closure_kind_inference {
- span_bug!(
- span,
- "No closure kind for {:?}",
- closure_id);
- }
-
- // during closure kind inference, we
- // don't know the closure kind yet, but
- // it's ok because we detect that we are
- // accessing an upvar and handle that
- // case specially anyhow. Use Fn
- // arbitrarily.
- self.cat_upvar(id, span, var_id, fn_node_id, ty::ClosureKind::Fn)
- }
- }
- }
- _ => {
- span_bug!(
- span,
- "Upvar of non-closure {} - {:?}",
- fn_node_id,
- ty);
- }
- }
+ let var_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
+ self.cat_upvar(id, span, var_id, fn_node_id)
}
Def::Local(def_id) => {
- let vid = self.tcx().hir.as_local_node_id(def_id).unwrap();
+ let vid = self.tcx.hir.as_local_node_id(def_id).unwrap();
Ok(Rc::new(cmt_ {
id: id,
span: span,
cat: Categorization::Local(vid),
- mutbl: MutabilityCategory::from_local(self.tcx(), vid),
+ mutbl: MutabilityCategory::from_local(self.tcx, vid),
ty: expr_ty,
note: NoteNone
}))
id: ast::NodeId,
span: Span,
var_id: ast::NodeId,
- fn_node_id: ast::NodeId,
- kind: ty::ClosureKind)
+ fn_node_id: ast::NodeId)
-> McResult<cmt<'tcx>>
{
// An upvar can have up to 3 components. We translate first to a
// FnMut | copied -> &'env mut | upvar -> &'env mut -> &'up bk
// FnOnce | copied | upvar -> &'up bk
+ let kind = match self.tables.closure_kinds.get(&fn_node_id) {
+ Some(&(kind, _)) => kind,
+ None => span_bug!(span, "missing closure kind")
+ };
+
let upvar_id = ty::UpvarId { var_id: var_id,
closure_expr_id: fn_node_id };
let var_ty = self.node_ty(var_id)?;
// Mutability of original variable itself
- let var_mutbl = MutabilityCategory::from_local(self.tcx(), var_id);
+ let var_mutbl = MutabilityCategory::from_local(self.tcx, var_id);
// Construct the upvar. This represents access to the field
// from the environment (perhaps we should eventually desugar
// for that.
let upvar_id = ty::UpvarId { var_id: var_id,
closure_expr_id: fn_node_id };
- let upvar_capture = self.infcx.upvar_capture(upvar_id).unwrap();
+ let upvar_capture = self.tables.upvar_capture(upvar_id);
let cmt_result = match upvar_capture {
ty::UpvarCapture::ByValue => {
cmt_result
-> cmt_<'tcx>
{
// Region of environment pointer
- let env_region = self.tcx().mk_region(ty::ReFree(ty::FreeRegion {
+ let env_region = self.tcx.mk_region(ty::ReFree(ty::FreeRegion {
// The environment of a closure is guaranteed to
// outlive any bindings introduced in the body of the
// closure itself.
- scope: self.tcx().hir.local_def_id(upvar_id.closure_expr_id),
+ scope: self.tcx.hir.local_def_id(upvar_id.closure_expr_id),
bound_region: ty::BrEnv
}));
// one.
let cmt_result = cmt_ {
mutbl: McImmutable,
- ty: self.tcx().types.err,
+ ty: self.tcx.types.err,
..cmt_result
};
pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region<'tcx>
{
let scope = self.region_maps.temporary_scope(id);
- self.tcx().mk_region(match scope {
+ self.tcx.mk_region(match scope {
Some(scope) => ty::ReScope(scope),
None => ty::ReStatic
})
span: Span,
expr_ty: Ty<'tcx>)
-> cmt<'tcx> {
- let promotable = self.tcx().rvalue_promotable_to_static.borrow().get(&id).cloned()
+ let promotable = self.tcx.rvalue_promotable_to_static.borrow().get(&id).cloned()
.unwrap_or(false);
// When the corresponding feature isn't toggled, only promote `[T; 0]`.
let promotable = match expr_ty.sty {
ty::TyArray(_, 0) => true,
- _ => promotable && self.tcx().sess.features.borrow().rvalue_static_promotion,
+ _ => promotable && self.tcx.sess.features.borrow().rvalue_static_promotion,
};
// Compute maximum lifetime of this rvalue. This is 'static if
// we can promote to a constant, otherwise equal to enclosing temp
// lifetime.
let re = if promotable {
- self.tcx().types.re_static
+ self.tcx.types.re_static
} else {
self.temporary_scope(id)
};
span_bug!(expr.span, "cat_overloaded_lvalue: base is not a reference")
}
};
- let ref_ty = self.tcx().mk_ref(region, ty::TypeAndMut {
+ let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut {
ty: lvalue_ty,
mutbl,
});
}
pub fn cat_pattern<F>(&self, cmt: cmt<'tcx>, pat: &hir::Pat, mut op: F) -> McResult<()>
- where F: FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat),
+ where F: FnMut(cmt<'tcx>, &hir::Pat),
{
self.cat_pattern_(cmt, pat, &mut op)
}
// FIXME(#19596) This is a workaround, but there should be a better way to do this
fn cat_pattern_<F>(&self, cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F) -> McResult<()>
- where F : FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat)
+ where F : FnMut(cmt<'tcx>, &hir::Pat)
{
// Here, `cmt` is the categorization for the value being
// matched and pat is the pattern it is being matched against.
debug!("cat_pattern: {:?} cmt={:?}", pat, cmt);
- op(self, cmt.clone(), pat);
+ op(cmt.clone(), pat);
// Note: This goes up here (rather than within the PatKind::TupleStruct arm
// alone) because PatKind::Struct can also refer to variants.
Def::Variant(variant_did) |
Def::VariantCtor(variant_did, ..) => {
// univariant enums do not need downcasts
- let enum_did = self.tcx().parent_def_id(variant_did).unwrap();
- if !self.tcx().adt_def(enum_did).is_univariant() {
+ let enum_did = self.tcx.parent_def_id(variant_did).unwrap();
+ if !self.tcx.adt_def(enum_did).is_univariant() {
self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did)
} else {
cmt
match pat.node {
PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => {
- let def = self.infcx.tables.borrow().qpath_def(qpath, pat.id);
+ let def = self.tables.qpath_def(qpath, pat.id);
let expected_len = match def {
Def::VariantCtor(def_id, CtorKind::Fn) => {
- let enum_def = self.tcx().parent_def_id(def_id).unwrap();
- self.tcx().adt_def(enum_def).variant_with_id(def_id).fields.len()
+ let enum_def = self.tcx.parent_def_id(def_id).unwrap();
+ self.tcx.adt_def(enum_def).variant_with_id(def_id).fields.len()
}
Def::StructCtor(_, CtorKind::Fn) => {
match self.pat_ty(&pat)?.sty {
m.push_str(&(if n == 1 {
help_name
} else {
- format!("one of {}'s {} elided {}lifetimes", help_name, n,
+ format!("one of {}'s {} {}lifetimes", help_name, n,
if have_bound_regions { "free " } else { "" } )
})[..]);
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for Cache {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for Cache {
fn hash_stable<W: StableHasherResult>(&self,
- _: &mut StableHashingContext<'a, 'tcx>,
+ _: &mut StableHashingContext<'a, 'gcx, 'tcx>,
_: &mut StableHasher<W>) {
// do nothing
}
}
impl<'a, 'gcx, 'tcx> FindLocalByTypeVisitor<'a, 'gcx, 'tcx> {
- fn node_matches_type(&mut self, node_id: &'gcx NodeId) -> bool {
- match self.infcx.tables.borrow().node_types.get(node_id) {
- Some(&ty) => {
+ fn node_matches_type(&mut self, node_id: NodeId) -> bool {
+ let ty_opt = self.infcx.in_progress_tables.and_then(|tables| {
+ tables.borrow().node_id_to_type_opt(node_id)
+ });
+ match ty_opt {
+ Some(ty) => {
let ty = self.infcx.resolve_type_vars_if_possible(&ty);
ty.walk().any(|inner_ty| {
inner_ty == *self.target_ty || match (&inner_ty.sty, &self.target_ty.sty) {
}
})
}
- _ => false,
+ None => false,
}
}
}
}
fn visit_local(&mut self, local: &'gcx Local) {
- if self.found_local_pattern.is_none() && self.node_matches_type(&local.id) {
+ if self.found_local_pattern.is_none() && self.node_matches_type(local.id) {
self.found_local_pattern = Some(&*local.pat);
}
intravisit::walk_local(self, local);
fn visit_body(&mut self, body: &'gcx Body) {
for argument in &body.arguments {
- if self.found_arg_pattern.is_none() && self.node_matches_type(&argument.id) {
+ if self.found_arg_pattern.is_none() && self.node_matches_type(argument.id) {
self.found_arg_pattern = Some(&*argument.pat);
}
}
ty::Predicate::ClosureKind(closure_def_id, kind) => {
let found_kind = self.closure_kind(closure_def_id).unwrap();
let closure_span = self.tcx.hir.span_if_local(closure_def_id).unwrap();
+ let node_id = self.tcx.hir.as_local_node_id(closure_def_id).unwrap();
let mut err = struct_span_err!(
self.tcx.sess, closure_span, E0525,
"expected a closure that implements the `{}` trait, \
but this closure only implements `{}`",
kind,
found_kind);
- err.span_note(
+
+ err.span_label(
obligation.cause.span,
- &format!("the requirement to implement \
- `{}` derives from here", kind));
+ format!("the requirement to implement `{}` derives from here", kind));
+
+ // Additional context information explaining why the closure only implements
+ // a particular trait.
+ if let Some(tables) = self.in_progress_tables {
+ match tables.borrow().closure_kinds.get(&node_id) {
+ Some(&(ty::ClosureKind::FnOnce, Some((span, name)))) => {
+ err.span_note(span, &format!(
+ "closure is `FnOnce` because it moves the \
+ variable `{}` out of its environment", name));
+ },
+ Some(&(ty::ClosureKind::FnMut, Some((span, name)))) => {
+ err.span_note(span, &format!(
+ "closure is `FnMut` because it mutates the \
+ variable `{}` here", name));
+ },
+ _ => {}
+ }
+ }
+
err.emit();
return;
}
assert!(!infcx.is_in_snapshot());
- if infcx.tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
+ let tcx = infcx.tcx;
+
+ if tcx.fulfilled_predicates.borrow().check_duplicate(tcx, &obligation.predicate) {
debug!("register_predicate_obligation: duplicate");
return
}
match obligation.predicate {
ty::Predicate::Trait(ref data) => {
- if selcx.tcx().fulfilled_predicates.borrow().check_duplicate_trait(data) {
+ let tcx = selcx.tcx();
+ if tcx.fulfilled_predicates.borrow().check_duplicate_trait(tcx, data) {
return Ok(Some(vec![]));
}
}
}
- pub fn check_duplicate(&self, key: &ty::Predicate<'tcx>) -> bool {
+ pub fn check_duplicate(&self, tcx: TyCtxt, key: &ty::Predicate<'tcx>) -> bool {
if let ty::Predicate::Trait(ref data) = *key {
- self.check_duplicate_trait(data)
+ self.check_duplicate_trait(tcx, data)
} else {
false
}
}
- pub fn check_duplicate_trait(&self, data: &ty::PolyTraitPredicate<'tcx>) -> bool {
+ pub fn check_duplicate_trait(&self, tcx: TyCtxt, data: &ty::PolyTraitPredicate<'tcx>) -> bool {
// For the global predicate registry, when we find a match, it
// may have been computed by some other task, so we want to
// add a read from the node corresponding to the predicate
// processing to make sure we get the transitive dependencies.
if self.set.contains(data) {
debug_assert!(data.is_global());
- self.dep_graph.read(data.dep_node());
+ self.dep_graph.read(data.dep_node(tcx));
debug!("check_duplicate: global predicate `{:?}` already proved elsewhere", data);
true
let elaborated_env = ty::ParamEnv::new(tcx.intern_predicates(&predicates),
unnormalized_env.reveal);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let predicates = match fully_normalize(
&infcx,
cause,
debug!("normalize_and_test_predicates(predicates={:?})",
predicates);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty(Reveal::All);
let mut selcx = SelectionContext::new(&infcx);
let mut fulfill_cx = FulfillmentContext::new();
assert!(!obligation.predicate.has_escaping_regions());
let tcx = self.tcx();
- let dep_node = obligation.predicate.dep_node();
+ let dep_node = obligation.predicate.dep_node(tcx);
let _task = tcx.dep_graph.in_task(dep_node);
let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
debug!("evaluate_predicate_recursively({:?})",
obligation);
+ let tcx = self.tcx();
+
// Check the cache from the tcx of predicates that we know
// have been proven elsewhere. This cache only contains
// predicates that are global in scope and hence unaffected by
// the current environment.
- if self.tcx().fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) {
+ if tcx.fulfilled_predicates.borrow().check_duplicate(tcx, &obligation.predicate) {
return EvaluatedToOk;
}
let ancestors = trait_def.ancestors(tcx, impl_data.impl_def_id);
match ancestors.defs(tcx, item.name, item.kind).next() {
Some(node_item) => {
- let substs = tcx.infer_ctxt(()).enter(|infcx| {
+ let substs = tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty(Reveal::All);
let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
let substs = translate_substs(&infcx, param_env, impl_data.impl_def_id,
let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap();
// Create a infcx, taking the predicates of impl1 as assumptions:
- let result = tcx.infer_ctxt(()).enter(|infcx| {
+ let result = tcx.infer_ctxt().enter(|infcx| {
// Normalize the trait reference. The WF rules ought to ensure
// that this always succeeds.
let impl1_trait_ref =
let possible_sibling = *slot;
let tcx = tcx.global_tcx();
- let (le, ge) = tcx.infer_ctxt(()).enter(|infcx| {
+ let (le, ge) = tcx.infer_ctxt().enter(|infcx| {
let overlap = traits::overlapping_impls(&infcx,
possible_sibling,
impl_def_id);
// seems likely that they should eventually be merged into more
// general routines.
-use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
+use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig,
+ DepConstructor};
use hir::def_id::DefId;
use infer::TransNormalize;
use std::cell::RefCell;
// Remove any references to regions; this helps improve caching.
let trait_ref = self.erase_regions(&trait_ref);
- self.trans_trait_caches.trait_cache.memoize(trait_ref, || {
+ self.trans_trait_caches.trait_cache.memoize(self, trait_ref, || {
debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
trait_ref, trait_ref.def_id());
// Do the initial selection for the obligation. This yields the
// shallow result we are looking for -- that is, what specific impl.
- self.infer_ctxt(()).enter(|infcx| {
+ self.infer_ctxt().enter(|infcx| {
let mut selcx = SelectionContext::new(&infcx);
let param_env = ty::ParamEnv::empty(Reveal::All);
if !ty.has_projection_types() {
ty
} else {
- self.tcx.trans_trait_caches.project_cache.memoize(ty, || {
+ self.tcx.trans_trait_caches.project_cache.memoize(self.tcx, ty, || {
debug!("AssociatedTypeNormalizer: ty={:?}", ty);
self.tcx.normalize_associated_type(&ty)
})
impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
type Key = ty::PolyTraitRef<'tcx>;
type Value = Vtable<'tcx, ()>;
- fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
- key.to_poly_trait_predicate().dep_node()
+ fn to_dep_node(tcx: TyCtxt, key: &ty::PolyTraitRef<'tcx>) -> DepNode {
+ key.to_poly_trait_predicate().dep_node(tcx)
}
}
impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
type Key = Ty<'gcx>;
type Value = Ty<'gcx>;
- fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
+ fn to_dep_node(tcx: TyCtxt, key: &Self::Key) -> DepNode {
// Ideally, we'd just put `key` into the dep-node, but we
// can't put full types in there. So just collect up all the
// def-ids of structs/enums as well as any traits that we
})
.collect();
- DepNode::ProjectionCache { def_ids: def_ids }
+ DepNode::new(tcx, DepConstructor::ProjectionCache { def_ids: def_ids })
}
}
}
}
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture<'tcx>> {
- Some(self.upvar_capture_map.get(&upvar_id).unwrap().clone())
+ pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
+ self.upvar_capture_map[&upvar_id]
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use dep_graph::DepNode;
+use dep_graph::DepConstructor;
use hir::def_id::DefId;
use ty::{self, Ty, TypeFoldable, Substs};
use util::ppaux;
tcx.get_attrs(self.def_id())
}
- pub(crate) fn dep_node(&self) -> DepNode<DefId> {
+ pub //(crate)
+ fn dep_node(&self) -> DepConstructor {
// HACK: def-id binning, project-style; someone replace this with
// real on-demand.
let ty = match self {
_ => None
}.into_iter();
- DepNode::MirShim(
+ DepConstructor::MirShim(
Some(self.def_id()).into_iter().chain(
ty.flat_map(|t| t.walk()).flat_map(|t| match t.sty {
ty::TyAdt(adt_def, _) => Some(adt_def.did),
pub fn try_push_visible_item_path<T>(self, buffer: &mut T, external_def_id: DefId) -> bool
where T: ItemPathBuffer
{
- let visible_parent_map = self.sess.cstore.visible_parent_map();
+ let visible_parent_map = self.sess.cstore.visible_parent_map(self.sess);
let (mut cur_def, mut cur_path) = (external_def_id, Vec::<ast::Name>::new());
loop {
data @ DefPathData::ClosureExpr |
data @ DefPathData::Binding(..) |
data @ DefPathData::ImplTrait |
- data @ DefPathData::Typeof => {
+ data @ DefPathData::Typeof |
+ data @ DefPathData::GlobalMetaData(..) => {
let parent_def_id = self.parent_def_id(def_id).unwrap();
self.push_item_path(buffer, parent_def_id);
buffer.push(&data.as_interned_str());
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use dep_graph::{DepNode, DepTrackingMapConfig};
+use dep_graph::{DepConstructor, DepNode, DepTrackingMapConfig};
use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE};
use hir::def::Def;
use hir;
type Value = $V;
#[allow(unused)]
- fn to_dep_node(key: &$K) -> DepNode<DefId> {
- use dep_graph::DepNode::*;
+ fn to_dep_node(tcx: TyCtxt, key: &$K) -> DepNode {
+ use dep_graph::DepConstructor::*;
- $node(*key)
+ DepNode::new(tcx, $node(*key))
}
}
impl<'a, $tcx, 'lcx> queries::$name<$tcx> {
span = key.default_span(tcx)
}
- let _task = tcx.dep_graph.in_task(Self::to_dep_node(&key));
+ let _task = tcx.dep_graph.in_task(Self::to_dep_node(tcx, &key));
let result = tcx.cycle_check(span, Query::$name(key), || {
let provider = tcx.maps.providers[key.map_crate()].$name;
// We register the `read` here, but not in `force`, since
// `force` does not give access to the value produced (and thus
// we actually don't read it).
- tcx.dep_graph.read(Self::to_dep_node(&key));
+ tcx.dep_graph.read(Self::to_dep_node(tcx, &key));
Self::try_get_with(tcx, span, key, Clone::clone)
}
/// To avoid cycles within the predicates of a single item we compute
/// per-type-parameter predicates for resolving `T::AssocTy`.
- [] type_param_predicates: TypeParamPredicates((DefId, DefId))
+ [] type_param_predicates: type_param_predicates((DefId, DefId))
-> ty::GenericPredicates<'tcx>,
[] trait_def: ItemSignature(DefId) -> &'tcx ty::TraitDef,
-> Result<&'tcx Layout, LayoutError<'tcx>>,
}
-fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepNode<DefId> {
- DepNode::CoherenceCheckTrait(def_id)
+fn type_param_predicates((item_id, param_id): (DefId, DefId)) -> DepConstructor {
+ DepConstructor::TypeParamPredicates {
+ item_id,
+ param_id
+ }
+}
+
+fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepConstructor {
+ DepConstructor::CoherenceCheckTrait(def_id)
}
-fn crate_inherent_impls_dep_node(_: CrateNum) -> DepNode<DefId> {
- DepNode::Coherence
+fn crate_inherent_impls_dep_node(_: CrateNum) -> DepConstructor {
+ DepConstructor::Coherence
}
-fn reachability_dep_node(_: CrateNum) -> DepNode<DefId> {
- DepNode::Reachability
+fn reachability_dep_node(_: CrateNum) -> DepConstructor {
+ DepConstructor::Reachability
}
-fn mir_shim_dep_node(instance: ty::InstanceDef) -> DepNode<DefId> {
+fn mir_shim_dep_node(instance: ty::InstanceDef) -> DepConstructor {
instance.dep_node()
}
-fn symbol_name_dep_node(instance: ty::Instance) -> DepNode<DefId> {
+fn symbol_name_dep_node(instance: ty::Instance) -> DepConstructor {
// symbol_name uses the substs only to traverse them to find the
// hash, and that does not create any new dep-nodes.
- DepNode::SymbolName(instance.def.def_id())
+ DepConstructor::SymbolName(instance.def.def_id())
}
-fn typeck_item_bodies_dep_node(_: CrateNum) -> DepNode<DefId> {
- DepNode::TypeckBodiesKrate
+fn typeck_item_bodies_dep_node(_: CrateNum) -> DepConstructor {
+ DepConstructor::TypeckBodiesKrate
}
-fn const_eval_dep_node((def_id, _): (DefId, &Substs)) -> DepNode<DefId> {
- DepNode::ConstEval(def_id)
+fn const_eval_dep_node((def_id, _): (DefId, &Substs)) -> DepConstructor {
+ DepConstructor::ConstEval(def_id)
}
-fn mir_keys(_: CrateNum) -> DepNode<DefId> {
- DepNode::MirKeys
+fn mir_keys(_: CrateNum) -> DepConstructor {
+ DepConstructor::MirKeys
}
-fn crate_variances(_: CrateNum) -> DepNode<DefId> {
- DepNode::CrateVariances
+fn crate_variances(_: CrateNum) -> DepConstructor {
+ DepConstructor::CrateVariances
}
-fn relevant_trait_impls_for((def_id, _): (DefId, SimplifiedType)) -> DepNode<DefId> {
- DepNode::TraitImpls(def_id)
+fn relevant_trait_impls_for((def_id, _): (DefId, SimplifiedType)) -> DepConstructor {
+ DepConstructor::TraitImpls(def_id)
}
-fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepNode<DefId> {
+fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
- DepNode::IsCopy(def_id)
+ DepConstructor::IsCopy(def_id)
}
-fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepNode<DefId> {
+fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
- DepNode::IsSized(def_id)
+ DepConstructor::IsSized(def_id)
}
-fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepNode<DefId> {
+fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
- DepNode::IsFreeze(def_id)
+ DepConstructor::IsFreeze(def_id)
}
-fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepNode<DefId> {
+fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
- DepNode::NeedsDrop(def_id)
+ DepConstructor::NeedsDrop(def_id)
}
-fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepNode<DefId> {
+fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
- DepNode::Layout(def_id)
+ DepConstructor::Layout(def_id)
}
pub use self::LvaluePreference::*;
pub use self::fold::TypeFoldable;
-use dep_graph::DepNode;
+use dep_graph::{DepNode, DepConstructor};
use hir::{map as hir_map, FreevarMap, TraitMap};
use hir::def::{Def, CtorKind, ExportMap};
use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
}
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for ty::TyS<'tcx> {
+impl<'tcx> TyS<'tcx> {
+ pub fn is_primitive_ty(&self) -> bool {
+ match self.sty {
+ TypeVariants::TyBool |
+ TypeVariants::TyChar |
+ TypeVariants::TyInt(_) |
+ TypeVariants::TyUint(_) |
+ TypeVariants::TyFloat(_) |
+ TypeVariants::TyInfer(InferTy::IntVar(_)) |
+ TypeVariants::TyInfer(InferTy::FloatVar(_)) |
+ TypeVariants::TyInfer(InferTy::FreshIntTy(_)) |
+ TypeVariants::TyInfer(InferTy::FreshFloatTy(_)) => true,
+ TypeVariants::TyRef(_, x) => x.ty.is_primitive_ty(),
+ _ => false,
+ }
+ }
+}
+
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::TyS<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::TyS {
ref sty,
}
/// Creates the dep-node for selecting/evaluating this trait reference.
- fn dep_node(&self) -> DepNode<DefId> {
+ fn dep_node(&self, tcx: TyCtxt) -> DepNode {
// Extact the trait-def and first def-id from inputs. See the
// docs for `DepNode::TraitSelect` for more information.
let trait_def_id = self.def_id();
self.input_types()
.flat_map(|t| t.walk())
.filter_map(|t| match t.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
+ ty::TyAdt(adt_def, ..) => Some(adt_def.did),
+ ty::TyClosure(def_id, ..) => Some(def_id),
+ ty::TyFnDef(def_id, ..) => Some(def_id),
_ => None
})
.next()
.unwrap_or(trait_def_id);
- DepNode::TraitSelect {
+ DepNode::new(tcx, DepConstructor::TraitSelect {
trait_def_id: trait_def_id,
input_def_id: input_def_id
- }
+ })
}
pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
self.0.def_id()
}
- pub fn dep_node(&self) -> DepNode<DefId> {
+ pub fn dep_node(&self, tcx: TyCtxt) -> DepNode {
// ok to skip binder since depnode does not care about regions
- self.0.dep_node()
+ self.0.dep_node(tcx)
}
}
impl<'tcx> serialize::UseSpecializedDecodable for &'tcx AdtDef {}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for AdtDef {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for AdtDef {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let ty::AdtDef {
did,
self_type: Ty<'tcx>, span: Span)
-> Result<(), CopyImplementationError<'tcx>> {
// FIXME: (@jroesch) float this code up
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let (adt, substs) = match self_type.sty {
ty::TyAdt(adt, substs) => (adt, substs),
_ => return Err(CopyImplementationError::NotAnAdt),
{
let (param_env, ty) = query.into_parts();
let trait_def_id = tcx.require_lang_item(lang_items::CopyTraitLangItem);
- tcx.infer_ctxt(())
+ tcx.infer_ctxt()
.enter(|infcx| traits::type_known_to_meet_bound(&infcx,
param_env,
ty,
{
let (param_env, ty) = query.into_parts();
let trait_def_id = tcx.require_lang_item(lang_items::SizedTraitLangItem);
- tcx.infer_ctxt(())
+ tcx.infer_ctxt()
.enter(|infcx| traits::type_known_to_meet_bound(&infcx,
param_env,
ty,
{
let (param_env, ty) = query.into_parts();
let trait_def_id = tcx.require_lang_item(lang_items::FreezeTraitLangItem);
- tcx.infer_ctxt(())
+ tcx.infer_ctxt()
.enter(|infcx| traits::type_known_to_meet_bound(&infcx,
param_env,
ty,
use std::path::Path;
use std::time::{Duration, Instant};
+use ty::TyCtxt;
+
// The name of the associated type for `Fn` return types
pub const FN_OUTPUT_NAME: &'static str = "Output";
/// needed in the `op` to ensure that the correct edges are
/// added into the dep graph. See the `DepTrackingMap` impl for
/// more details!
- fn memoize<OP>(&self, key: Self::Key, op: OP) -> Self::Value
+ fn memoize<OP>(&self, tcx: TyCtxt, key: Self::Key, op: OP) -> Self::Value
where OP: FnOnce() -> Self::Value;
}
type Key = K;
type Value = V;
- fn memoize<OP>(&self, key: K, op: OP) -> V
+ fn memoize<OP>(&self, _tcx: TyCtxt, key: K, op: OP) -> V
where OP: FnOnce() -> V
{
let result = self.borrow().get(&key).cloned();
debug!("check_loans(body id={})", body.value.id);
let def_id = bccx.tcx.hir.body_owner_def_id(body.id());
- let infcx = bccx.tcx.borrowck_fake_infer_ctxt(body.id());
let param_env = bccx.tcx.param_env(def_id);
let mut clcx = CheckLoanCtxt {
bccx: bccx,
all_loans: all_loans,
param_env,
};
- euv::ExprUseVisitor::new(&mut clcx, &bccx.region_maps, &infcx, param_env).consume_body(body);
+ euv::ExprUseVisitor::new(&mut clcx, bccx.tcx, param_env, &bccx.region_maps, bccx.tables)
+ .consume_body(body);
}
#[derive(PartialEq)]
use borrowck::*;
use borrowck::move_data::MoveData;
-use rustc::infer::InferCtxt;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
use rustc::middle::mem_categorization::Categorization;
body: hir::BodyId)
-> (Vec<Loan<'tcx>>, move_data::MoveData<'tcx>) {
let def_id = bccx.tcx.hir.body_owner_def_id(body);
- let infcx = bccx.tcx.borrowck_fake_infer_ctxt(body);
let param_env = bccx.tcx.param_env(def_id);
let mut glcx = GatherLoanCtxt {
bccx: bccx,
- infcx: &infcx,
all_loans: Vec::new(),
item_ub: region::CodeExtent::Misc(body.node_id),
move_data: MoveData::new(),
};
let body = glcx.bccx.tcx.hir.body(body);
- euv::ExprUseVisitor::new(&mut glcx, &bccx.region_maps, &infcx, param_env).consume_body(body);
+ euv::ExprUseVisitor::new(&mut glcx, bccx.tcx, param_env, &bccx.region_maps, bccx.tables)
+ .consume_body(body);
glcx.report_potential_errors();
let GatherLoanCtxt { all_loans, move_data, .. } = glcx;
struct GatherLoanCtxt<'a, 'tcx: 'a> {
bccx: &'a BorrowckCtxt<'a, 'tcx>,
- infcx: &'a InferCtxt<'a, 'tcx, 'tcx>,
move_data: move_data::MoveData<'tcx>,
move_error_collector: move_error::MoveErrorCollector<'tcx>,
all_loans: Vec<Loan<'tcx>>,
}
fn decl_without_init(&mut self, id: ast::NodeId, _span: Span) {
- let ty = self.infcx.tables.borrow().node_id_to_type(id);
+ let ty = self.bccx.tables.node_id_to_type(id);
gather_moves::gather_decl(self.bccx, &self.move_data, id, ty);
}
}
///
/// FIXME: this should be done by borrowck.
fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Expr) {
- cx.tcx.infer_ctxt(cx.tables).enter(|infcx| {
- let mut checker = MutationChecker {
- cx: cx,
- };
- ExprUseVisitor::new(&mut checker, cx.region_maps, &infcx, cx.param_env).walk_expr(guard);
- });
+ let mut checker = MutationChecker {
+ cx: cx,
+ };
+ ExprUseVisitor::new(&mut checker, cx.tcx, cx.param_env, cx.region_maps, cx.tables)
+ .walk_expr(guard);
}
-struct MutationChecker<'a, 'gcx: 'a> {
- cx: &'a MatchVisitor<'a, 'gcx>,
+struct MutationChecker<'a, 'tcx: 'a> {
+ cx: &'a MatchVisitor<'a, 'tcx>,
}
-impl<'a, 'gcx, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'gcx> {
+impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> {
fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {}
fn consume(&mut self, _: ast::NodeId, _: Span, _: cmt, _: ConsumeMode) {}
fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
debug!("resolve_trait_associated_const: trait_ref={:?}",
trait_ref);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty(Reveal::UserFacing);
let mut selcx = traits::SelectionContext::new(&infcx);
let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
}
}
+impl<T1: HashStable<CTX>, CTX> HashStable<CTX> for (T1,) {
+ fn hash_stable<W: StableHasherResult>(&self,
+ ctx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ self.0.hash_stable(ctx, hasher);
+ }
+}
+
impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2) {
fn hash_stable<W: StableHasherResult>(&self,
ctx: &mut CTX,
}
}
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ::std::rc::Rc<T> {
+ #[inline]
+ fn hash_stable<W: StableHasherResult>(&self,
+ ctx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ::std::sync::Arc<T> {
+ #[inline]
+ fn hash_stable<W: StableHasherResult>(&self,
+ ctx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
impl<CTX> HashStable<CTX> for str {
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
index,
"test_crate",
|tcx| {
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let mut region_maps = RegionMaps::new();
body(Env {
infcx: &infcx,
//! ```
use graphviz as dot;
-use rustc::dep_graph::{DepGraphQuery, DepNode};
+use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::dep_graph::debug::{DepNodeFilter, EdgeFilter};
use rustc::hir::def_id::DefId;
use rustc::ty::TyCtxt;
check_paths(tcx, &if_this_changed, &then_this_would_need);
}
-type Sources = Vec<(Span, DefId, DepNode<DefId>)>;
-type Targets = Vec<(Span, ast::Name, ast::NodeId, DepNode<DefId>)>;
+type Sources = Vec<(Span, DefId, DepNode)>;
+type Targets = Vec<(Span, ast::Name, ast::NodeId, DepNode)>;
struct IfThisChanged<'a, 'tcx:'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
fn process_attrs(&mut self, node_id: ast::NodeId, attrs: &[ast::Attribute]) {
let def_id = self.tcx.hir.local_def_id(node_id);
+ let def_path_hash = self.tcx.def_path_hash(def_id);
for attr in attrs {
if attr.check_name(ATTR_IF_THIS_CHANGED) {
let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned {
- None => DepNode::Hir(def_id),
+ None => def_path_hash.to_dep_node(DepKind::Hir),
Some(n) => {
- match DepNode::from_label_string(&n.as_str(), def_id) {
+ match DepNode::from_label_string(&n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned {
Some(n) => {
- match DepNode::from_label_string(&n.as_str(), def_id) {
+ match DepNode::from_label_string(&n.as_str(), def_path_hash) {
Ok(n) => n,
Err(()) => {
self.tcx.sess.span_fatal(
}
}
-pub struct GraphvizDepGraph<'q>(FxHashSet<&'q DepNode<DefId>>,
- Vec<(&'q DepNode<DefId>, &'q DepNode<DefId>)>);
+pub struct GraphvizDepGraph<'q>(FxHashSet<&'q DepNode>,
+ Vec<(&'q DepNode, &'q DepNode)>);
impl<'a, 'tcx, 'q> dot::GraphWalk<'a> for GraphvizDepGraph<'q> {
- type Node = &'q DepNode<DefId>;
- type Edge = (&'q DepNode<DefId>, &'q DepNode<DefId>);
- fn nodes(&self) -> dot::Nodes<&'q DepNode<DefId>> {
+ type Node = &'q DepNode;
+ type Edge = (&'q DepNode, &'q DepNode);
+ fn nodes(&self) -> dot::Nodes<&'q DepNode> {
let nodes: Vec<_> = self.0.iter().cloned().collect();
nodes.into_cow()
}
- fn edges(&self) -> dot::Edges<(&'q DepNode<DefId>, &'q DepNode<DefId>)> {
+ fn edges(&self) -> dot::Edges<(&'q DepNode, &'q DepNode)> {
self.1[..].into_cow()
}
- fn source(&self, edge: &(&'q DepNode<DefId>, &'q DepNode<DefId>)) -> &'q DepNode<DefId> {
+ fn source(&self, edge: &(&'q DepNode, &'q DepNode)) -> &'q DepNode {
edge.0
}
- fn target(&self, edge: &(&'q DepNode<DefId>, &'q DepNode<DefId>)) -> &'q DepNode<DefId> {
+ fn target(&self, edge: &(&'q DepNode, &'q DepNode)) -> &'q DepNode {
edge.1
}
}
impl<'a, 'tcx, 'q> dot::Labeller<'a> for GraphvizDepGraph<'q> {
- type Node = &'q DepNode<DefId>;
- type Edge = (&'q DepNode<DefId>, &'q DepNode<DefId>);
+ type Node = &'q DepNode;
+ type Edge = (&'q DepNode, &'q DepNode);
fn graph_id(&self) -> dot::Id {
dot::Id::new("DependencyGraph").unwrap()
}
- fn node_id(&self, n: &&'q DepNode<DefId>) -> dot::Id {
+ fn node_id(&self, n: &&'q DepNode) -> dot::Id {
let s: String =
format!("{:?}", n).chars()
.map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
debug!("n={:?} s={:?}", n, s);
dot::Id::new(s).unwrap()
}
- fn node_label(&self, n: &&'q DepNode<DefId>) -> dot::LabelText {
+ fn node_label(&self, n: &&'q DepNode) -> dot::LabelText {
dot::LabelText::label(format!("{:?}", n))
}
}
// Given an optional filter like `"x,y,z"`, returns either `None` (no
// filter) or the set of nodes whose labels contain all of those
// substrings.
-fn node_set<'q>(query: &'q DepGraphQuery<DefId>, filter: &DepNodeFilter)
- -> Option<FxHashSet<&'q DepNode<DefId>>>
+fn node_set<'q>(query: &'q DepGraphQuery, filter: &DepNodeFilter)
+ -> Option<FxHashSet<&'q DepNode>>
{
debug!("node_set(filter={:?})", filter);
Some(query.nodes().into_iter().filter(|n| filter.test(n)).collect())
}
-fn filter_nodes<'q>(query: &'q DepGraphQuery<DefId>,
- sources: &Option<FxHashSet<&'q DepNode<DefId>>>,
- targets: &Option<FxHashSet<&'q DepNode<DefId>>>)
- -> FxHashSet<&'q DepNode<DefId>>
+fn filter_nodes<'q>(query: &'q DepGraphQuery,
+ sources: &Option<FxHashSet<&'q DepNode>>,
+ targets: &Option<FxHashSet<&'q DepNode>>)
+ -> FxHashSet<&'q DepNode>
{
if let &Some(ref sources) = sources {
if let &Some(ref targets) = targets {
}
}
-fn walk_nodes<'q>(query: &'q DepGraphQuery<DefId>,
- starts: &FxHashSet<&'q DepNode<DefId>>,
+fn walk_nodes<'q>(query: &'q DepGraphQuery,
+ starts: &FxHashSet<&'q DepNode>,
direction: Direction)
- -> FxHashSet<&'q DepNode<DefId>>
+ -> FxHashSet<&'q DepNode>
{
let mut set = FxHashSet();
for &start in starts {
set
}
-fn walk_between<'q>(query: &'q DepGraphQuery<DefId>,
- sources: &FxHashSet<&'q DepNode<DefId>>,
- targets: &FxHashSet<&'q DepNode<DefId>>)
- -> FxHashSet<&'q DepNode<DefId>>
+fn walk_between<'q>(query: &'q DepGraphQuery,
+ sources: &FxHashSet<&'q DepNode>,
+ targets: &FxHashSet<&'q DepNode>)
+ -> FxHashSet<&'q DepNode>
{
// This is a bit tricky. We want to include a node only if it is:
// (a) reachable from a source and (b) will reach a target. And we
})
.collect();
- fn recurse(query: &DepGraphQuery<DefId>,
+ fn recurse(query: &DepGraphQuery,
node_states: &mut [State],
node: NodeIndex)
-> bool
}
}
-fn filter_edges<'q>(query: &'q DepGraphQuery<DefId>,
- nodes: &FxHashSet<&'q DepNode<DefId>>)
- -> Vec<(&'q DepNode<DefId>, &'q DepNode<DefId>)>
+fn filter_edges<'q>(query: &'q DepGraphQuery,
+ nodes: &FxHashSet<&'q DepNode>)
+ -> Vec<(&'q DepNode, &'q DepNode)>
{
query.edges()
.into_iter()
use std::cell::RefCell;
use std::hash::Hash;
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::{DepNode, DepKind};
use rustc::hir;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
use rustc::hir::map::DefPathHash;
pub type IchHasher = StableHasher<Fingerprint>;
pub struct IncrementalHashesMap {
- hashes: FxHashMap<DepNode<DefId>, Fingerprint>,
+ hashes: FxHashMap<DepNode, Fingerprint>,
// These are the metadata hashes for the current crate as they were stored
// during the last compilation session. They are only loaded if
}
}
- pub fn get(&self, k: &DepNode<DefId>) -> Option<&Fingerprint> {
+ pub fn get(&self, k: &DepNode) -> Option<&Fingerprint> {
self.hashes.get(k)
}
- pub fn insert(&mut self, k: DepNode<DefId>, v: Fingerprint) -> Option<Fingerprint> {
- self.hashes.insert(k, v)
+ pub fn insert(&mut self, k: DepNode, v: Fingerprint) {
+ assert!(self.hashes.insert(k, v).is_none());
}
pub fn iter<'a>(&'a self)
- -> ::std::collections::hash_map::Iter<'a, DepNode<DefId>, Fingerprint> {
+ -> ::std::collections::hash_map::Iter<'a, DepNode, Fingerprint> {
self.hashes.iter()
}
}
}
-impl<'a> ::std::ops::Index<&'a DepNode<DefId>> for IncrementalHashesMap {
+impl<'a> ::std::ops::Index<&'a DepNode> for IncrementalHashesMap {
type Output = Fingerprint;
- fn index(&self, index: &'a DepNode<DefId>) -> &Fingerprint {
+ fn index(&self, index: &'a DepNode) -> &Fingerprint {
match self.hashes.get(index) {
Some(fingerprint) => fingerprint,
None => {
}
struct ComputeItemHashesVisitor<'a, 'tcx: 'a> {
- hcx: StableHashingContext<'a, 'tcx>,
+ hcx: StableHashingContext<'a, 'tcx, 'tcx>,
hashes: IncrementalHashesMap,
}
impl<'a, 'tcx: 'a> ComputeItemHashesVisitor<'a, 'tcx> {
fn compute_and_store_ich_for_item_like<T>(&mut self,
- dep_node: DepNode<DefId>,
+ dep_node: DepNode,
hash_bodies: bool,
item_like: T)
- where T: HashStable<StableHashingContext<'a, 'tcx>>
+ where T: HashStable<StableHashingContext<'a, 'tcx, 'tcx>>
{
if !hash_bodies && !self.hcx.tcx().sess.opts.build_dep_graph() {
// If we just need the hashes in order to compute the SVH, we don't
// add each item (in some deterministic order) to the overall
// crate hash.
{
- let hcx = &mut self.hcx;
let mut item_hashes: Vec<_> =
self.hashes.iter()
- .filter_map(|(item_dep_node, &item_hash)| {
+ .filter_map(|(&item_dep_node, &item_hash)| {
// This `match` determines what kinds of nodes
// go into the SVH:
- match *item_dep_node {
- DepNode::Hir(_) |
- DepNode::HirBody(_) => {
+ match item_dep_node.kind {
+ DepKind::Hir |
+ DepKind::HirBody => {
// We want to incorporate these into the
// SVH.
}
- DepNode::AllLocalTraitImpls => {
+ DepKind::AllLocalTraitImpls => {
// These are already covered by hashing
// the HIR.
return None
}
ref other => {
- bug!("Found unexpected DepNode during \
+ bug!("Found unexpected DepKind during \
SVH computation: {:?}",
other)
}
}
- // Convert from a DepNode<DefId> to a
- // DepNode<u64> where the u64 is the hash of
- // the def-id's def-path:
- let item_dep_node =
- item_dep_node.map_def(|&did| Some(hcx.def_path_hash(did)))
- .unwrap();
Some((item_dep_node, item_hash))
})
.collect();
krate.attrs.hash_stable(&mut self.hcx, &mut crate_state);
let crate_hash = crate_state.finish();
- self.hashes.insert(DepNode::Krate, crate_hash);
+ self.hashes.insert(DepNode::new_no_params(DepKind::Krate), crate_hash);
debug!("calculate_crate_hash: crate_hash={:?}", crate_hash);
}
body_ids: _,
} = *krate;
- let def_id = DefId::local(CRATE_DEF_INDEX);
- self.compute_and_store_ich_for_item_like(DepNode::Hir(def_id),
+ let def_path_hash = self.hcx.tcx().hir.definitions().def_path_hash(CRATE_DEF_INDEX);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir),
false,
(module, (span, attrs)));
- self.compute_and_store_ich_for_item_like(DepNode::HirBody(def_id),
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody),
true,
(module, (span, attrs)));
}
let mut hasher = StableHasher::new();
impls.hash_stable(&mut self.hcx, &mut hasher);
- self.hashes.insert(DepNode::AllLocalTraitImpls, hasher.finish());
+ self.hashes.insert(DepNode::new_no_params(DepKind::AllLocalTraitImpls),
+ hasher.finish());
}
}
impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for ComputeItemHashesVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item) {
let def_id = self.hcx.tcx().hir.local_def_id(item.id);
- self.compute_and_store_ich_for_item_like(DepNode::Hir(def_id), false, item);
- self.compute_and_store_ich_for_item_like(DepNode::HirBody(def_id), true, item);
+ let def_path_hash = self.hcx.tcx().def_path_hash(def_id);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir),
+ false,
+ item);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody),
+ true,
+ item);
}
fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) {
let def_id = self.hcx.tcx().hir.local_def_id(item.id);
- self.compute_and_store_ich_for_item_like(DepNode::Hir(def_id), false, item);
- self.compute_and_store_ich_for_item_like(DepNode::HirBody(def_id), true, item);
+ let def_path_hash = self.hcx.tcx().def_path_hash(def_id);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir),
+ false,
+ item);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody),
+ true,
+ item);
}
fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) {
let def_id = self.hcx.tcx().hir.local_def_id(item.id);
- self.compute_and_store_ich_for_item_like(DepNode::Hir(def_id), false, item);
- self.compute_and_store_ich_for_item_like(DepNode::HirBody(def_id), true, item);
+ let def_path_hash = self.hcx.tcx().def_path_hash(def_id);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir),
+ false,
+ item);
+ self.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody),
+ true,
+ item);
}
}
for macro_def in krate.exported_macros.iter() {
let def_id = tcx.hir.local_def_id(macro_def.id);
- visitor.compute_and_store_ich_for_item_like(DepNode::Hir(def_id), false, macro_def);
- visitor.compute_and_store_ich_for_item_like(DepNode::HirBody(def_id), true, macro_def);
+ let def_path_hash = tcx.def_path_hash(def_id);
+ visitor.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::Hir),
+ false,
+ macro_def);
+ visitor.compute_and_store_ich_for_item_like(def_path_hash.to_dep_node(DepKind::HirBody),
+ true,
+ macro_def);
}
visitor.compute_and_store_ich_for_trait_impls(krate);
use rustc::hir::map::DefPathHash;
use rustc::ich::Fingerprint;
use rustc::middle::cstore::EncodedMetadataHash;
-use std::sync::Arc;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph
- pub nodes: IndexVec<DepNodeIndex, DepNode<DefPathHash>>,
+ pub nodes: IndexVec<DepNodeIndex, DepNode>,
/// For each DepNode, stores the list of edges originating from that
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
/// which holds the actual DepNodeIndices of the target nodes.
- pub edge_list_indices: Vec<(u32, u32)>,
+ pub edge_list_indices: IndexVec<DepNodeIndex, (u32, u32)>,
/// A flattened list of all edge targets in the graph. Edge sources are
/// implicit in edge_list_indices.
pub edge_list_data: Vec<DepNodeIndex>,
/// These are output nodes that have no incoming edges. We track
/// these separately so that when we reload all edges, we don't
/// lose track of these nodes.
- pub bootstrap_outputs: Vec<DepNode<DefPathHash>>,
+ pub bootstrap_outputs: Vec<DepNode>,
/// These are hashes of two things:
/// - the HIR nodes in this crate
/// will be different when we next compile) related to each node,
/// but rather the `DefPathIndex`. This can then be retraced
/// to find the current def-id.
- pub hashes: Vec<SerializedHash>,
+ pub hashes: Vec<(DepNodeIndex, Fingerprint)>,
+}
+
+impl SerializedDepGraph {
+ // Returns the targets of all edges originating from `source`, by
+ // slicing `edge_list_data` with the `[start, end)` pair stored for
+ // that node in `edge_list_indices`.
+ pub fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] {
+ let targets = self.edge_list_indices[source];
+ &self.edge_list_data[targets.0 as usize .. targets.1 as usize]
+ }
}
/// The index of a DepNode in the SerializedDepGraph::nodes array.
}
}
-#[derive(Debug, RustcEncodable, RustcDecodable)]
-pub struct SerializedHash {
- /// def-id of thing being hashed
- pub dep_node: DepNode<DefPathHash>,
-
- /// the hash as of previous compilation, computed by code in
- /// `hash` module
- pub hash: Fingerprint,
-}
-
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedWorkProduct {
/// node that produced the work-product
- pub id: Arc<WorkProductId>,
+ pub id: WorkProductId,
/// work-product data itself
pub work_product: WorkProduct,
/// (matching the one found in this structure).
pub entry_hashes: Vec<EncodedMetadataHash>,
- /// This map contains fingerprints that are not specific to some DefId but
- /// describe something global to the whole crate.
- pub global_hashes: Vec<(DepNode<()>, Fingerprint)>,
-
/// For each DefIndex (as it occurs in SerializedMetadataHash), this
/// map stores the DefPathIndex (as it occurs in DefIdDirectory), so
/// that we can find the new DefId for a SerializedMetadataHash in a
//! previous revision to compare things to.
//!
+use super::data::DepNodeIndex;
use super::load::DirtyNodes;
-use rustc::dep_graph::{DepGraphQuery, DepNode};
+use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::itemlikevisit::ItemLikeVisitor;
ATTR_CLEAN_METADATA};
use syntax::ast::{self, Attribute, NestedMetaItem};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
+use rustc_data_structures::indexed_vec::IndexVec;
use syntax_pos::Span;
use rustc::ty::TyCtxt;
const CFG: &'static str = "cfg";
pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ nodes: &IndexVec<DepNodeIndex, DepNode>,
dirty_inputs: &DirtyNodes) {
// can't add `#[rustc_dirty]` etc without opting in to this feature
if !tcx.sess.features.borrow().rustc_attrs {
}
let _ignore = tcx.dep_graph.in_ignore();
- let def_path_hash_to_def_id = tcx.def_path_hash_to_def_id.as_ref().unwrap();
- let dirty_inputs: FxHashSet<DepNode<DefId>> =
+ let dirty_inputs: FxHashSet<DepNode> =
dirty_inputs.keys()
- .filter_map(|dep_node| {
- dep_node.map_def(|def_path_hash| {
- def_path_hash_to_def_id.get(def_path_hash).cloned()
- })
+ .filter_map(|dep_node_index| {
+ let dep_node = nodes[*dep_node_index];
+ if dep_node.extract_def_id(tcx).is_some() {
+ Some(dep_node)
+ } else {
+ None
+ }
})
.collect();
pub struct DirtyCleanVisitor<'a, 'tcx:'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- query: &'a DepGraphQuery<DefId>,
- dirty_inputs: FxHashSet<DepNode<DefId>>,
+ query: &'a DepGraphQuery,
+ dirty_inputs: FxHashSet<DepNode>,
checked_attrs: FxHashSet<ast::AttrId>,
}
impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> {
- fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode<DefId> {
+ fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode {
+ let def_path_hash = self.tcx.def_path_hash(def_id);
for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name(LABEL) {
let value = expect_associated_value(self.tcx, &item);
- match DepNode::from_label_string(&value.as_str(), def_id) {
- Ok(def_id) => return def_id,
+ match DepNode::from_label_string(&value.as_str(), def_path_hash) {
+ Ok(dep_node) => return dep_node,
Err(()) => {
self.tcx.sess.span_fatal(
item.span,
self.tcx.sess.span_fatal(attr.span, "no `label` found");
}
- fn dep_node_str(&self, dep_node: &DepNode<DefId>) -> DepNode<String> {
- dep_node.map_def(|&def_id| Some(self.tcx.item_path_str(def_id))).unwrap()
+ fn dep_node_str(&self, dep_node: &DepNode) -> String {
+ if let Some(def_id) = dep_node.extract_def_id(self.tcx) {
+ format!("{:?}({})",
+ dep_node.kind,
+ self.tcx.item_path_str(def_id))
+ } else {
+ format!("{:?}({:?})", dep_node.kind, dep_node.hash)
+ }
}
- fn assert_dirty(&self, item_span: Span, dep_node: DepNode<DefId>) {
+ fn assert_dirty(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_dirty({:?})", dep_node);
- match dep_node {
- DepNode::Krate |
- DepNode::Hir(_) |
- DepNode::HirBody(_) => {
+ match dep_node.kind {
+ DepKind::Krate |
+ DepKind::Hir |
+ DepKind::HirBody => {
// HIR nodes are inputs, so if we are asserting that the HIR node is
// dirty, we check the dirty input set.
if !self.dirty_inputs.contains(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
- &format!("`{:?}` not found in dirty set, but should be dirty",
+ &format!("`{}` not found in dirty set, but should be dirty",
dep_node_str));
}
}
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
- &format!("`{:?}` found in dep graph, but should be dirty", dep_node_str));
+ &format!("`{}` found in dep graph, but should be dirty", dep_node_str));
}
}
}
}
- fn assert_clean(&self, item_span: Span, dep_node: DepNode<DefId>) {
+ fn assert_clean(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_clean({:?})", dep_node);
- match dep_node {
- DepNode::Krate |
- DepNode::Hir(_) |
- DepNode::HirBody(_) => {
+ match dep_node.kind {
+ DepKind::Krate |
+ DepKind::Hir |
+ DepKind::HirBody => {
// For HIR nodes, check the inputs.
if self.dirty_inputs.contains(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
- &format!("`{:?}` found in dirty-node set, but should be clean",
+ &format!("`{}` found in dirty-node set, but should be clean",
dep_node_str));
}
}
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
- &format!("`{:?}` not found in dep graph, but should be clean",
+ &format!("`{}` not found in dep graph, but should be clean",
dep_node_str));
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rustc::dep_graph::DepNode;
-use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
+use rustc::dep_graph::{DepNode, DepKind};
+use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
use rustc::ty::TyCtxt;
pub struct HashContext<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
incremental_hashes_map: &'a IncrementalHashesMap,
- item_metadata_hashes: FxHashMap<DefId, Fingerprint>,
+ metadata_hashes: FxHashMap<DefId, Fingerprint>,
crate_hashes: FxHashMap<CrateNum, Svh>,
- global_metadata_hashes: FxHashMap<DepNode<DefId>, Fingerprint>,
}
impl<'a, 'tcx> HashContext<'a, 'tcx> {
HashContext {
tcx: tcx,
incremental_hashes_map: incremental_hashes_map,
- item_metadata_hashes: FxHashMap(),
+ metadata_hashes: FxHashMap(),
crate_hashes: FxHashMap(),
- global_metadata_hashes: FxHashMap(),
}
}
- pub fn is_hashable(dep_node: &DepNode<DefId>) -> bool {
- match *dep_node {
- DepNode::Krate |
- DepNode::Hir(_) |
- DepNode::HirBody(_) =>
+ pub fn is_hashable(tcx: TyCtxt, dep_node: &DepNode) -> bool {
+ match dep_node.kind {
+ DepKind::Krate |
+ DepKind::Hir |
+ DepKind::HirBody =>
true,
- DepNode::MetaData(def_id) |
- DepNode::GlobalMetaData(def_id, _) => !def_id.is_local(),
+ DepKind::MetaData => {
+ let def_id = dep_node.extract_def_id(tcx).unwrap();
+ !def_id.is_local()
+ }
_ => false,
}
}
- pub fn hash(&mut self, dep_node: &DepNode<DefId>) -> Option<Fingerprint> {
- match *dep_node {
- DepNode::Krate => {
+ pub fn hash(&mut self, dep_node: &DepNode) -> Option<Fingerprint> {
+ match dep_node.kind {
+ DepKind::Krate => {
Some(self.incremental_hashes_map[dep_node])
}
// HIR nodes (which always come from our crate) are an input:
- DepNode::Hir(def_id) |
- DepNode::HirBody(def_id) => {
- assert!(def_id.is_local(),
- "cannot hash HIR for non-local def-id {:?} => {:?}",
- def_id,
- self.tcx.item_path_str(def_id));
-
+ DepKind::Hir |
+ DepKind::HirBody => {
Some(self.incremental_hashes_map[dep_node])
}
// MetaData nodes from *our* crates are an *output*; we
// don't hash them, but we do compute a hash for them and
// save it for others to use.
- DepNode::MetaData(def_id) if !def_id.is_local() => {
- Some(self.metadata_hash(def_id,
- def_id.krate,
- |this| &mut this.item_metadata_hashes))
- }
-
- DepNode::GlobalMetaData(def_id, kind) => {
- Some(self.metadata_hash(DepNode::GlobalMetaData(def_id, kind),
+ DepKind::MetaData => {
+ let def_id = dep_node.extract_def_id(self.tcx).unwrap();
+ if !def_id.is_local() {
+ Some(self.metadata_hash(def_id,
def_id.krate,
- |this| &mut this.global_metadata_hashes))
+ |this| &mut this.metadata_hashes))
+ } else {
+ None
+ }
}
_ => {
let def_id = DefId { krate: cnum, index: serialized_hash.def_index };
// record the hash for this dep-node
- let old = self.item_metadata_hashes.insert(def_id, serialized_hash.hash);
+ let old = self.metadata_hashes.insert(def_id, serialized_hash.hash);
debug!("load_from_data: def_id={:?} hash={}", def_id, serialized_hash.hash);
assert!(old.is_none(), "already have hash for {:?}", def_id);
}
- for (dep_node, fingerprint) in serialized_hashes.global_hashes {
- // Here we need to remap the CrateNum in the DepNode.
- let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX };
- let dep_node = match dep_node {
- DepNode::GlobalMetaData(_, kind) => DepNode::GlobalMetaData(def_id, kind),
- other => {
- bug!("unexpected DepNode variant: {:?}", other)
- }
- };
-
- // record the hash for this dep-node
- debug!("load_from_data: def_node={:?} hash={}", dep_node, fingerprint);
- let old = self.global_metadata_hashes.insert(dep_node.clone(), fingerprint);
- assert!(old.is_none(), "already have hash for {:?}", dep_node);
- }
-
Ok(())
}
}
//! Code to save/load the dep-graph from files.
-use rustc::dep_graph::{DepNode, WorkProductId};
+use rustc::dep_graph::{DepNode, WorkProductId, DepKind};
use rustc::hir::def_id::DefId;
-use rustc::hir::map::DefPathHash;
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
+use rustc_data_structures::indexed_vec::IndexVec;
use rustc_serialize::Decodable as RustcDecodable;
use rustc_serialize::opaque::Decoder;
-use std::default::Default;
use std::path::{Path};
-use std::sync::Arc;
use IncrementalHashesMap;
use super::data::*;
// The key is a dirty node. The value is **some** base-input that we
// can blame it on.
-pub type DirtyNodes = FxHashMap<DepNode<DefPathHash>, DepNode<DefPathHash>>;
+pub type DirtyNodes = FxHashMap<DepNodeIndex, DepNodeIndex>;
/// If we are in incremental mode, and a previous dep-graph exists,
/// then load up those nodes/edges that are still valid into the
None
}
-/// Try to convert a DepNode from the old dep-graph into a DepNode in the
-/// current graph by mapping the DefPathHash to a valid DefId. This will fail
-/// if the DefPathHash refers to something that has been removed (because
-/// there is no DefId for that thing anymore).
-fn retrace(tcx: TyCtxt, dep_node: &DepNode<DefPathHash>) -> Option<DepNode<DefId>> {
- dep_node.map_def(|def_path_hash| {
- tcx.def_path_hash_to_def_id.as_ref().unwrap().get(def_path_hash).cloned()
- })
+/// Check if a DepNode from the previous dep-graph refers to something that
+/// still exists in the current compilation session. Only works for DepNode
+/// variants that represent inputs (HIR and imported Metadata).
+fn does_still_exist(tcx: TyCtxt, dep_node: &DepNode) -> bool {
+ match dep_node.kind {
+ DepKind::Hir |
+ DepKind::HirBody |
+ DepKind::MetaData => {
+ dep_node.extract_def_id(tcx).is_some()
+ }
+ _ => {
+ bug!("unexpected Input DepNode: {:?}", dep_node)
+ }
+ }
}
/// Decode the dep graph and load the edges/nodes that are still clean
let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?;
- let edge_map: FxHashMap<DepNode<DefPathHash>, Vec<DepNode<DefPathHash>>> = {
- let capacity = serialized_dep_graph.edge_list_data.len();
- let mut edge_map = FxHashMap::with_capacity_and_hasher(capacity, Default::default());
-
- for (node_index, source) in serialized_dep_graph.nodes.iter().enumerate() {
- let (start, end) = serialized_dep_graph.edge_list_indices[node_index];
- let targets =
- (&serialized_dep_graph.edge_list_data[start as usize .. end as usize])
- .into_iter()
- .map(|&node_index| serialized_dep_graph.nodes[node_index].clone())
- .collect();
-
- edge_map.insert(source.clone(), targets);
- }
-
- edge_map
- };
-
// Compute the set of nodes from the old graph where some input
- // has changed or been removed. These are "raw" source nodes,
- // which means that they still use the original `DefPathIndex`
- // values from the encoding, rather than having been retraced to a
- // `DefId`. The reason for this is that this way we can include
- // nodes that have been removed (which no longer have a `DefId` in
- // the current compilation).
+ // has changed or been removed.
let dirty_raw_nodes = initial_dirty_nodes(tcx,
incremental_hashes_map,
+ &serialized_dep_graph.nodes,
&serialized_dep_graph.hashes);
- let dirty_raw_nodes = transitive_dirty_nodes(&edge_map, dirty_raw_nodes);
+ let dirty_raw_nodes = transitive_dirty_nodes(&serialized_dep_graph,
+ dirty_raw_nodes);
// Recreate the edges in the graph that are still clean.
let mut clean_work_products = FxHashSet();
let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output
- let mut extra_edges = vec![];
- for (source, targets) in &edge_map {
- for target in targets {
- process_edges(tcx, source, target, &edge_map, &dirty_raw_nodes,
- &mut clean_work_products, &mut dirty_work_products, &mut extra_edges);
+ for (source, targets) in serialized_dep_graph.edge_list_indices.iter_enumerated() {
+ let target_begin = targets.0 as usize;
+ let target_end = targets.1 as usize;
+
+ for &target in &serialized_dep_graph.edge_list_data[target_begin .. target_end] {
+ process_edge(tcx,
+ source,
+ target,
+ &serialized_dep_graph.nodes,
+ &dirty_raw_nodes,
+ &mut clean_work_products,
+ &mut dirty_work_products);
}
}
- // Recreate bootstrap outputs, which are outputs that have no incoming edges (and hence cannot
- // be dirty).
+ // Recreate bootstrap outputs, which are outputs that have no incoming edges
+ // (and hence cannot be dirty).
for bootstrap_output in &serialized_dep_graph.bootstrap_outputs {
- if let Some(n) = retrace(tcx, bootstrap_output) {
- if let DepNode::WorkProduct(ref wp) = n {
- clean_work_products.insert(wp.clone());
- }
+ if let DepKind::WorkProduct = bootstrap_output.kind {
+ let wp_id = WorkProductId::from_fingerprint(bootstrap_output.hash);
+ clean_work_products.insert(wp_id);
+ }
- tcx.dep_graph.with_task(n, (), (), create_node);
+ tcx.dep_graph.with_task(*bootstrap_output, (), (), create_node);
- fn create_node((): (), (): ()) {
- // just create the node with no inputs
- }
+ fn create_node((): (), (): ()) {
+ // just create the node with no inputs
}
}
- // Subtle. Sometimes we have intermediate nodes that we can't recreate in the new graph.
- // This is pretty unusual but it arises in a scenario like this:
- //
- // Hir(X) -> Foo(Y) -> Bar
- //
- // Note that the `Hir(Y)` is not an input to `Foo(Y)` -- this
- // almost never happens, but can happen in some obscure
- // scenarios. In that case, if `Y` is removed, then we can't
- // recreate `Foo(Y)` (the def-id `Y` no longer exists); what we do
- // then is to push the edge `Hir(X) -> Bar` onto `extra_edges`
- // (along with any other targets of `Foo(Y)`). We will then add
- // the edge from `Hir(X)` to `Bar` (or, if `Bar` itself cannot be
- // recreated, to the targets of `Bar`).
- while let Some((source, target)) = extra_edges.pop() {
- process_edges(tcx, source, target, &edge_map, &dirty_raw_nodes,
- &mut clean_work_products, &mut dirty_work_products, &mut extra_edges);
- }
-
// Add in work-products that are still clean, and delete those that are
// dirty.
reconcile_work_products(tcx, work_products, &clean_work_products);
- dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_nodes);
+ dirty_clean::check_dirty_clean_annotations(tcx,
+ &serialized_dep_graph.nodes,
+ &dirty_raw_nodes);
load_prev_metadata_hashes(tcx,
&mut *incremental_hashes_map.prev_metadata_hashes.borrow_mut());
/// a bit vector where the index is the DefPathIndex.
fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
incremental_hashes_map: &IncrementalHashesMap,
- serialized_hashes: &[SerializedHash])
+ nodes: &IndexVec<DepNodeIndex, DepNode>,
+ serialized_hashes: &[(DepNodeIndex, Fingerprint)])
-> DirtyNodes {
let mut hcx = HashContext::new(tcx, incremental_hashes_map);
let mut dirty_nodes = FxHashMap();
- let print_removed_message = |dep_node: &DepNode<_>| {
- if tcx.sess.opts.debugging_opts.incremental_dump_hash {
- println!("node {:?} is dirty as it was removed", dep_node);
- }
-
- debug!("initial_dirty_nodes: {:?} is dirty as it was removed", dep_node);
- };
+ for &(dep_node_index, prev_hash) in serialized_hashes {
+ let dep_node = nodes[dep_node_index];
+ if does_still_exist(tcx, &dep_node) {
+ let current_hash = hcx.hash(&dep_node).unwrap_or_else(|| {
+ bug!("Cannot find current ICH for input that still exists?")
+ });
- for hash in serialized_hashes {
- if let Some(dep_node) = retrace(tcx, &hash.dep_node) {
- if let Some(current_hash) = hcx.hash(&dep_node) {
- if current_hash == hash.hash {
- debug!("initial_dirty_nodes: {:?} is clean (hash={:?})",
- dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
+ if current_hash == prev_hash {
+ debug!("initial_dirty_nodes: {:?} is clean (hash={:?})",
+ dep_node,
current_hash);
- continue;
- }
-
- if tcx.sess.opts.debugging_opts.incremental_dump_hash {
- println!("node {:?} is dirty as hash is {:?} was {:?}",
- dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
- current_hash,
- hash.hash);
- }
+ continue;
+ }
- debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
- dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
- current_hash,
- hash.hash);
- } else {
- print_removed_message(&hash.dep_node);
+ if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+ println!("node {:?} is dirty as hash is {:?}, was {:?}",
+ dep_node,
+ current_hash,
+ prev_hash);
}
+
+ debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
+ dep_node,
+ current_hash,
+ prev_hash);
} else {
- print_removed_message(&hash.dep_node);
- }
+ if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+ println!("node {:?} is dirty as it was removed", dep_node);
+ }
- dirty_nodes.insert(hash.dep_node.clone(), hash.dep_node.clone());
+ debug!("initial_dirty_nodes: {:?} is dirty as it was removed", dep_node);
+ }
+ dirty_nodes.insert(dep_node_index, dep_node_index);
}
dirty_nodes
}
-fn transitive_dirty_nodes(edge_map: &FxHashMap<DepNode<DefPathHash>, Vec<DepNode<DefPathHash>>>,
+fn transitive_dirty_nodes(serialized_dep_graph: &SerializedDepGraph,
mut dirty_nodes: DirtyNodes)
-> DirtyNodes
{
- let mut stack: Vec<(DepNode<DefPathHash>, DepNode<DefPathHash>)> = vec![];
- stack.extend(dirty_nodes.iter().map(|(s, b)| (s.clone(), b.clone())));
+ let mut stack: Vec<(DepNodeIndex, DepNodeIndex)> = vec![];
+ stack.extend(dirty_nodes.iter().map(|(&s, &b)| (s, b)));
while let Some((source, blame)) = stack.pop() {
// we know the source is dirty (because of the node `blame`)...
- assert!(dirty_nodes.contains_key(&source));
+ debug_assert!(dirty_nodes.contains_key(&source));
// ...so we dirty all the targets (with the same blame)
- if let Some(targets) = edge_map.get(&source) {
- for target in targets {
- if !dirty_nodes.contains_key(target) {
- dirty_nodes.insert(target.clone(), blame.clone());
- stack.push((target.clone(), blame.clone()));
- }
+ for &target in serialized_dep_graph.edge_targets_from(source) {
+ if !dirty_nodes.contains_key(&target) {
+ dirty_nodes.insert(target, blame);
+ stack.push((target, blame));
}
}
}
/// otherwise no longer applicable.
fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
work_products: Vec<SerializedWorkProduct>,
- clean_work_products: &FxHashSet<Arc<WorkProductId>>) {
+ clean_work_products: &FxHashSet<WorkProductId>) {
debug!("reconcile_work_products({:?})", work_products);
for swp in work_products {
if !clean_work_products.contains(&swp.id) {
fn load_prev_metadata_hashes(tcx: TyCtxt,
output: &mut FxHashMap<DefId, Fingerprint>) {
if !tcx.sess.opts.debugging_opts.query_dep_graph {
+ // Previous metadata hashes are only needed for testing.
return
}
serialized_hashes.index_map.len());
}
-fn process_edges<'a, 'tcx, 'edges>(
+fn process_edge<'a, 'tcx, 'edges>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- source: &'edges DepNode<DefPathHash>,
- target: &'edges DepNode<DefPathHash>,
- edges: &'edges FxHashMap<DepNode<DefPathHash>, Vec<DepNode<DefPathHash>>>,
+ source: DepNodeIndex,
+ target: DepNodeIndex,
+ nodes: &IndexVec<DepNodeIndex, DepNode>,
dirty_raw_nodes: &DirtyNodes,
- clean_work_products: &mut FxHashSet<Arc<WorkProductId>>,
- dirty_work_products: &mut FxHashSet<Arc<WorkProductId>>,
- extra_edges: &mut Vec<(&'edges DepNode<DefPathHash>, &'edges DepNode<DefPathHash>)>)
+ clean_work_products: &mut FxHashSet<WorkProductId>,
+ dirty_work_products: &mut FxHashSet<WorkProductId>)
{
// If the target is dirty, skip the edge. If this is an edge
// that targets a work-product, we can print the blame
// information now.
- if let Some(blame) = dirty_raw_nodes.get(target) {
- if let DepNode::WorkProduct(ref wp) = *target {
+ if let Some(&blame) = dirty_raw_nodes.get(&target) {
+ let target = nodes[target];
+ if let DepKind::WorkProduct = target.kind {
if tcx.sess.opts.debugging_opts.incremental_info {
- if dirty_work_products.insert(wp.clone()) {
+ let wp_id = WorkProductId::from_fingerprint(target.hash);
+
+ if dirty_work_products.insert(wp_id) {
// Try to reconstruct the human-readable version of the
                    // DepNode. This cannot be done for things that were
// removed.
- let readable_blame = if let Some(dep_node) = retrace(tcx, blame) {
- dep_node.map_def(|&def_id| Some(tcx.def_path(def_id).to_string(tcx)))
- .unwrap()
+ let blame = nodes[blame];
+ let blame_str = if let Some(def_id) = blame.extract_def_id(tcx) {
+ format!("{:?}({})",
+ blame.kind,
+ tcx.def_path(def_id).to_string(tcx))
} else {
- blame.map_def(|def_path_hash| Some(format!("{:?}", def_path_hash)))
- .unwrap()
+ format!("{:?}", blame)
};
println!("incremental: module {:?} is dirty because {:?} \
changed or was removed",
- wp,
- readable_blame);
+ wp_id,
+ blame_str);
}
}
}
return;
}
- // If the source is dirty, the target will be dirty.
- assert!(!dirty_raw_nodes.contains_key(source));
-
- // Retrace the source -> target edges to def-ids and then create
- // an edge in the graph. Retracing may yield none if some of the
- // data happens to have been removed.
- if let Some(source_node) = retrace(tcx, source) {
- if let Some(target_node) = retrace(tcx, target) {
- let _task = tcx.dep_graph.in_task(target_node);
- tcx.dep_graph.read(source_node);
- if let DepNode::WorkProduct(ref wp) = *target {
- clean_work_products.insert(wp.clone());
- }
- } else {
- // As discussed in `decode_dep_graph` above, sometimes the
- // target cannot be recreated again, in which case we add
- // edges to go from `source` to the targets of `target`.
- extra_edges.extend(
- edges[target].iter().map(|t| (source, t)));
+ // At this point we have asserted that the target is clean -- otherwise, we
+ // would have hit the return above. We can do some further consistency
+ // checks based on this fact:
+
+ // We should never have an edge where the target is clean but the source
+ // was dirty. Otherwise something was wrong with the dirtying pass above:
+ debug_assert!(!dirty_raw_nodes.contains_key(&source));
+
+    // We should also never encounter an edge going from a removed input to a
+ // clean target because removing the input would have dirtied the input
+ // node and transitively dirtied the target.
+ debug_assert!(match nodes[source].kind {
+ DepKind::Hir | DepKind::HirBody | DepKind::MetaData => {
+ does_still_exist(tcx, &nodes[source])
+ }
+ _ => true,
+ });
+
+ if !dirty_raw_nodes.contains_key(&target) {
+ let target = nodes[target];
+ let source = nodes[source];
+ let _task = tcx.dep_graph.in_task(target);
+ tcx.dep_graph.read(source);
+
+ if let DepKind::WorkProduct = target.kind {
+ let wp_id = WorkProductId::from_fingerprint(target.hash);
+ clean_work_products.insert(wp_id);
}
- } else {
- // It's also possible that the source can't be created! But we
- // can ignore such cases, because (a) if `source` is a HIR
- // node, it would be considered dirty; and (b) in other cases,
- // there must be some input to this node that is clean, and so
- // we'll re-create the edges over in the case where target is
- // undefined.
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use rustc::dep_graph::{DepGraphQuery, DepNode};
-use rustc::hir::def_id::DefId;
+use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::ich::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::{Graph, NodeIndex};
// nodes) and all of the "work-products" we may care about
// later. Other nodes may be retained if it keeps the overall size
// of the graph down.
- pub reduced_graph: Graph<&'query DepNode<DefId>, ()>,
+ pub reduced_graph: Graph<&'query DepNode, ()>,
// These are output nodes that have no incoming edges. We have to
// track these specially because, when we load the data back up
// to recreate the nodes where all incoming edges are clean; but
// since we ordinarily just serialize edges, we wind up just
// forgetting that bootstrap outputs even exist in that case.)
- pub bootstrap_outputs: Vec<&'query DepNode<DefId>>,
+ pub bootstrap_outputs: Vec<&'query DepNode>,
// For the inputs (hir/foreign-metadata), we include hashes.
- pub hashes: FxHashMap<&'query DepNode<DefId>, Fingerprint>,
+ pub hashes: FxHashMap<&'query DepNode, Fingerprint>,
}
impl<'q> Predecessors<'q> {
- pub fn new(query: &'q DepGraphQuery<DefId>, hcx: &mut HashContext) -> Self {
+ pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self {
let tcx = hcx.tcx;
// Find the set of "start nodes". These are nodes that we will
// possibly query later.
- let is_output = |node: &DepNode<DefId>| -> bool {
- match *node {
- DepNode::WorkProduct(_) => true,
- DepNode::MetaData(ref def_id) => {
+ let is_output = |node: &DepNode| -> bool {
+ match node.kind {
+ DepKind::WorkProduct => true,
+ DepKind::MetaData => {
// We do *not* create dep-nodes for the current crate's
// metadata anymore, just for metadata that we import/read
// from other crates.
- debug_assert!(!def_id.is_local());
+ debug_assert!(!node.extract_def_id(tcx).unwrap().is_local());
false
}
// if -Z query-dep-graph is passed, save more extended data
// to enable better unit testing
- DepNode::TypeckTables(_) |
- DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph,
+ DepKind::TypeckTables => tcx.sess.opts.debugging_opts.query_dep_graph,
_ => false,
}
// Reduce the graph to the most important nodes.
let compress::Reduction { graph, input_nodes } =
- compress::reduce_graph(&query.graph, HashContext::is_hashable, |n| is_output(n));
+ compress::reduce_graph(&query.graph,
+ |n| HashContext::is_hashable(tcx, n),
+ |n| is_output(n));
let mut hashes = FxHashMap();
for input_index in input_nodes {
// Not all inputs might have been reachable from an output node,
// but we still want their hash for our unit tests.
let hir_nodes = query.graph.all_nodes().iter().filter_map(|node| {
- match node.data {
- DepNode::Hir(_) => Some(&node.data),
+ match node.data.kind {
+ DepKind::Hir => Some(&node.data),
_ => None,
}
});
}
}
- let bootstrap_outputs: Vec<&'q DepNode<DefId>> =
+ let bootstrap_outputs: Vec<&'q DepNode> =
(0 .. graph.len_nodes())
.map(NodeIndex)
.filter(|&n| graph.incoming_edges(n).next().is_none())
use rustc::dep_graph::DepNode;
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
-use rustc::hir::map::DefPathHash;
use rustc::ich::Fingerprint;
use rustc::middle::cstore::EncodedMetadataHashes;
use rustc::session::Session;
// First encode the commandline arguments hash
tcx.sess.opts.dep_tracking_hash().encode(encoder)?;
- let to_hash_based_node = |dep_node: &DepNode<DefId>| {
- dep_node.map_def(|&def_id| Some(tcx.def_path_hash(def_id))).unwrap()
- };
-
// NB: We rely on this Vec being indexable by reduced_graph's NodeIndex.
- let nodes: IndexVec<DepNodeIndex, DepNode<DefPathHash>> = preds
+ let mut nodes: IndexVec<DepNodeIndex, DepNode> = preds
.reduced_graph
.all_nodes()
.iter()
- .map(|node| to_hash_based_node(node.data))
+ .map(|node| node.data.clone())
.collect();
- let mut edge_list_indices = Vec::with_capacity(nodes.len());
+ let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
let mut edge_list_data = Vec::with_capacity(preds.reduced_graph.len_edges());
for node_index in 0 .. nodes.len() {
edge_list_indices.push((start, end));
}
- // Let's make we had no overflow there.
+ // Let's make sure we had no overflow there.
assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
// Check that we have a consistent number of edges.
assert_eq!(edge_list_data.len(), preds.reduced_graph.len_edges());
- let bootstrap_outputs = preds
- .bootstrap_outputs
- .iter()
- .map(|n| to_hash_based_node(n))
- .collect();
+ let bootstrap_outputs = preds.bootstrap_outputs
+ .iter()
+ .map(|dep_node| (**dep_node).clone())
+ .collect();
+
+ // Next, build the map of content hashes. To this end, we need to transform
+ // the (DepNode -> Fingerprint) map that we have into a
+ // (DepNodeIndex -> Fingerprint) map. This may necessitate adding nodes back
+ // to the dep-graph that have been filtered out during reduction.
+ let content_hashes = {
+ // We have to build a (DepNode -> DepNodeIndex) map. We over-allocate a
+ // little because we expect some more nodes to be added.
+ let capacity = (nodes.len() * 120) / 100;
+ let mut node_to_index = FxHashMap::with_capacity_and_hasher(capacity,
+ Default::default());
+ // Add the nodes we already have in the graph.
+ node_to_index.extend(nodes.iter_enumerated()
+ .map(|(index, &node)| (node, index)));
+
+ let mut content_hashes = Vec::with_capacity(preds.hashes.len());
+
+ for (&&dep_node, &hash) in preds.hashes.iter() {
+ let dep_node_index = *node_to_index
+ .entry(dep_node)
+ .or_insert_with(|| {
+ // There is no DepNodeIndex for this DepNode yet. This
+ // happens when the DepNode got filtered out during graph
+ // reduction. Since we have a content hash for the DepNode,
+ // we add it back to the graph.
+ let next_index = nodes.len();
+ nodes.push(dep_node);
+
+ debug_assert_eq!(next_index, edge_list_indices.len());
+ // Push an empty list of edges
+ edge_list_indices.push((0,0));
+
+ DepNodeIndex::new(next_index)
+ });
+
+ content_hashes.push((dep_node_index, hash));
+ }
- let hashes = preds
- .hashes
- .iter()
- .map(|(&dep_node, &hash)| {
- SerializedHash {
- dep_node: to_hash_based_node(dep_node),
- hash: hash,
- }
- })
- .collect();
+ content_hashes
+ };
let graph = SerializedDepGraph {
nodes,
edge_list_indices,
edge_list_data,
bootstrap_outputs,
- hashes,
+ hashes: content_hashes,
};
// Encode the graph data.
current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
encoder: &mut Encoder)
-> io::Result<()> {
+ assert_eq!(metadata_hashes.hashes.len(),
+ metadata_hashes.hashes.iter().map(|x| (x.def_index, ())).collect::<FxHashMap<_,_>>().len());
+
let mut serialized_hashes = SerializedMetadataHashes {
- entry_hashes: metadata_hashes.entry_hashes.to_vec(),
- global_hashes: metadata_hashes.global_hashes.to_vec(),
+ entry_hashes: metadata_hashes.hashes.to_vec(),
index_map: FxHashMap()
};
use rustc::session::config::OutputType;
use rustc::util::fs::link_or_copy;
use std::path::PathBuf;
-use std::sync::Arc;
use std::fs as std_fs;
pub fn save_trans_partition(sess: &Session,
if sess.opts.incremental.is_none() {
return;
}
- let work_product_id = Arc::new(WorkProductId(cgu_name.to_string()));
+ let work_product_id = WorkProductId::from_cgu_name(cgu_name);
let saved_files: Option<Vec<_>> =
files.iter()
use rustc::hir::def_id::DefId;
use rustc::cfg;
use rustc::ty::subst::Substs;
-use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::{self, Ty};
use rustc::traits::{self, Reveal};
use rustc::hir::map as hir_map;
use util::nodemap::NodeSet;
for adjustment in cx.tables.expr_adjustments(expr) {
if let Adjust::Deref(Some(deref)) = adjustment.kind {
let (def_id, substs) = deref.method_call(cx.tcx, source);
- if method_call_refers_to_method(cx.tcx, method, def_id, substs, id) {
+ if method_call_refers_to_method(cx, method, def_id, substs, id) {
return true;
}
}
if cx.tables.is_method_call(expr) {
let def_id = cx.tables.type_dependent_defs[&id].def_id();
let substs = cx.tables.node_substs(id);
- if method_call_refers_to_method(cx.tcx, method, def_id, substs, id) {
+ if method_call_refers_to_method(cx, method, def_id, substs, id) {
return true;
}
}
match def {
Def::Method(def_id) => {
let substs = cx.tables.node_substs(callee.id);
- method_call_refers_to_method(
- cx.tcx, method, def_id, substs, id)
+ method_call_refers_to_method(cx, method, def_id, substs, id)
}
_ => false,
}
// Check if the method call to the method with the ID `callee_id`
// and instantiated with `callee_substs` refers to method `method`.
- fn method_call_refers_to_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ fn method_call_refers_to_method<'a, 'tcx>(cx: &LateContext<'a, 'tcx>,
method: &ty::AssociatedItem,
callee_id: DefId,
callee_substs: &Substs<'tcx>,
expr_id: ast::NodeId)
-> bool {
+ let tcx = cx.tcx;
let callee_item = tcx.associated_item(callee_id);
match callee_item.container {
let trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, callee_substs);
let trait_ref = ty::Binder(trait_ref);
let span = tcx.hir.span(expr_id);
- let param_env = tcx.param_env(method.def_id);
let obligation =
traits::Obligation::new(traits::ObligationCause::misc(span, expr_id),
- param_env,
+ cx.param_env,
trait_ref.to_poly_trait_predicate());
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let mut selcx = traits::SelectionContext::new(&infcx);
match selcx.select(&obligation) {
// The method comes from a `T: Trait` bound.
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnionsWithDropFields {
fn check_item(&mut self, ctx: &LateContext, item: &hir::Item) {
if let hir::ItemUnion(ref vdata, _) = item.node {
- let item_def_id = ctx.tcx.hir.local_def_id(item.id);
- let param_env = ctx.tcx.param_env(item_def_id);
for field in vdata.fields() {
let field_ty = ctx.tcx.type_of(ctx.tcx.hir.local_def_id(field.id));
- if field_ty.needs_drop(ctx.tcx, param_env) {
+ if field_ty.needs_drop(ctx.tcx, ctx.param_env) {
ctx.span_lint(UNIONS_WITH_DROP_FIELDS,
field.span,
"union contains a field with possibly non-trivial drop code, \
// sizes only make sense for non-generic types
let item_def_id = cx.tcx.hir.local_def_id(it.id);
let t = cx.tcx.type_of(item_def_id);
- let param_env = cx.tcx.param_env(item_def_id).reveal_all();
+ let param_env = cx.param_env.reveal_all();
let ty = cx.tcx.erase_regions(&t);
let layout = ty.layout(cx.tcx, param_env).unwrap_or_else(|e| {
bug!("failed to get layout for `{}`: {}", t, e)
println!("cargo:rerun-if-changed={}", llvm_config.display());
+ if let Some(cfg_toml) = env::var_os("CFG_LLVM_TOML") {
+ let cfg_path = PathBuf::from(cfg_toml);
+ println!("cargo:rerun-if-changed={}", cfg_path.display());
+ }
+
// Test whether we're cross-compiling LLVM. This is a pretty rare case
// currently where we're producing an LLVM for a different platform than
// what this build script is currently running on.
use locator::{self, CratePaths};
use schema::{CrateRoot, Tracked};
-use rustc::dep_graph::{DepNode, GlobalMetaDataKind};
-use rustc::hir::def_id::{DefId, CrateNum, DefIndex, CRATE_DEF_INDEX};
+use rustc::hir::def_id::{CrateNum, DefIndex};
use rustc::hir::svh::Svh;
use rustc::middle::cstore::DepKind;
use rustc::session::Session;
return cstore::CrateNumMap::new();
}
- let dep_node = DepNode::GlobalMetaData(DefId { krate, index: CRATE_DEF_INDEX },
- GlobalMetaDataKind::CrateDeps);
-
// The map from crate numbers in the crate we're resolving to local crate numbers.
// We map 0 and all other holes in the map to our parent crate. The "additional"
// self-dependencies should be harmless.
::std::iter::once(krate).chain(crate_root.crate_deps
- .get(&self.sess.dep_graph, dep_node)
+ .get_untracked()
.decode(metadata)
.map(|dep| {
debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash);
use schema::{self, Tracked};
-use rustc::dep_graph::{DepGraph, DepNode, GlobalMetaDataKind};
+use rustc::dep_graph::DepGraph;
use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefIndex, DefId};
-use rustc::hir::map::definitions::DefPathTable;
+use rustc::hir::map::definitions::{DefPathTable, GlobalMetaDataKind};
use rustc::hir::svh::Svh;
use rustc::middle::cstore::{DepKind, ExternCrate, MetadataLoader};
use rustc_back::PanicStrategy;
pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option<CrateNum> {
self.extern_mod_crate_map.borrow().get(&emod_id).cloned()
}
+
+ pub fn read_dep_node(&self, def_id: DefId) {
+ use rustc::middle::cstore::CrateStore;
+ let def_path_hash = self.def_path_hash(def_id);
+ let dep_node = def_path_hash.to_dep_node(::rustc::dep_graph::DepKind::MetaData);
+ self.dep_graph.read(dep_node);
+ }
}
impl CrateMetadata {
}
pub fn panic_strategy(&self, dep_graph: &DepGraph) -> PanicStrategy {
- let def_id = DefId {
- krate: self.cnum,
- index: CRATE_DEF_INDEX,
- };
- let dep_node = DepNode::GlobalMetaData(def_id, GlobalMetaDataKind::Krate);
-
+ let dep_node = self.metadata_dep_node(GlobalMetaDataKind::Krate);
self.root
.panic_strategy
.get(dep_graph, dep_node)
use rustc::ty::{self, TyCtxt};
use rustc::ty::maps::Providers;
use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
-
-use rustc::dep_graph::{DepNode, GlobalMetaDataKind};
use rustc::hir::map::{DefKey, DefPath, DisambiguatedDefPathData, DefPathHash};
-use rustc::hir::map::definitions::DefPathTable;
+use rustc::hir::map::definitions::{DefPathTable, GlobalMetaDataKind};
use rustc::util::nodemap::{NodeSet, DefIdMap};
use rustc_back::PanicStrategy;
DepTrackingMapConfig>::Value {
assert!(!$def_id.is_local());
- $tcx.dep_graph.read(DepNode::MetaData($def_id));
+ let def_path_hash = $tcx.def_path_hash($def_id);
+ let dep_node = def_path_hash.to_dep_node(::rustc::dep_graph::DepKind::MetaData);
+
+ $tcx.dep_graph.read(dep_node);
let $cdata = $tcx.sess.cstore.crate_data_as_rc_any($def_id.krate);
let $cdata = $cdata.downcast_ref::<cstore::CrateMetadata>()
variances_of => { Rc::new(cdata.get_item_variances(def_id.index)) }
associated_item_def_ids => {
let mut result = vec![];
- cdata.each_child_of_item(def_id.index, |child| result.push(child.def.def_id()));
+ cdata.each_child_of_item(def_id.index, |child| result.push(child.def.def_id()), tcx.sess);
Rc::new(result)
}
associated_item => { cdata.get_associated_item(def_id.index) }
}
fn visibility(&self, def: DefId) -> ty::Visibility {
- self.dep_graph.read(DepNode::MetaData(def));
+ self.read_dep_node(def);
self.get_crate_data(def.krate).get_visibility(def.index)
}
fn item_generics_cloned(&self, def: DefId) -> ty::Generics {
- self.dep_graph.read(DepNode::MetaData(def));
+ self.read_dep_node(def);
self.get_crate_data(def.krate).get_generics(def.index)
}
fn impl_defaultness(&self, def: DefId) -> hir::Defaultness
{
- self.dep_graph.read(DepNode::MetaData(def));
+ self.read_dep_node(def);
self.get_crate_data(def.krate).get_impl_defaultness(def.index)
}
fn associated_item_cloned(&self, def: DefId) -> ty::AssociatedItem
{
- self.dep_graph.read(DepNode::MetaData(def));
+ self.read_dep_node(def);
self.get_crate_data(def.krate).get_associated_item(def.index)
}
fn is_const_fn(&self, did: DefId) -> bool
{
- self.dep_graph.read(DepNode::MetaData(did));
+ self.read_dep_node(did);
self.get_crate_data(did.krate).is_const_fn(did.index)
}
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name>
{
- self.dep_graph.read(DepNode::MetaData(def));
+ self.read_dep_node(def);
self.get_crate_data(def.krate).get_struct_field_names(def.index)
}
- fn item_children(&self, def_id: DefId) -> Vec<def::Export>
+ fn item_children(&self, def_id: DefId, sess: &Session) -> Vec<def::Export>
{
- self.dep_graph.read(DepNode::MetaData(def_id));
+ self.read_dep_node(def_id);
let mut result = vec![];
self.get_crate_data(def_id.krate)
- .each_child_of_item(def_id.index, |child| result.push(child));
+ .each_child_of_item(def_id.index, |child| result.push(child), sess);
result
}
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx hir::Body {
- if let Some(cached) = tcx.hir.get_inlined_body(def_id) {
+ self.read_dep_node(def_id);
+
+ if let Some(cached) = tcx.hir.get_inlined_body_untracked(def_id) {
return cached;
}
- self.dep_graph.read(DepNode::MetaData(def_id));
debug!("item_body({:?}): inlining item", def_id);
self.get_crate_data(def_id.krate).item_body(tcx, def_id.index)
/// Returns a map from a sufficiently visible external item (i.e. an external item that is
/// visible from at least one local module) to a sufficiently visible parent (considering
/// modules that re-export the external item to be parents).
- fn visible_parent_map<'a>(&'a self) -> ::std::cell::Ref<'a, DefIdMap<DefId>> {
+ fn visible_parent_map<'a>(&'a self, sess: &Session) -> ::std::cell::Ref<'a, DefIdMap<DefId>> {
{
let visible_parent_map = self.visible_parent_map.borrow();
if !visible_parent_map.is_empty() {
index: CRATE_DEF_INDEX
});
while let Some(def) = bfs_queue.pop_front() {
- for child in self.item_children(def) {
+ for child in self.item_children(def, sess) {
add_child(bfs_queue, child, def);
}
}
use cstore::{self, CrateMetadata, MetadataBlob, NativeLibrary};
use schema::*;
-use rustc::dep_graph::{DepGraph, DepNode, GlobalMetaDataKind};
+use rustc::dep_graph::{DepGraph, DepNode, DepKind};
use rustc::hir::map::{DefKey, DefPath, DefPathData, DefPathHash};
+use rustc::hir::map::definitions::GlobalMetaDataKind;
use rustc::hir;
use rustc::middle::cstore::LinkagePreference;
}
/// Iterates over each child of the given item.
- pub fn each_child_of_item<F>(&self, id: DefIndex, mut callback: F)
+ pub fn each_child_of_item<F>(&self, id: DefIndex, mut callback: F, sess: &Session)
where F: FnMut(def::Export)
{
if let Some(ref proc_macros) = self.proc_macros {
// Find the item.
let item = match self.maybe_entry(id) {
None => return,
- Some(item) => item.decode(self),
+ Some(item) => item.decode((self, sess)),
};
// Iterate over all children.
let macros_only = self.dep_kind.get().macros_only();
- for child_index in item.children.decode(self) {
+ for child_index in item.children.decode((self, sess)) {
if macros_only {
continue
}
// Get the item.
if let Some(child) = self.maybe_entry(child_index) {
- let child = child.decode(self);
+ let child = child.decode((self, sess));
match child.kind {
EntryKind::MacroDef(..) => {}
_ if macros_only => continue,
match child.kind {
// FIXME(eddyb) Don't encode these in children.
EntryKind::ForeignMod => {
- for child_index in child.children.decode(self) {
+ for child_index in child.children.decode((self, sess)) {
if let Some(def) = self.get_def(child_index) {
callback(def::Export {
def: def,
ident: Ident::with_empty_ctxt(self.item_name(child_index)),
- span: self.entry(child_index).span.decode(self),
+ span: self.entry(child_index).span.decode((self, sess)),
});
}
}
}
let def_key = self.def_key(child_index);
- let span = child.span.decode(self);
+ let span = child.span.decode((self, sess));
if let (Some(def), Some(name)) =
(self.get_def(child_index), def_key.disambiguated_data.data.get_opt_name()) {
let ident = Ident::with_empty_ctxt(name);
}
if let EntryKind::Mod(data) = item.kind {
- for exp in data.decode(self).reexports.decode(self) {
+ for exp in data.decode((self, sess)).reexports.decode((self, sess)) {
match exp.def {
Def::Macro(..) => {}
_ if macros_only => continue,
return Rc::new([]);
}
- dep_graph.read(DepNode::MetaData(self.local_def_id(node_id)));
+ let dep_node = self.def_path_hash(node_id).to_dep_node(DepKind::MetaData);
+ dep_graph.read(dep_node);
if let Some(&Some(ref val)) =
self.attribute_cache.borrow()[node_as].get(node_index) {
pub fn get_dylib_dependency_formats(&self,
dep_graph: &DepGraph)
-> Vec<(CrateNum, LinkagePreference)> {
- let def_id = DefId {
- krate: self.cnum,
- index: CRATE_DEF_INDEX,
- };
- let dep_node = DepNode::GlobalMetaData(def_id,
- GlobalMetaDataKind::DylibDependencyFormats);
+ let dep_node =
+ self.metadata_dep_node(GlobalMetaDataKind::DylibDependencyFormats);
self.root
.dylib_dependency_formats
.get(dep_graph, dep_node)
self.codemap_import_info.borrow()
}
- pub fn metadata_dep_node(&self, kind: GlobalMetaDataKind) -> DepNode<DefId> {
- let def_id = DefId {
- krate: self.cnum,
- index: CRATE_DEF_INDEX,
- };
-
- DepNode::GlobalMetaData(def_id, kind)
+ pub fn metadata_dep_node(&self, kind: GlobalMetaDataKind) -> DepNode {
+ let def_index = kind.def_index(&self.def_path_table);
+ let def_path_hash = self.def_path_table.def_path_hash(def_index);
+ def_path_hash.to_dep_node(DepKind::MetaData)
}
}
use schema::*;
use rustc::middle::cstore::{LinkMeta, LinkagePreference, NativeLibrary,
- EncodedMetadata, EncodedMetadataHashes};
+ EncodedMetadata, EncodedMetadataHashes,
+ EncodedMetadataHash};
use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId, LOCAL_CRATE};
-use rustc::hir::map::definitions::DefPathTable;
-use rustc::dep_graph::{DepNode, GlobalMetaDataKind};
+use rustc::hir::map::definitions::{DefPathTable, GlobalMetaDataKind};
use rustc::ich::Fingerprint;
use rustc::middle::dependency_format::Linkage;
use rustc::middle::lang_items;
// Encodes something that corresponds to a single DepNode::GlobalMetaData
// and registers the Fingerprint in the `metadata_hashes` map.
pub fn tracked<'x, DATA, R>(&'x mut self,
- dep_node: DepNode<()>,
+ def_index: DefIndex,
op: fn(&mut IsolatedEncoder<'x, 'a, 'tcx>, DATA) -> R,
data: DATA)
-> Tracked<R> {
let (fingerprint, this) = entry_builder.finish();
if let Some(fingerprint) = fingerprint {
- this.metadata_hashes.global_hashes.push((dep_node, fingerprint));
+ this.metadata_hashes.hashes.push(EncodedMetadataHash {
+ def_index,
+ hash: fingerprint,
+ })
}
Tracked::new(ret)
fn encode_crate_root(&mut self) -> Lazy<CrateRoot> {
let mut i = self.position();
+ let tcx = self.tcx;
+ let global_metadata_def_index = move |kind: GlobalMetaDataKind| {
+ kind.def_index(tcx.hir.definitions().def_path_table())
+ };
+
let crate_deps = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::CrateDeps),
+ global_metadata_def_index(GlobalMetaDataKind::CrateDeps),
IsolatedEncoder::encode_crate_deps,
());
let dylib_dependency_formats = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::DylibDependencyFormats),
+ global_metadata_def_index(GlobalMetaDataKind::DylibDependencyFormats),
IsolatedEncoder::encode_dylib_dependency_formats,
());
let dep_bytes = self.position() - i;
// Encode the language items.
i = self.position();
let lang_items = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::LangItems),
+ global_metadata_def_index(GlobalMetaDataKind::LangItems),
IsolatedEncoder::encode_lang_items,
());
let lang_items_missing = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::LangItemsMissing),
+ global_metadata_def_index(GlobalMetaDataKind::LangItemsMissing),
IsolatedEncoder::encode_lang_items_missing,
());
let lang_item_bytes = self.position() - i;
// Encode the native libraries used
i = self.position();
let native_libraries = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::NativeLibraries),
+ global_metadata_def_index(GlobalMetaDataKind::NativeLibraries),
IsolatedEncoder::encode_native_libraries,
());
let native_lib_bytes = self.position() - i;
// Encode the def IDs of impls, for coherence checking.
i = self.position();
let impls = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::Impls),
+ global_metadata_def_index(GlobalMetaDataKind::Impls),
IsolatedEncoder::encode_impls,
());
let impl_bytes = self.position() - i;
// Encode exported symbols info.
i = self.position();
let exported_symbols = self.tracked(
- DepNode::GlobalMetaData((), GlobalMetaDataKind::ExportedSymbols),
+ global_metadata_def_index(GlobalMetaDataKind::ExportedSymbols),
IsolatedEncoder::encode_exported_symbols,
self.exported_symbols);
let exported_symbols_bytes = self.position() - i;
let total_bytes = self.position();
- self.metadata_hashes.global_hashes.push((
- DepNode::GlobalMetaData((), GlobalMetaDataKind::Krate),
- Fingerprint::from_smaller_hash(link_meta.crate_hash.as_u64())
- ));
+ self.metadata_hashes.hashes.push(EncodedMetadataHash {
+ def_index: global_metadata_def_index(GlobalMetaDataKind::Krate),
+ hash: Fingerprint::from_smaller_hash(link_meta.crate_hash.as_u64())
+ });
if self.tcx.sess.meta_stats() {
let mut zero_bytes = 0;
let (fingerprint, ecx) = entry_builder.finish();
if let Some(hash) = fingerprint {
- ecx.metadata_hashes.entry_hashes.push(EncodedMetadataHash {
+ ecx.metadata_hashes.hashes.push(EncodedMetadataHash {
def_index: id.index,
hash: hash,
});
pub struct IsolatedEncoder<'a, 'b: 'a, 'tcx: 'b> {
pub tcx: TyCtxt<'b, 'tcx, 'tcx>,
ecx: &'a mut EncodeContext<'b, 'tcx>,
- hcx: Option<(StableHashingContext<'b, 'tcx>, StableHasher<Fingerprint>)>,
+ hcx: Option<(StableHashingContext<'b, 'tcx, 'tcx>, StableHasher<Fingerprint>)>,
}
impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> {
}
pub fn lazy<T>(&mut self, value: &T) -> Lazy<T>
- where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx, 'tcx>>
{
if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
value.hash_stable(hcx, hasher);
pub fn lazy_seq<I, T>(&mut self, iter: I) -> LazySeq<T>
where I: IntoIterator<Item = T>,
- T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ T: Encodable + HashStable<StableHashingContext<'b, 'tcx, 'tcx>>
{
if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
let iter = iter.into_iter();
pub fn lazy_seq_ref<'x, I, T>(&mut self, iter: I) -> LazySeq<T>
where I: IntoIterator<Item = &'x T>,
- T: 'x + Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ T: 'x + Encodable + HashStable<StableHashingContext<'b, 'tcx, 'tcx>>
{
if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
let iter = iter.into_iter();
}
pub fn lazy_seq_from_slice<T>(&mut self, slice: &[T]) -> LazySeq<T>
- where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx, 'tcx>>
{
if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
slice.hash_stable(hcx, hasher);
}
pub fn lazy_seq_ref_from_slice<T>(&mut self, slice: &[&T]) -> LazySeq<T>
- where T: Encodable + HashStable<StableHashingContext<'b, 'tcx>>
+ where T: Encodable + HashStable<StableHashingContext<'b, 'tcx, 'tcx>>
{
if let Some((ref mut hcx, ref mut hasher)) = self.hcx {
slice.hash_stable(hcx, hasher);
}
}
- pub fn get(&self, dep_graph: &DepGraph, dep_node: DepNode<DefId>) -> &T {
+ pub fn get(&self, dep_graph: &DepGraph, dep_node: DepNode) -> &T {
dep_graph.read(dep_node);
&self.state
}
}
}
-impl<'a, 'tcx, T> HashStable<StableHashingContext<'a, 'tcx>> for Tracked<T>
- where T: HashStable<StableHashingContext<'a, 'tcx>>
+impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for Tracked<T>
+ where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let Tracked {
ref state
pub impls: LazySeq<DefIndex>,
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for TraitImpls {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for TraitImpls {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
let TraitImpls {
trait_id: (krate, def_index),
AssociatedConst(AssociatedContainer, u8),
}
-impl<'a, 'tcx> HashStable<StableHashingContext<'a, 'tcx>> for EntryKind<'tcx> {
+impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for EntryKind<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
- hcx: &mut StableHashingContext<'a, 'tcx>,
+ hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
};
let src = MirSource::from_node(tcx, id);
- tcx.infer_ctxt(body_id).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let cx = Cx::new(&infcx, src);
let mut mir = if cx.tables().tainted_by_errors {
build::construct_error(cx, body_id)
{
let span = tcx.hir.span(ctor_id);
if let hir::VariantData::Tuple(ref fields, ctor_id) = *v {
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let (mut mir, src) =
shim::build_adt_ctor(&infcx, ctor_id, fields, span);
let upvar_decls: Vec<_> = tcx.with_freevars(fn_id, |freevars| {
freevars.iter().map(|fv| {
let var_id = tcx.hir.as_local_node_id(fv.def.def_id()).unwrap();
- let by_ref = hir.tables().upvar_capture(ty::UpvarId {
+ let capture = hir.tables().upvar_capture(ty::UpvarId {
var_id: var_id,
closure_expr_id: fn_id
- }).map_or(false, |capture| match capture {
+ });
+ let by_ref = match capture {
ty::UpvarCapture::ByValue => false,
ty::UpvarCapture::ByRef(..) => true
- });
+ };
let mut decl = UpvarDecl {
debug_name: keywords::Invalid.name(),
by_ref: by_ref
const A: AtomicUsize = ATOMIC_USIZE_INIT;
static B: &'static AtomicUsize = &A;
-// error: cannot borrow a constant which contains interior mutability, create a
-// static instead
+// error: cannot borrow a constant which may contain interior mutability,
+// create a static instead
```
A `const` represents a constant value that should never change. If one takes
const A: Cell<usize> = Cell::new(1);
const B: &'static Cell<usize> = &A;
-// error: cannot borrow a constant which contains interior mutability, create
-// a static instead
+// error: cannot borrow a constant which may contain interior mutability,
+// create a static instead
// or:
struct C { a: Cell<usize> }
var_id: id_var,
closure_expr_id: closure_expr_id,
};
- let upvar_capture = match cx.tables().upvar_capture(upvar_id) {
- Some(c) => c,
- None => {
- span_bug!(expr.span, "no upvar_capture for {:?}", upvar_id);
- }
- };
- match upvar_capture {
+ match cx.tables().upvar_capture(upvar_id) {
ty::UpvarCapture::ByValue => field_kind,
ty::UpvarCapture::ByRef(borrow) => {
ExprKind::Deref {
var_id: id_var,
closure_expr_id: closure_expr.id,
};
- let upvar_capture = cx.tables().upvar_capture(upvar_id).unwrap();
+ let upvar_capture = cx.tables().upvar_capture(upvar_id);
let temp_lifetime = cx.region_maps.temporary_scope(closure_expr.id);
let var_ty = cx.tables().node_id_to_type(id_var);
let captured_var = Expr {
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
pub region_maps: Rc<RegionMaps>,
+ pub tables: &'a ty::TypeckTables<'gcx>,
/// This is `Constness::Const` if we are compiling a `static`,
/// `const`, or the body of a `const fn`.
let param_env = tcx.param_env(src_def_id);
let region_maps = tcx.region_maps(src_def_id);
+ let tables = tcx.typeck_tables_of(src_def_id);
let attrs = tcx.hir.attrs(src_id);
// Constants and const fn's always need overflow checks.
check_overflow |= constness == hir::Constness::Const;
- Cx { tcx, infcx, param_env, region_maps, constness, src, check_overflow }
+ Cx { tcx, infcx, param_env, region_maps, tables, constness, src, check_overflow }
}
}
}
pub fn tables(&self) -> &'a ty::TypeckTables<'gcx> {
- self.infcx.tables.expect_interned()
+ self.tables
}
pub fn check_overflow(&self) -> bool {
self.add(Qualif::NOT_CONST);
if self.mode != Mode::Fn {
span_err!(self.tcx.sess, self.span, E0492,
- "cannot borrow a constant which contains \
+ "cannot borrow a constant which may contain \
interior mutability, create a static instead");
}
}
// Statics must be Sync.
if mode == Mode::Static {
let ty = mir.return_ty;
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty(Reveal::UserFacing);
let cause = traits::ObligationCause::new(mir.span, id, traits::SharedStatic);
let mut fulfillment_cx = traits::FulfillmentContext::new();
return;
}
let param_env = tcx.param_env(def_id);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let mut checker = TypeChecker::new(&infcx, item_id, param_env);
{
let mut verifier = TypeVerifier::new(&mut checker, mir);
use std::collections::hash_map::Entry;
use std::cmp::Ordering;
-use std::mem;
struct CheckCrateVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
self.check_const_eval(&body.value);
}
- let outer_penv = self.tcx.infer_ctxt(body_id).enter(|infcx| {
- let param_env = self.tcx.param_env(item_def_id);
- let outer_penv = mem::replace(&mut self.param_env, param_env);
- let region_maps = &self.tcx.region_maps(item_def_id);
- euv::ExprUseVisitor::new(self, region_maps, &infcx, param_env).consume_body(body);
- outer_penv
- });
+ let outer_penv = self.param_env;
+ self.param_env = self.tcx.param_env(item_def_id);
+
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ let region_maps = self.tcx.region_maps(item_def_id);
+ euv::ExprUseVisitor::new(self, tcx, param_env, &region_maps, self.tables)
+ .consume_body(body);
self.visit_body(body);
"hadd_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vhadds.v8i8")
+ definition: Named("llvm.arm.neon.vhadds.v8i8")
},
"hadd_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vhaddu.v8i8")
+ definition: Named("llvm.arm.neon.vhaddu.v8i8")
},
"hadd_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vhadds.v4i16")
+ definition: Named("llvm.arm.neon.vhadds.v4i16")
},
"hadd_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vhaddu.v4i16")
+ definition: Named("llvm.arm.neon.vhaddu.v4i16")
},
"hadd_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vhadds.v2i32")
+ definition: Named("llvm.arm.neon.vhadds.v2i32")
},
"hadd_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vhaddu.v2i32")
+ definition: Named("llvm.arm.neon.vhaddu.v2i32")
},
"haddq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vhadds.v16i8")
+ definition: Named("llvm.arm.neon.vhadds.v16i8")
},
"haddq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vhaddu.v16i8")
+ definition: Named("llvm.arm.neon.vhaddu.v16i8")
},
"haddq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vhadds.v8i16")
+ definition: Named("llvm.arm.neon.vhadds.v8i16")
},
"haddq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vhaddu.v8i16")
+ definition: Named("llvm.arm.neon.vhaddu.v8i16")
},
"haddq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vhadds.v4i32")
+ definition: Named("llvm.arm.neon.vhadds.v4i32")
},
"haddq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vhaddu.v4i32")
+ definition: Named("llvm.arm.neon.vhaddu.v4i32")
},
"rhadd_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vrhadds.v8i8")
+ definition: Named("llvm.arm.neon.vrhadds.v8i8")
},
"rhadd_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vrhaddu.v8i8")
+ definition: Named("llvm.arm.neon.vrhaddu.v8i8")
},
"rhadd_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vrhadds.v4i16")
+ definition: Named("llvm.arm.neon.vrhadds.v4i16")
},
"rhadd_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vrhaddu.v4i16")
+ definition: Named("llvm.arm.neon.vrhaddu.v4i16")
},
"rhadd_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vrhadds.v2i32")
+ definition: Named("llvm.arm.neon.vrhadds.v2i32")
},
"rhadd_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrhaddu.v2i32")
+ definition: Named("llvm.arm.neon.vrhaddu.v2i32")
},
"rhaddq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vrhadds.v16i8")
+ definition: Named("llvm.arm.neon.vrhadds.v16i8")
},
"rhaddq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vrhaddu.v16i8")
+ definition: Named("llvm.arm.neon.vrhaddu.v16i8")
},
"rhaddq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vrhadds.v8i16")
+ definition: Named("llvm.arm.neon.vrhadds.v8i16")
},
"rhaddq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vrhaddu.v8i16")
+ definition: Named("llvm.arm.neon.vrhaddu.v8i16")
},
"rhaddq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vrhadds.v4i32")
+ definition: Named("llvm.arm.neon.vrhadds.v4i32")
},
"rhaddq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vrhaddu.v4i32")
+ definition: Named("llvm.arm.neon.vrhaddu.v4i32")
},
"qadd_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqadds.v8i8")
+ definition: Named("llvm.arm.neon.vqadds.v8i8")
},
"qadd_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqaddu.v8i8")
+ definition: Named("llvm.arm.neon.vqaddu.v8i8")
},
"qadd_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqadds.v4i16")
+ definition: Named("llvm.arm.neon.vqadds.v4i16")
},
"qadd_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqaddu.v4i16")
+ definition: Named("llvm.arm.neon.vqaddu.v4i16")
},
"qadd_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqadds.v2i32")
+ definition: Named("llvm.arm.neon.vqadds.v2i32")
},
"qadd_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqaddu.v2i32")
+ definition: Named("llvm.arm.neon.vqaddu.v2i32")
},
"qadd_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vqadds.v1i64")
+ definition: Named("llvm.arm.neon.vqadds.v1i64")
},
"qadd_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vqaddu.v1i64")
+ definition: Named("llvm.arm.neon.vqaddu.v1i64")
},
"qaddq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vqadds.v16i8")
+ definition: Named("llvm.arm.neon.vqadds.v16i8")
},
"qaddq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vqaddu.v16i8")
+ definition: Named("llvm.arm.neon.vqaddu.v16i8")
},
"qaddq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vqadds.v8i16")
+ definition: Named("llvm.arm.neon.vqadds.v8i16")
},
"qaddq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vqaddu.v8i16")
+ definition: Named("llvm.arm.neon.vqaddu.v8i16")
},
"qaddq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vqadds.v4i32")
+ definition: Named("llvm.arm.neon.vqadds.v4i32")
},
"qaddq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vqaddu.v4i32")
+ definition: Named("llvm.arm.neon.vqaddu.v4i32")
},
"qaddq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vqadds.v2i64")
+ definition: Named("llvm.arm.neon.vqadds.v2i64")
},
"qaddq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vqaddu.v2i64")
+ definition: Named("llvm.arm.neon.vqaddu.v2i64")
},
"raddhn_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vraddhn.v8i8")
+ definition: Named("llvm.arm.neon.vraddhn.v8i8")
},
"raddhn_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vraddhn.v8i8")
+ definition: Named("llvm.arm.neon.vraddhn.v8i8")
},
"raddhn_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vraddhn.v4i16")
+ definition: Named("llvm.arm.neon.vraddhn.v4i16")
},
"raddhn_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vraddhn.v4i16")
+ definition: Named("llvm.arm.neon.vraddhn.v4i16")
},
"raddhn_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vraddhn.v2i32")
+ definition: Named("llvm.arm.neon.vraddhn.v2i32")
},
"raddhn_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vraddhn.v2i32")
+ definition: Named("llvm.arm.neon.vraddhn.v2i32")
},
"fma_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
"qdmulh_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqdmulh.v4i16")
+ definition: Named("llvm.arm.neon.vsqdmulh.v4i16")
},
"qdmulh_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqdmulh.v2i32")
+ definition: Named("llvm.arm.neon.vsqdmulh.v2i32")
},
"qdmulhq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vsqdmulh.v8i16")
+ definition: Named("llvm.arm.neon.vsqdmulh.v8i16")
},
"qdmulhq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vsqdmulh.v4i32")
+ definition: Named("llvm.arm.neon.vsqdmulh.v4i32")
},
"qrdmulh_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqrdmulh.v4i16")
+ definition: Named("llvm.arm.neon.vsqrdmulh.v4i16")
},
"qrdmulh_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqrdmulh.v2i32")
+ definition: Named("llvm.arm.neon.vsqrdmulh.v2i32")
},
"qrdmulhq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vsqrdmulh.v8i16")
+ definition: Named("llvm.arm.neon.vsqrdmulh.v8i16")
},
"qrdmulhq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vsqrdmulh.v4i32")
+ definition: Named("llvm.arm.neon.vsqrdmulh.v4i32")
},
"mull_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vmulls.v8i16")
+ definition: Named("llvm.arm.neon.vmulls.v8i16")
},
"mull_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vmullu.v8i16")
+ definition: Named("llvm.arm.neon.vmullu.v8i16")
},
"mull_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vmulls.v4i32")
+ definition: Named("llvm.arm.neon.vmulls.v4i32")
},
"mull_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vmullu.v4i32")
+ definition: Named("llvm.arm.neon.vmullu.v4i32")
},
"mull_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vmulls.v2i64")
+ definition: Named("llvm.arm.neon.vmulls.v2i64")
},
"mull_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vmullu.v2i64")
+ definition: Named("llvm.arm.neon.vmullu.v2i64")
},
"qdmullq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vsqdmull.v8i16")
+ definition: Named("llvm.arm.neon.vsqdmull.v8i16")
},
"qdmullq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vsqdmull.v4i32")
+ definition: Named("llvm.arm.neon.vsqdmull.v4i32")
},
"hsub_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vhsubs.v8i8")
+ definition: Named("llvm.arm.neon.vhsubs.v8i8")
},
"hsub_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vhsubu.v8i8")
+ definition: Named("llvm.arm.neon.vhsubu.v8i8")
},
"hsub_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vhsubs.v4i16")
+ definition: Named("llvm.arm.neon.vhsubs.v4i16")
},
"hsub_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vhsubu.v4i16")
+ definition: Named("llvm.arm.neon.vhsubu.v4i16")
},
"hsub_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vhsubs.v2i32")
+ definition: Named("llvm.arm.neon.vhsubs.v2i32")
},
"hsub_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vhsubu.v2i32")
+ definition: Named("llvm.arm.neon.vhsubu.v2i32")
},
"hsubq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vhsubs.v16i8")
+ definition: Named("llvm.arm.neon.vhsubs.v16i8")
},
"hsubq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vhsubu.v16i8")
+ definition: Named("llvm.arm.neon.vhsubu.v16i8")
},
"hsubq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vhsubs.v8i16")
+ definition: Named("llvm.arm.neon.vhsubs.v8i16")
},
"hsubq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vhsubu.v8i16")
+ definition: Named("llvm.arm.neon.vhsubu.v8i16")
},
"hsubq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vhsubs.v4i32")
+ definition: Named("llvm.arm.neon.vhsubs.v4i32")
},
"hsubq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vhsubu.v4i32")
+ definition: Named("llvm.arm.neon.vhsubu.v4i32")
},
"qsub_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqsubs.v8i8")
+ definition: Named("llvm.arm.neon.vqsubs.v8i8")
},
"qsub_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqsubu.v8i8")
+ definition: Named("llvm.arm.neon.vqsubu.v8i8")
},
"qsub_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqsubs.v4i16")
+ definition: Named("llvm.arm.neon.vqsubs.v4i16")
},
"qsub_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqsubu.v4i16")
+ definition: Named("llvm.arm.neon.vqsubu.v4i16")
},
"qsub_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqsubs.v2i32")
+ definition: Named("llvm.arm.neon.vqsubs.v2i32")
},
"qsub_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqsubu.v2i32")
+ definition: Named("llvm.arm.neon.vqsubu.v2i32")
},
"qsub_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vqsubs.v1i64")
+ definition: Named("llvm.arm.neon.vqsubs.v1i64")
},
"qsub_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vqsubu.v1i64")
+ definition: Named("llvm.arm.neon.vqsubu.v1i64")
},
"qsubq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vqsubs.v16i8")
+ definition: Named("llvm.arm.neon.vqsubs.v16i8")
},
"qsubq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vqsubu.v16i8")
+ definition: Named("llvm.arm.neon.vqsubu.v16i8")
},
"qsubq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vqsubs.v8i16")
+ definition: Named("llvm.arm.neon.vqsubs.v8i16")
},
"qsubq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vqsubu.v8i16")
+ definition: Named("llvm.arm.neon.vqsubu.v8i16")
},
"qsubq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vqsubs.v4i32")
+ definition: Named("llvm.arm.neon.vqsubs.v4i32")
},
"qsubq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vqsubu.v4i32")
+ definition: Named("llvm.arm.neon.vqsubu.v4i32")
},
"qsubq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vqsubs.v2i64")
+ definition: Named("llvm.arm.neon.vqsubs.v2i64")
},
"qsubq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vqsubu.v2i64")
+ definition: Named("llvm.arm.neon.vqsubu.v2i64")
},
"rsubhn_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vrsubhn.v8i8")
+ definition: Named("llvm.arm.neon.vrsubhn.v8i8")
},
"rsubhn_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vrsubhn.v8i8")
+ definition: Named("llvm.arm.neon.vrsubhn.v8i8")
},
"rsubhn_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vrsubhn.v4i16")
+ definition: Named("llvm.arm.neon.vrsubhn.v4i16")
},
"rsubhn_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vrsubhn.v4i16")
+ definition: Named("llvm.arm.neon.vrsubhn.v4i16")
},
"rsubhn_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vrsubhn.v2i32")
+ definition: Named("llvm.arm.neon.vrsubhn.v2i32")
},
"rsubhn_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrsubhn.v2i32")
+ definition: Named("llvm.arm.neon.vrsubhn.v2i32")
},
"abd_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vabds.v8i8")
+ definition: Named("llvm.arm.neon.vabds.v8i8")
},
"abd_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vabdu.v8i8")
+ definition: Named("llvm.arm.neon.vabdu.v8i8")
},
"abd_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vabds.v4i16")
+ definition: Named("llvm.arm.neon.vabds.v4i16")
},
"abd_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vabdu.v4i16")
+ definition: Named("llvm.arm.neon.vabdu.v4i16")
},
"abd_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vabds.v2i32")
+ definition: Named("llvm.arm.neon.vabds.v2i32")
},
"abd_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vabdu.v2i32")
+ definition: Named("llvm.arm.neon.vabdu.v2i32")
},
"abd_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vabdf.v2f32")
+ definition: Named("llvm.arm.neon.vabdf.v2f32")
},
"abdq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vabds.v16i8")
+ definition: Named("llvm.arm.neon.vabds.v16i8")
},
"abdq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vabdu.v16i8")
+ definition: Named("llvm.arm.neon.vabdu.v16i8")
},
"abdq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vabds.v8i16")
+ definition: Named("llvm.arm.neon.vabds.v8i16")
},
"abdq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vabdu.v8i16")
+ definition: Named("llvm.arm.neon.vabdu.v8i16")
},
"abdq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vabds.v4i32")
+ definition: Named("llvm.arm.neon.vabds.v4i32")
},
"abdq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vabdu.v4i32")
+ definition: Named("llvm.arm.neon.vabdu.v4i32")
},
"abdq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vabdf.v4f32")
+ definition: Named("llvm.arm.neon.vabdf.v4f32")
},
"max_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vmaxs.v8i8")
+ definition: Named("llvm.arm.neon.vmaxs.v8i8")
},
"max_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vmaxu.v8i8")
+ definition: Named("llvm.arm.neon.vmaxu.v8i8")
},
"max_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vmaxs.v4i16")
+ definition: Named("llvm.arm.neon.vmaxs.v4i16")
},
"max_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vmaxu.v4i16")
+ definition: Named("llvm.arm.neon.vmaxu.v4i16")
},
"max_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vmaxs.v2i32")
+ definition: Named("llvm.arm.neon.vmaxs.v2i32")
},
"max_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vmaxu.v2i32")
+ definition: Named("llvm.arm.neon.vmaxu.v2i32")
},
"max_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vmaxf.v2f32")
+ definition: Named("llvm.arm.neon.vmaxf.v2f32")
},
"maxq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vmaxs.v16i8")
+ definition: Named("llvm.arm.neon.vmaxs.v16i8")
},
"maxq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vmaxu.v16i8")
+ definition: Named("llvm.arm.neon.vmaxu.v16i8")
},
"maxq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vmaxs.v8i16")
+ definition: Named("llvm.arm.neon.vmaxs.v8i16")
},
"maxq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vmaxu.v8i16")
+ definition: Named("llvm.arm.neon.vmaxu.v8i16")
},
"maxq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vmaxs.v4i32")
+ definition: Named("llvm.arm.neon.vmaxs.v4i32")
},
"maxq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vmaxu.v4i32")
+ definition: Named("llvm.arm.neon.vmaxu.v4i32")
},
"maxq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vmaxf.v4f32")
+ definition: Named("llvm.arm.neon.vmaxf.v4f32")
},
"min_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vmins.v8i8")
+ definition: Named("llvm.arm.neon.vmins.v8i8")
},
"min_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vminu.v8i8")
+ definition: Named("llvm.arm.neon.vminu.v8i8")
},
"min_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vmins.v4i16")
+ definition: Named("llvm.arm.neon.vmins.v4i16")
},
"min_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vminu.v4i16")
+ definition: Named("llvm.arm.neon.vminu.v4i16")
},
"min_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vmins.v2i32")
+ definition: Named("llvm.arm.neon.vmins.v2i32")
},
"min_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vminu.v2i32")
+ definition: Named("llvm.arm.neon.vminu.v2i32")
},
"min_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vminf.v2f32")
+ definition: Named("llvm.arm.neon.vminf.v2f32")
},
"minq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vmins.v16i8")
+ definition: Named("llvm.arm.neon.vmins.v16i8")
},
"minq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vminu.v16i8")
+ definition: Named("llvm.arm.neon.vminu.v16i8")
},
"minq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vmins.v8i16")
+ definition: Named("llvm.arm.neon.vmins.v8i16")
},
"minq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vminu.v8i16")
+ definition: Named("llvm.arm.neon.vminu.v8i16")
},
"minq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vmins.v4i32")
+ definition: Named("llvm.arm.neon.vmins.v4i32")
},
"minq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vminu.v4i32")
+ definition: Named("llvm.arm.neon.vminu.v4i32")
},
"minq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vminf.v4f32")
+ definition: Named("llvm.arm.neon.vminf.v4f32")
},
"shl_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vshls.v8i8")
+ definition: Named("llvm.arm.neon.vshls.v8i8")
},
"shl_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vshlu.v8i8")
+ definition: Named("llvm.arm.neon.vshlu.v8i8")
},
"shl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vshls.v4i16")
+ definition: Named("llvm.arm.neon.vshls.v4i16")
},
"shl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vshlu.v4i16")
+ definition: Named("llvm.arm.neon.vshlu.v4i16")
},
"shl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vshls.v2i32")
+ definition: Named("llvm.arm.neon.vshls.v2i32")
},
"shl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vshlu.v2i32")
+ definition: Named("llvm.arm.neon.vshlu.v2i32")
},
"shl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vshls.v1i64")
+ definition: Named("llvm.arm.neon.vshls.v1i64")
},
"shl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vshlu.v1i64")
+ definition: Named("llvm.arm.neon.vshlu.v1i64")
},
"shlq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vshls.v16i8")
+ definition: Named("llvm.arm.neon.vshls.v16i8")
},
"shlq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vshlu.v16i8")
+ definition: Named("llvm.arm.neon.vshlu.v16i8")
},
"shlq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vshls.v8i16")
+ definition: Named("llvm.arm.neon.vshls.v8i16")
},
"shlq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vshlu.v8i16")
+ definition: Named("llvm.arm.neon.vshlu.v8i16")
},
"shlq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vshls.v4i32")
+ definition: Named("llvm.arm.neon.vshls.v4i32")
},
"shlq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vshlu.v4i32")
+ definition: Named("llvm.arm.neon.vshlu.v4i32")
},
"shlq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vshls.v2i64")
+ definition: Named("llvm.arm.neon.vshls.v2i64")
},
"shlq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vshlu.v2i64")
+ definition: Named("llvm.arm.neon.vshlu.v2i64")
},
"qshl_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqshls.v8i8")
+ definition: Named("llvm.arm.neon.vqshls.v8i8")
},
"qshl_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqshlu.v8i8")
+ definition: Named("llvm.arm.neon.vqshlu.v8i8")
},
"qshl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqshls.v4i16")
+ definition: Named("llvm.arm.neon.vqshls.v4i16")
},
"qshl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqshlu.v4i16")
+ definition: Named("llvm.arm.neon.vqshlu.v4i16")
},
"qshl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqshls.v2i32")
+ definition: Named("llvm.arm.neon.vqshls.v2i32")
},
"qshl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqshlu.v2i32")
+ definition: Named("llvm.arm.neon.vqshlu.v2i32")
},
"qshl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vqshls.v1i64")
+ definition: Named("llvm.arm.neon.vqshls.v1i64")
},
"qshl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vqshlu.v1i64")
+ definition: Named("llvm.arm.neon.vqshlu.v1i64")
},
"qshlq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vqshls.v16i8")
+ definition: Named("llvm.arm.neon.vqshls.v16i8")
},
"qshlq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vqshlu.v16i8")
+ definition: Named("llvm.arm.neon.vqshlu.v16i8")
},
"qshlq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vqshls.v8i16")
+ definition: Named("llvm.arm.neon.vqshls.v8i16")
},
"qshlq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vqshlu.v8i16")
+ definition: Named("llvm.arm.neon.vqshlu.v8i16")
},
"qshlq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vqshls.v4i32")
+ definition: Named("llvm.arm.neon.vqshls.v4i32")
},
"qshlq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vqshlu.v4i32")
+ definition: Named("llvm.arm.neon.vqshlu.v4i32")
},
"qshlq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vqshls.v2i64")
+ definition: Named("llvm.arm.neon.vqshls.v2i64")
},
"qshlq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vqshlu.v2i64")
+ definition: Named("llvm.arm.neon.vqshlu.v2i64")
},
"rshl_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vrshls.v8i8")
+ definition: Named("llvm.arm.neon.vrshls.v8i8")
},
"rshl_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vrshlu.v8i8")
+ definition: Named("llvm.arm.neon.vrshlu.v8i8")
},
"rshl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vrshls.v4i16")
+ definition: Named("llvm.arm.neon.vrshls.v4i16")
},
"rshl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vrshlu.v4i16")
+ definition: Named("llvm.arm.neon.vrshlu.v4i16")
},
"rshl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vrshls.v2i32")
+ definition: Named("llvm.arm.neon.vrshls.v2i32")
},
"rshl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrshlu.v2i32")
+ definition: Named("llvm.arm.neon.vrshlu.v2i32")
},
"rshl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vrshls.v1i64")
+ definition: Named("llvm.arm.neon.vrshls.v1i64")
},
"rshl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vrshlu.v1i64")
+ definition: Named("llvm.arm.neon.vrshlu.v1i64")
},
"rshlq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vrshls.v16i8")
+ definition: Named("llvm.arm.neon.vrshls.v16i8")
},
"rshlq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vrshlu.v16i8")
+ definition: Named("llvm.arm.neon.vrshlu.v16i8")
},
"rshlq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vrshls.v8i16")
+ definition: Named("llvm.arm.neon.vrshls.v8i16")
},
"rshlq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vrshlu.v8i16")
+ definition: Named("llvm.arm.neon.vrshlu.v8i16")
},
"rshlq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vrshls.v4i32")
+ definition: Named("llvm.arm.neon.vrshls.v4i32")
},
"rshlq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vrshlu.v4i32")
+ definition: Named("llvm.arm.neon.vrshlu.v4i32")
},
"rshlq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vrshls.v2i64")
+ definition: Named("llvm.arm.neon.vrshls.v2i64")
},
"rshlq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vrshlu.v2i64")
+ definition: Named("llvm.arm.neon.vrshlu.v2i64")
},
"qrshl_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqrshls.v8i8")
+ definition: Named("llvm.arm.neon.vqrshls.v8i8")
},
"qrshl_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqrshlu.v8i8")
+ definition: Named("llvm.arm.neon.vqrshlu.v8i8")
},
"qrshl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqrshls.v4i16")
+ definition: Named("llvm.arm.neon.vqrshls.v4i16")
},
"qrshl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqrshlu.v4i16")
+ definition: Named("llvm.arm.neon.vqrshlu.v4i16")
},
"qrshl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqrshls.v2i32")
+ definition: Named("llvm.arm.neon.vqrshls.v2i32")
},
"qrshl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqrshlu.v2i32")
+ definition: Named("llvm.arm.neon.vqrshlu.v2i32")
},
"qrshl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vqrshls.v1i64")
+ definition: Named("llvm.arm.neon.vqrshls.v1i64")
},
"qrshl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vqrshlu.v1i64")
+ definition: Named("llvm.arm.neon.vqrshlu.v1i64")
},
"qrshlq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vqrshls.v16i8")
+ definition: Named("llvm.arm.neon.vqrshls.v16i8")
},
"qrshlq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vqrshlu.v16i8")
+ definition: Named("llvm.arm.neon.vqrshlu.v16i8")
},
"qrshlq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vqrshls.v8i16")
+ definition: Named("llvm.arm.neon.vqrshls.v8i16")
},
"qrshlq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vqrshlu.v8i16")
+ definition: Named("llvm.arm.neon.vqrshlu.v8i16")
},
"qrshlq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vqrshls.v4i32")
+ definition: Named("llvm.arm.neon.vqrshls.v4i32")
},
"qrshlq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vqrshlu.v4i32")
+ definition: Named("llvm.arm.neon.vqrshlu.v4i32")
},
"qrshlq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vqrshls.v2i64")
+ definition: Named("llvm.arm.neon.vqrshls.v2i64")
},
"qrshlq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vqrshlu.v2i64")
+ definition: Named("llvm.arm.neon.vqrshlu.v2i64")
},
"qshrun_n_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vsqshrun.v8i8")
+ definition: Named("llvm.arm.neon.vsqshrun.v8i8")
},
"qshrun_n_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqshrun.v4i16")
+ definition: Named("llvm.arm.neon.vsqshrun.v4i16")
},
"qshrun_n_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqshrun.v2i32")
+ definition: Named("llvm.arm.neon.vsqshrun.v2i32")
},
"qrshrun_n_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vsqrshrun.v8i8")
+ definition: Named("llvm.arm.neon.vsqrshrun.v8i8")
},
"qrshrun_n_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqrshrun.v4i16")
+ definition: Named("llvm.arm.neon.vsqrshrun.v4i16")
},
"qrshrun_n_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqrshrun.v2i32")
+ definition: Named("llvm.arm.neon.vsqrshrun.v2i32")
},
"qshrn_n_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqshrns.v8i8")
+ definition: Named("llvm.arm.neon.vqshrns.v8i8")
},
"qshrn_n_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqshrnu.v8i8")
+ definition: Named("llvm.arm.neon.vqshrnu.v8i8")
},
"qshrn_n_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqshrns.v4i16")
+ definition: Named("llvm.arm.neon.vqshrns.v4i16")
},
"qshrn_n_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqshrnu.v4i16")
+ definition: Named("llvm.arm.neon.vqshrnu.v4i16")
},
"qshrn_n_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqshrns.v2i32")
+ definition: Named("llvm.arm.neon.vqshrns.v2i32")
},
"qshrn_n_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqshrnu.v2i32")
+ definition: Named("llvm.arm.neon.vqshrnu.v2i32")
},
"rshrn_n_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vrshrn.v8i8")
+ definition: Named("llvm.arm.neon.vrshrn.v8i8")
},
"rshrn_n_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vrshrn.v8i8")
+ definition: Named("llvm.arm.neon.vrshrn.v8i8")
},
"rshrn_n_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vrshrn.v4i16")
+ definition: Named("llvm.arm.neon.vrshrn.v4i16")
},
"rshrn_n_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vrshrn.v4i16")
+ definition: Named("llvm.arm.neon.vrshrn.v4i16")
},
"rshrn_n_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vrshrn.v2i32")
+ definition: Named("llvm.arm.neon.vrshrn.v2i32")
},
"rshrn_n_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrshrn.v2i32")
+ definition: Named("llvm.arm.neon.vrshrn.v2i32")
},
"qrshrn_n_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqrshrns.v8i8")
+ definition: Named("llvm.arm.neon.vqrshrns.v8i8")
},
"qrshrn_n_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqrshrnu.v8i8")
+ definition: Named("llvm.arm.neon.vqrshrnu.v8i8")
},
"qrshrn_n_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqrshrns.v4i16")
+ definition: Named("llvm.arm.neon.vqrshrns.v4i16")
},
"qrshrn_n_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqrshrnu.v4i16")
+ definition: Named("llvm.arm.neon.vqrshrnu.v4i16")
},
"qrshrn_n_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqrshrns.v2i32")
+ definition: Named("llvm.arm.neon.vqrshrns.v2i32")
},
"qrshrn_n_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqrshrnu.v2i32")
+ definition: Named("llvm.arm.neon.vqrshrnu.v2i32")
},
"sri_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vvsri.v8i8")
+ definition: Named("llvm.arm.neon.vvsri.v8i8")
},
"sri_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vvsri.v8i8")
+ definition: Named("llvm.arm.neon.vvsri.v8i8")
},
"sri_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vvsri.v4i16")
+ definition: Named("llvm.arm.neon.vvsri.v4i16")
},
"sri_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vvsri.v4i16")
+ definition: Named("llvm.arm.neon.vvsri.v4i16")
},
"sri_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vvsri.v2i32")
+ definition: Named("llvm.arm.neon.vvsri.v2i32")
},
"sri_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vvsri.v2i32")
+ definition: Named("llvm.arm.neon.vvsri.v2i32")
},
"sri_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vvsri.v1i64")
+ definition: Named("llvm.arm.neon.vvsri.v1i64")
},
"sri_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vvsri.v1i64")
+ definition: Named("llvm.arm.neon.vvsri.v1i64")
},
"sriq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vvsri.v16i8")
+ definition: Named("llvm.arm.neon.vvsri.v16i8")
},
"sriq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vvsri.v16i8")
+ definition: Named("llvm.arm.neon.vvsri.v16i8")
},
"sriq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vvsri.v8i16")
+ definition: Named("llvm.arm.neon.vvsri.v8i16")
},
"sriq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vvsri.v8i16")
+ definition: Named("llvm.arm.neon.vvsri.v8i16")
},
"sriq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vvsri.v4i32")
+ definition: Named("llvm.arm.neon.vvsri.v4i32")
},
"sriq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vvsri.v4i32")
+ definition: Named("llvm.arm.neon.vvsri.v4i32")
},
"sriq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vvsri.v2i64")
+ definition: Named("llvm.arm.neon.vvsri.v2i64")
},
"sriq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vvsri.v2i64")
+ definition: Named("llvm.arm.neon.vvsri.v2i64")
},
"sli_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vvsli.v8i8")
+ definition: Named("llvm.arm.neon.vvsli.v8i8")
},
"sli_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vvsli.v8i8")
+ definition: Named("llvm.arm.neon.vvsli.v8i8")
},
"sli_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vvsli.v4i16")
+ definition: Named("llvm.arm.neon.vvsli.v4i16")
},
"sli_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vvsli.v4i16")
+ definition: Named("llvm.arm.neon.vvsli.v4i16")
},
"sli_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vvsli.v2i32")
+ definition: Named("llvm.arm.neon.vvsli.v2i32")
},
"sli_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vvsli.v2i32")
+ definition: Named("llvm.arm.neon.vvsli.v2i32")
},
"sli_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vvsli.v1i64")
+ definition: Named("llvm.arm.neon.vvsli.v1i64")
},
"sli_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vvsli.v1i64")
+ definition: Named("llvm.arm.neon.vvsli.v1i64")
},
"sliq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vvsli.v16i8")
+ definition: Named("llvm.arm.neon.vvsli.v16i8")
},
"sliq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vvsli.v16i8")
+ definition: Named("llvm.arm.neon.vvsli.v16i8")
},
"sliq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vvsli.v8i16")
+ definition: Named("llvm.arm.neon.vvsli.v8i16")
},
"sliq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vvsli.v8i16")
+ definition: Named("llvm.arm.neon.vvsli.v8i16")
},
"sliq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vvsli.v4i32")
+ definition: Named("llvm.arm.neon.vvsli.v4i32")
},
"sliq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vvsli.v4i32")
+ definition: Named("llvm.arm.neon.vvsli.v4i32")
},
"sliq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vvsli.v2i64")
+ definition: Named("llvm.arm.neon.vvsli.v2i64")
},
"sliq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vvsli.v2i64")
+ definition: Named("llvm.arm.neon.vvsli.v2i64")
},
"vqmovn_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vqxtns.v8i8")
+ definition: Named("llvm.arm.neon.vqxtns.v8i8")
},
"vqmovn_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vqxtnu.v8i8")
+ definition: Named("llvm.arm.neon.vqxtnu.v8i8")
},
"vqmovn_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vqxtns.v4i16")
+ definition: Named("llvm.arm.neon.vqxtns.v4i16")
},
"vqmovn_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vqxtnu.v4i16")
+ definition: Named("llvm.arm.neon.vqxtnu.v4i16")
},
"vqmovn_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vqxtns.v2i32")
+ definition: Named("llvm.arm.neon.vqxtns.v2i32")
},
"vqmovn_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vqxtnu.v2i32")
+ definition: Named("llvm.arm.neon.vqxtnu.v2i32")
},
"abs_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vabs.v8i8")
+ definition: Named("llvm.arm.neon.vabs.v8i8")
},
"abs_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vabs.v4i16")
+ definition: Named("llvm.arm.neon.vabs.v4i16")
},
"abs_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vabs.v2i32")
+ definition: Named("llvm.arm.neon.vabs.v2i32")
},
"absq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vabs.v16i8")
+ definition: Named("llvm.arm.neon.vabs.v16i8")
},
"absq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vabs.v8i16")
+ definition: Named("llvm.arm.neon.vabs.v8i16")
},
"absq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vabs.v4i32")
+ definition: Named("llvm.arm.neon.vabs.v4i32")
},
"abs_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
"qabs_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vsqabs.v8i8")
+ definition: Named("llvm.arm.neon.vsqabs.v8i8")
},
"qabs_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqabs.v4i16")
+ definition: Named("llvm.arm.neon.vsqabs.v4i16")
},
"qabs_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqabs.v2i32")
+ definition: Named("llvm.arm.neon.vsqabs.v2i32")
},
"qabsq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vsqabs.v16i8")
+ definition: Named("llvm.arm.neon.vsqabs.v16i8")
},
"qabsq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vsqabs.v8i16")
+ definition: Named("llvm.arm.neon.vsqabs.v8i16")
},
"qabsq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vsqabs.v4i32")
+ definition: Named("llvm.arm.neon.vsqabs.v4i32")
},
"qneg_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vsqneg.v8i8")
+ definition: Named("llvm.arm.neon.vsqneg.v8i8")
},
"qneg_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vsqneg.v4i16")
+ definition: Named("llvm.arm.neon.vsqneg.v4i16")
},
"qneg_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vsqneg.v2i32")
+ definition: Named("llvm.arm.neon.vsqneg.v2i32")
},
"qnegq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vsqneg.v16i8")
+ definition: Named("llvm.arm.neon.vsqneg.v16i8")
},
"qnegq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vsqneg.v8i16")
+ definition: Named("llvm.arm.neon.vsqneg.v8i16")
},
"qnegq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vsqneg.v4i32")
+ definition: Named("llvm.arm.neon.vsqneg.v4i32")
},
"clz_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
"cls_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vcls.v8i8")
+ definition: Named("llvm.arm.neon.vcls.v8i8")
},
"cls_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vcls.v8i8")
+ definition: Named("llvm.arm.neon.vcls.v8i8")
},
"cls_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vcls.v4i16")
+ definition: Named("llvm.arm.neon.vcls.v4i16")
},
"cls_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vcls.v4i16")
+ definition: Named("llvm.arm.neon.vcls.v4i16")
},
"cls_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vcls.v2i32")
+ definition: Named("llvm.arm.neon.vcls.v2i32")
},
"cls_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vcls.v2i32")
+ definition: Named("llvm.arm.neon.vcls.v2i32")
},
"clsq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vcls.v16i8")
+ definition: Named("llvm.arm.neon.vcls.v16i8")
},
"clsq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vcls.v16i8")
+ definition: Named("llvm.arm.neon.vcls.v16i8")
},
"clsq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vcls.v8i16")
+ definition: Named("llvm.arm.neon.vcls.v8i16")
},
"clsq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vcls.v8i16")
+ definition: Named("llvm.arm.neon.vcls.v8i16")
},
"clsq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vcls.v4i32")
+ definition: Named("llvm.arm.neon.vcls.v4i32")
},
"clsq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vcls.v4i32")
+ definition: Named("llvm.arm.neon.vcls.v4i32")
},
"cnt_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
"recpe_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrecpe.v2i32")
+ definition: Named("llvm.arm.neon.vrecpe.v2i32")
},
"recpe_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vrecpe.v2f32")
+ definition: Named("llvm.arm.neon.vrecpe.v2f32")
},
"recpeq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vrecpe.v4i32")
+ definition: Named("llvm.arm.neon.vrecpe.v4i32")
},
"recpeq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vrecpe.v4f32")
+ definition: Named("llvm.arm.neon.vrecpe.v4f32")
},
"recps_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vfrecps.v2f32")
+ definition: Named("llvm.arm.neon.vfrecps.v2f32")
},
"recpsq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vfrecps.v4f32")
+ definition: Named("llvm.arm.neon.vfrecps.v4f32")
},
"sqrt_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
"rsqrte_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vrsqrte.v2i32")
+ definition: Named("llvm.arm.neon.vrsqrte.v2i32")
},
"rsqrte_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vrsqrte.v2f32")
+ definition: Named("llvm.arm.neon.vrsqrte.v2f32")
},
"rsqrteq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vrsqrte.v4i32")
+ definition: Named("llvm.arm.neon.vrsqrte.v4i32")
},
"rsqrteq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vrsqrte.v4f32")
+ definition: Named("llvm.arm.neon.vrsqrte.v4f32")
},
"rsqrts_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vrsqrts.v2f32")
+ definition: Named("llvm.arm.neon.vrsqrts.v2f32")
},
"rsqrtsq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vrsqrts.v4f32")
+ definition: Named("llvm.arm.neon.vrsqrts.v4f32")
},
"bsl_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vbsl.v8i8")
+ definition: Named("llvm.arm.neon.vbsl.v8i8")
},
"bsl_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vbsl.v8i8")
+ definition: Named("llvm.arm.neon.vbsl.v8i8")
},
"bsl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vbsl.v4i16")
+ definition: Named("llvm.arm.neon.vbsl.v4i16")
},
"bsl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vbsl.v4i16")
+ definition: Named("llvm.arm.neon.vbsl.v4i16")
},
"bsl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vbsl.v2i32")
+ definition: Named("llvm.arm.neon.vbsl.v2i32")
},
"bsl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vbsl.v2i32")
+ definition: Named("llvm.arm.neon.vbsl.v2i32")
},
"bsl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vbsl.v1i64")
+ definition: Named("llvm.arm.neon.vbsl.v1i64")
},
"bsl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vbsl.v1i64")
+ definition: Named("llvm.arm.neon.vbsl.v1i64")
},
"bslq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vbsl.v16i8")
+ definition: Named("llvm.arm.neon.vbsl.v16i8")
},
"bslq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vbsl.v16i8")
+ definition: Named("llvm.arm.neon.vbsl.v16i8")
},
"bslq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vbsl.v8i16")
+ definition: Named("llvm.arm.neon.vbsl.v8i16")
},
"bslq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vbsl.v8i16")
+ definition: Named("llvm.arm.neon.vbsl.v8i16")
},
"bslq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vbsl.v4i32")
+ definition: Named("llvm.arm.neon.vbsl.v4i32")
},
"bslq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vbsl.v4i32")
+ definition: Named("llvm.arm.neon.vbsl.v4i32")
},
"bslq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vbsl.v2i64")
+ definition: Named("llvm.arm.neon.vbsl.v2i64")
},
"bslq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vbsl.v2i64")
+ definition: Named("llvm.arm.neon.vbsl.v2i64")
},
"padd_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vpadd.v8i8")
+ definition: Named("llvm.arm.neon.vpadd.v8i8")
},
"padd_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vpadd.v8i8")
+ definition: Named("llvm.arm.neon.vpadd.v8i8")
},
"padd_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vpadd.v4i16")
+ definition: Named("llvm.arm.neon.vpadd.v4i16")
},
"padd_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vpadd.v4i16")
+ definition: Named("llvm.arm.neon.vpadd.v4i16")
},
"padd_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vpadd.v2i32")
+ definition: Named("llvm.arm.neon.vpadd.v2i32")
},
"padd_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vpadd.v2i32")
+ definition: Named("llvm.arm.neon.vpadd.v2i32")
},
"padd_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vpadd.v2f32")
+ definition: Named("llvm.arm.neon.vpadd.v2f32")
},
"paddl_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vpaddls.v4i16.v8i8")
+ definition: Named("llvm.arm.neon.vpaddls.v4i16.v8i8")
},
"paddl_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vpaddlu.v4i16.v8i8")
+ definition: Named("llvm.arm.neon.vpaddlu.v4i16.v8i8")
},
"paddl_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vpaddls.v2i32.v4i16")
+ definition: Named("llvm.arm.neon.vpaddls.v2i32.v4i16")
},
"paddl_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vpaddlu.v2i32.v4i16")
+ definition: Named("llvm.arm.neon.vpaddlu.v2i32.v4i16")
},
"paddl_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vpaddls.v1i64.v2i32")
+ definition: Named("llvm.arm.neon.vpaddls.v1i64.v2i32")
},
"paddl_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vpaddlu.v1i64.v2i32")
+ definition: Named("llvm.arm.neon.vpaddlu.v1i64.v2i32")
},
"paddlq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vpaddls.v8i16.v16i8")
+ definition: Named("llvm.arm.neon.vpaddls.v8i16.v16i8")
},
"paddlq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vpaddlu.v8i16.v16i8")
+ definition: Named("llvm.arm.neon.vpaddlu.v8i16.v16i8")
},
"paddlq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vpaddls.v4i32.v8i16")
+ definition: Named("llvm.arm.neon.vpaddls.v4i32.v8i16")
},
"paddlq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vpaddlu.v4i32.v8i16")
+ definition: Named("llvm.arm.neon.vpaddlu.v4i32.v8i16")
},
"paddlq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vpaddls.v2i64.v4i32")
+ definition: Named("llvm.arm.neon.vpaddls.v2i64.v4i32")
},
"paddlq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vpaddlu.v2i64.v4i32")
+ definition: Named("llvm.arm.neon.vpaddlu.v2i64.v4i32")
},
"padal_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I8x8]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vpadals.v4i16.v4i16")
+ definition: Named("llvm.arm.neon.vpadals.v4i16.v4i16")
},
"padal_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U8x8]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vpadalu.v4i16.v4i16")
+ definition: Named("llvm.arm.neon.vpadalu.v4i16.v4i16")
},
"padal_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I16x4]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vpadals.v2i32.v2i32")
+ definition: Named("llvm.arm.neon.vpadals.v2i32.v2i32")
},
"padal_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U16x4]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vpadalu.v2i32.v2i32")
+ definition: Named("llvm.arm.neon.vpadalu.v2i32.v2i32")
},
"padal_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I32x2]; &INPUTS },
output: &::I64x1,
- definition: Named("llvm.neon.vpadals.v1i64.v1i64")
+ definition: Named("llvm.arm.neon.vpadals.v1i64.v1i64")
},
"padal_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U32x2]; &INPUTS },
output: &::U64x1,
- definition: Named("llvm.neon.vpadalu.v1i64.v1i64")
+ definition: Named("llvm.arm.neon.vpadalu.v1i64.v1i64")
},
"padalq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I8x16]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vpadals.v8i16.v8i16")
+ definition: Named("llvm.arm.neon.vpadals.v8i16.v8i16")
},
"padalq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U8x16]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vpadalu.v8i16.v8i16")
+ definition: Named("llvm.arm.neon.vpadalu.v8i16.v8i16")
},
"padalq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I16x8]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vpadals.v4i32.v4i32")
+ definition: Named("llvm.arm.neon.vpadals.v4i32.v4i32")
},
"padalq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U16x8]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vpadalu.v4i32.v4i32")
+ definition: Named("llvm.arm.neon.vpadalu.v4i32.v4i32")
},
"padalq_s64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I32x4]; &INPUTS },
output: &::I64x2,
- definition: Named("llvm.neon.vpadals.v2i64.v2i64")
+ definition: Named("llvm.arm.neon.vpadals.v2i64.v2i64")
},
"padalq_u64" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32x4]; &INPUTS },
output: &::U64x2,
- definition: Named("llvm.neon.vpadalu.v2i64.v2i64")
+ definition: Named("llvm.arm.neon.vpadalu.v2i64.v2i64")
},
"pmax_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vpmaxs.v8i8")
+ definition: Named("llvm.arm.neon.vpmaxs.v8i8")
},
"pmax_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vpmaxu.v8i8")
+ definition: Named("llvm.arm.neon.vpmaxu.v8i8")
},
"pmax_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vpmaxs.v4i16")
+ definition: Named("llvm.arm.neon.vpmaxs.v4i16")
},
"pmax_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vpmaxu.v4i16")
+ definition: Named("llvm.arm.neon.vpmaxu.v4i16")
},
"pmax_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vpmaxs.v2i32")
+ definition: Named("llvm.arm.neon.vpmaxs.v2i32")
},
"pmax_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vpmaxu.v2i32")
+ definition: Named("llvm.arm.neon.vpmaxu.v2i32")
},
"pmax_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vpmaxf.v2f32")
+ definition: Named("llvm.arm.neon.vpmaxf.v2f32")
},
"pmin_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vpmins.v8i8")
+ definition: Named("llvm.arm.neon.vpmins.v8i8")
},
"pmin_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vpminu.v8i8")
+ definition: Named("llvm.arm.neon.vpminu.v8i8")
},
"pmin_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS },
output: &::I16x4,
- definition: Named("llvm.neon.vpmins.v4i16")
+ definition: Named("llvm.arm.neon.vpmins.v4i16")
},
"pmin_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS },
output: &::U16x4,
- definition: Named("llvm.neon.vpminu.v4i16")
+ definition: Named("llvm.arm.neon.vpminu.v4i16")
},
"pmin_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS },
output: &::I32x2,
- definition: Named("llvm.neon.vpmins.v2i32")
+ definition: Named("llvm.arm.neon.vpmins.v2i32")
},
"pmin_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS },
output: &::U32x2,
- definition: Named("llvm.neon.vpminu.v2i32")
+ definition: Named("llvm.arm.neon.vpminu.v2i32")
},
"pmin_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS },
output: &::F32x2,
- definition: Named("llvm.neon.vpminf.v2f32")
+ definition: Named("llvm.arm.neon.vpminf.v2f32")
},
"pminq_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS },
output: &::I8x16,
- definition: Named("llvm.neon.vpmins.v16i8")
+ definition: Named("llvm.arm.neon.vpmins.v16i8")
},
"pminq_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS },
output: &::U8x16,
- definition: Named("llvm.neon.vpminu.v16i8")
+ definition: Named("llvm.arm.neon.vpminu.v16i8")
},
"pminq_s16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS },
output: &::I16x8,
- definition: Named("llvm.neon.vpmins.v8i16")
+ definition: Named("llvm.arm.neon.vpmins.v8i16")
},
"pminq_u16" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS },
output: &::U16x8,
- definition: Named("llvm.neon.vpminu.v8i16")
+ definition: Named("llvm.arm.neon.vpminu.v8i16")
},
"pminq_s32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS },
output: &::I32x4,
- definition: Named("llvm.neon.vpmins.v4i32")
+ definition: Named("llvm.arm.neon.vpmins.v4i32")
},
"pminq_u32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS },
output: &::U32x4,
- definition: Named("llvm.neon.vpminu.v4i32")
+ definition: Named("llvm.arm.neon.vpminu.v4i32")
},
"pminq_f32" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS },
output: &::F32x4,
- definition: Named("llvm.neon.vpminf.v4f32")
+ definition: Named("llvm.arm.neon.vpminf.v4f32")
},
"tbl1_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbl1")
+ definition: Named("llvm.arm.neon.vtbl1")
},
"tbl1_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbl1")
+ definition: Named("llvm.arm.neon.vtbl1")
},
"tbx1_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbx1")
+ definition: Named("llvm.arm.neon.vtbx1")
},
"tbx1_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbx1")
+ definition: Named("llvm.arm.neon.vtbx1")
},
"tbl2_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbl2")
+ definition: Named("llvm.arm.neon.vtbl2")
},
"tbl2_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbl2")
+ definition: Named("llvm.arm.neon.vtbl2")
},
"tbx2_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbx2")
+ definition: Named("llvm.arm.neon.vtbx2")
},
"tbx2_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbx2")
+ definition: Named("llvm.arm.neon.vtbx2")
},
"tbl3_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbl3")
+ definition: Named("llvm.arm.neon.vtbl3")
},
"tbl3_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbl3")
+ definition: Named("llvm.arm.neon.vtbl3")
},
"tbx3_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbx3")
+ definition: Named("llvm.arm.neon.vtbx3")
},
"tbx3_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbx3")
+ definition: Named("llvm.arm.neon.vtbx3")
},
"tbl4_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbl4")
+ definition: Named("llvm.arm.neon.vtbl4")
},
"tbl4_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbl4")
+ definition: Named("llvm.arm.neon.vtbl4")
},
"tbx4_s8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::I8x8,
- definition: Named("llvm.neon.vtbx4")
+ definition: Named("llvm.arm.neon.vtbx4")
},
"tbx4_u8" => Intrinsic {
inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS },
output: &::U8x8,
- definition: Named("llvm.neon.vtbx4")
+ definition: Named("llvm.arm.neon.vtbx4")
},
_ => return None,
})
span);
self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion));
- for child in self.session.cstore.item_children(def_id) {
+ for child in self.session.cstore.item_children(def_id, self.session) {
let ns = if let Def::AssociatedTy(..) = child.def { TypeNS } else { ValueNS };
self.define(module, child.ident, ns,
(child.def, ty::Visibility::Public, DUMMY_SP, expansion));
/// is built, building it if it is not.
pub fn populate_module_if_necessary(&mut self, module: Module<'a>) {
if module.populated.get() { return }
- for child in self.session.cstore.item_children(module.def_id().unwrap()) {
+ for child in self.session.cstore.item_children(module.def_id().unwrap(), self.session) {
self.build_reduced_graph_for_external_crate_def(module, child);
}
module.populated.set(true)
-> Result<Option<Rc<SyntaxExtension>>, Determinacy> {
let def = match invoc.kind {
InvocationKind::Attr { attr: None, .. } => return Ok(None),
- _ => match self.resolve_invoc_to_def(invoc, scope, force) {
- Ok(def) => def,
- Err(determinacy) => return Err(determinacy),
- },
+ _ => self.resolve_invoc_to_def(invoc, scope, force)?,
};
self.macro_defs.insert(invoc.expansion_data.mark, def.def_id());
if let Some(err) = self.finalize_import(import) {
errors = true;
+ if let SingleImport { source, ref result, .. } = import.subclass {
+ if source.name == "self" {
+ // Silence `unresolved import` error if E0429 is already emitted
+ match result.value_ns.get() {
+ Err(Determined) => continue,
+ _ => {},
+ }
+ }
+ }
+
// If the error is a single failed import then create a "fake" import
// resolution for it so that later resolve stages won't complain.
self.import_dummy_binding(import);
rustc_typeck = { path = "../librustc_typeck" }
syntax = { path = "../libsyntax" }
syntax_pos = { path = "../libsyntax_pos" }
-rls-data = "0.3"
+rls-data = "0.6"
rls-span = "0.4"
# FIXME(#40527) should move rustc serialize out of tree
rustc-serialize = "0.3"
use syntax::ast::{self, Attribute, NodeId};
use syntax_pos::Span;
-use rls_data::ExternalCrateData;
+use rls_data::{ExternalCrateData, Signature};
pub struct CrateData {
pub name: String,
pub variants: Vec<NodeId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub visibility: Visibility,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub parent: Option<DefId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub fields: Vec<NodeId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub scope: NodeId,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub items: Vec<NodeId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub scope: NodeId,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
pub scope: NodeId,
pub ref_id: DefId,
}
-
-
-/// Encodes information about the signature of a definition. This should have
-/// enough information to create a nice display about a definition without
-/// access to the source code.
-#[derive(Clone, Debug)]
-pub struct Signature {
- pub span: Span,
- pub text: String,
- // These identify the main identifier for the defintion as byte offsets into
- // `text`. E.g., of `foo` in `pub fn foo(...)`
- pub ident_start: usize,
- pub ident_end: usize,
- pub defs: Vec<SigElement>,
- pub refs: Vec<SigElement>,
-}
-
-/// An element of a signature. `start` and `end` are byte offsets into the `text`
-/// of the parent `Signature`.
-#[derive(Clone, Debug)]
-pub struct SigElement {
- pub id: DefId,
- pub start: usize,
- pub end: usize,
-}
use syntax::codemap::Spanned;
use syntax_pos::*;
-use super::{escape, generated_code, SaveContext, PathCollector, docs_for_attrs};
-use super::data::*;
-use super::dump::Dump;
-use super::external_data::{Lower, make_def_id};
-use super::span_utils::SpanUtils;
-use super::recorder;
+use {escape, generated_code, SaveContext, PathCollector, docs_for_attrs};
+use data::*;
+use dump::Dump;
+use external_data::{Lower, make_def_id};
+use recorder;
+use span_utils::SpanUtils;
+use sig;
use rls_data::ExternalCrateData;
sig: &'l ast::MethodSig,
body: Option<&'l ast::Block>,
id: ast::NodeId,
- name: ast::Name,
+ name: ast::Ident,
vis: Visibility,
attrs: &'l [Attribute],
span: Span) {
debug!("process_method: {}:{}", id, name);
- if let Some(method_data) = self.save_ctxt.get_method_data(id, name, span) {
+ if let Some(method_data) = self.save_ctxt.get_method_data(id, name.name, span) {
let sig_str = ::make_signature(&sig.decl, &sig.generics);
if body.is_some() {
Some(id) => {
for item in self.tcx.associated_items(id) {
if item.kind == ty::AssociatedKind::Method {
- if item.name == name {
+ if item.name == name.name {
decl_id = Some(item.def_id);
break;
}
parent: trait_id,
visibility: vis,
docs: docs_for_attrs(attrs),
- sig: method_data.sig,
+ sig: sig::method_signature(id, name, sig, &self.save_ctxt),
attributes: attrs.to_vec(),
}.lower(self.tcx));
}
name: ast::Name,
span: Span,
typ: &'l ast::Ty,
- expr: &'l ast::Expr,
+ expr: Option<&'l ast::Expr>,
parent_id: DefId,
vis: Visibility,
attrs: &'l [Attribute]) {
let qualname = format!("::{}", self.tcx.node_path_str(id));
let sub_span = self.span.sub_span_after_keyword(span, keywords::Const);
+ let value = expr.map(|e| self.span.snippet(e.span)).unwrap_or(String::new());
if !self.span.filter_generated(sub_span, span) {
self.dumper.variable(VariableData {
id: id,
name: name.to_string(),
qualname: qualname,
- value: self.span.snippet(expr.span),
+ value: value,
type_value: ty_to_string(&typ),
scope: self.cur_scope,
parent: Some(parent_id),
visibility: vis,
docs: docs_for_attrs(attrs),
- sig: None,
+ sig: sig::assoc_const_signature(id, name, typ, expr, &self.save_ctxt),
attributes: attrs.to_vec(),
}.lower(self.tcx));
}
// walk type and init value
self.visit_ty(typ);
- self.visit_expr(expr);
+ if let Some(expr) = expr {
+ self.visit_expr(expr);
+ }
}
// FIXME tuple structs should generate tuple-specific data.
fields: fields,
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: self.save_ctxt.sig_base(item),
+ sig: sig::item_signature(item, &self.save_ctxt),
attributes: item.attrs.clone(),
}.lower(self.tcx));
}
qualname.push_str("::");
qualname.push_str(&name);
- let text = self.span.signature_string_for_span(variant.span);
- let ident_start = text.find(&name).unwrap();
- let ident_end = ident_start + name.len();
- let sig = Signature {
- span: variant.span,
- text: text,
- ident_start: ident_start,
- ident_end: ident_end,
- defs: vec![],
- refs: vec![],
- };
-
match variant.node.data {
ast::VariantData::Struct(ref fields, _) => {
let sub_span = self.span.span_for_first_ident(variant.span);
scope: enum_data.scope,
parent: Some(make_def_id(item.id, &self.tcx.hir)),
docs: docs_for_attrs(&variant.node.attrs),
- sig: sig,
+ sig: sig::variant_signature(variant, &self.save_ctxt),
attributes: variant.node.attrs.clone(),
}.lower(self.tcx));
}
scope: enum_data.scope,
parent: Some(make_def_id(item.id, &self.tcx.hir)),
docs: docs_for_attrs(&variant.node.attrs),
- sig: sig,
+ sig: sig::variant_signature(variant, &self.save_ctxt),
attributes: variant.node.attrs.clone(),
}.lower(self.tcx));
}
items: methods.iter().map(|i| i.id).collect(),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: self.save_ctxt.sig_base(item),
+ sig: sig::item_signature(item, &self.save_ctxt),
attributes: item.attrs.clone(),
}.lower(self.tcx));
}
fn process_trait_item(&mut self, trait_item: &'l ast::TraitItem, trait_id: DefId) {
self.process_macro_use(trait_item.span, trait_item.id);
match trait_item.node {
- ast::TraitItemKind::Const(ref ty, Some(ref expr)) => {
+ ast::TraitItemKind::Const(ref ty, ref expr) => {
self.process_assoc_const(trait_item.id,
trait_item.ident.name,
trait_item.span,
&ty,
- &expr,
+ expr.as_ref().map(|e| &**e),
trait_id,
Visibility::Public,
&trait_item.attrs);
self.process_method(sig,
body.as_ref().map(|x| &**x),
trait_item.id,
- trait_item.ident.name,
+ trait_item.ident,
Visibility::Public,
&trait_item.attrs,
trait_item.span);
}
- ast::TraitItemKind::Type(ref _bounds, ref default_ty) => {
+ ast::TraitItemKind::Type(ref bounds, ref default_ty) => {
// FIXME do something with _bounds (for type refs)
let name = trait_item.ident.name.to_string();
let qualname = format!("::{}", self.tcx.node_path_str(trait_item.id));
visibility: Visibility::Public,
parent: Some(trait_id),
docs: docs_for_attrs(&trait_item.attrs),
- sig: None,
+ sig: sig::assoc_type_signature(trait_item.id,
+ trait_item.ident,
+ Some(bounds),
+ default_ty.as_ref().map(|ty| &**ty),
+ &self.save_ctxt),
attributes: trait_item.attrs.clone(),
}.lower(self.tcx));
}
self.visit_ty(default_ty)
}
}
- ast::TraitItemKind::Const(ref ty, None) => self.visit_ty(ty),
ast::TraitItemKind::Macro(_) => {}
}
}
impl_item.ident.name,
impl_item.span,
&ty,
- &expr,
+ Some(expr),
impl_id,
From::from(&impl_item.vis),
&impl_item.attrs);
self.process_method(sig,
Some(body),
impl_item.id,
- impl_item.ident.name,
+ impl_item.ident,
From::from(&impl_item.vis),
&impl_item.attrs,
impl_item.span);
}
- ast::ImplItemKind::Type(ref ty) => self.visit_ty(ty),
+ ast::ImplItemKind::Type(ref ty) => {
+ // FIXME uses of the assoc type should ideally point to this
+ // 'def' and the name here should be a ref to the def in the
+ // trait.
+ self.visit_ty(ty)
+ }
ast::ImplItemKind::Macro(_) => {}
}
}
visibility: From::from(&item.vis),
parent: None,
docs: docs_for_attrs(&item.attrs),
- sig: Some(self.save_ctxt.sig_base(item)),
+ sig: sig::item_signature(item, &self.save_ctxt),
attributes: item.attrs.clone(),
}.lower(self.tcx));
}
use syntax::print::pprust;
use syntax_pos::Span;
-use data::{self, Visibility, SigElement};
+use data::{self, Visibility};
-use rls_data::{SpanData, CratePreludeData, Attribute};
+use rls_data::{SpanData, CratePreludeData, Attribute, Signature};
use rls_span::{Column, Row};
// FIXME: this should be pub(crate), but the current snapshot doesn't allow it yet
pub variants: Vec<DefId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
variants: self.variants.into_iter().map(|id| make_def_id(id, &tcx.hir)).collect(),
visibility: self.visibility,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub visibility: Visibility,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
visibility: self.visibility,
parent: self.parent,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub visibility: Visibility,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
visibility: self.visibility,
parent: self.parent,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
items: self.items.into_iter().map(|id| make_def_id(id, &tcx.hir)).collect(),
visibility: self.visibility,
docs: self.docs,
- sig: self.sig.map(|s| s.lower(tcx)),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub fields: Vec<DefId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
fields: self.fields.into_iter().map(|id| make_def_id(id, &tcx.hir)).collect(),
visibility: self.visibility,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub scope: DefId,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
scope: make_def_id(self.scope, &tcx.hir),
parent: self.parent,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub items: Vec<DefId>,
pub visibility: Visibility,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
items: self.items.into_iter().map(|id| make_def_id(id, &tcx.hir)).collect(),
visibility: self.visibility,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
pub scope: DefId,
pub parent: Option<DefId>,
pub docs: String,
- pub sig: Signature,
+ pub sig: Option<Signature>,
pub attributes: Vec<Attribute>,
}
scope: make_def_id(self.scope, &tcx.hir),
parent: self.parent,
docs: self.docs,
- sig: self.sig.lower(tcx),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
visibility: self.visibility,
parent: self.parent,
docs: self.docs,
- sig: self.sig.map(|s| s.lower(tcx)),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
parent: self.parent,
visibility: self.visibility,
docs: self.docs,
- sig: self.sig.map(|s| s.lower(tcx)),
+ sig: self.sig,
attributes: self.attributes.lower(tcx),
}
}
}
}
}
-
-#[derive(Clone, Debug)]
-pub struct Signature {
- pub span: SpanData,
- pub text: String,
- // These identify the main identifier for the defintion as byte offsets into
- // `text`. E.g., of `foo` in `pub fn foo(...)`
- pub ident_start: usize,
- pub ident_end: usize,
- pub defs: Vec<SigElement>,
- pub refs: Vec<SigElement>,
-}
-
-impl Lower for data::Signature {
- type Target = Signature;
-
- fn lower(self, tcx: TyCtxt) -> Signature {
- Signature {
- span: span_from_span(self.span, tcx.sess.codemap()),
- text: self.text,
- ident_start: self.ident_start,
- ident_end: self.ident_end,
- defs: self.defs,
- refs: self.refs,
- }
- }
-}
use external_data::*;
use data::{VariableKind, Visibility};
use dump::Dump;
-use json_dumper::id_from_def_id;
+use id_from_def_id;
-use rls_data::{Analysis, Import, ImportKind, Def, DefKind, CratePreludeData};
+use rls_data::{Analysis, Import, ImportKind, Def, DefKind, CratePreludeData, Format};
// A dumper to dump a restricted set of JSON information, designed for use with
impl<'b, W: Write> JsonApiDumper<'b, W> {
pub fn new(writer: &'b mut W) -> JsonApiDumper<'b, W> {
- JsonApiDumper { output: writer, result: Analysis::new() }
+ let mut result = Analysis::new();
+ result.kind = Format::JsonApi;
+ JsonApiDumper { output: writer, result }
}
}
children: self.variants.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
}),
_ => None,
children: vec![],
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
})
}
children: vec![],
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
})
}
children: self.fields.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
}),
_ => None,
parent: None,
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
}),
_ => None,
parent: self.parent.map(|id| id_from_def_id(id)),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
}),
_ => None,
parent: self.parent.map(|id| id_from_def_id(id)),
decl_id: self.decl_id.map(|id| id_from_def_id(id)),
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: vec![],
}),
_ => None,
use std::io::Write;
-use rustc::hir::def_id::DefId;
use rustc_serialize::json::as_json;
use rls_data::{self, Id, Analysis, Import, ImportKind, Def, DefKind, Ref, RefKind, MacroRef,
- Relation, RelationKind, Signature, SigElement, CratePreludeData};
+ Relation, RelationKind, CratePreludeData};
use rls_span::{Column, Row};
-use external_data;
use external_data::*;
-use data::{self, VariableKind};
+use data::VariableKind;
use dump::Dump;
+use id_from_def_id;
pub struct JsonDumper<O: DumpOutput> {
result: Analysis,
children: data.items.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: data.docs,
- sig: data.sig.map(|s| s.into()),
+ sig: data.sig,
attributes: data.attributes.into_iter().map(|a| a.into()).collect(),
};
if def.span.file_name.to_str().unwrap() != def.value {
// method, but not the supplied method). In both cases, we are currently
// ignoring it.
-// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore
-// we use our own Id which is the same, but without the newtype.
-pub fn id_from_def_id(id: DefId) -> Id {
- Id {
- krate: id.krate.as_u32(),
- index: id.index.as_u32(),
- }
-}
-
impl Into<Import> for ExternCrateData {
fn into(self) -> Import {
Import {
children: self.variants.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: vec![],
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: vec![],
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: self.fields.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: self.items.into_iter().map(|id| id_from_def_id(id)).collect(),
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: vec![],
decl_id: None,
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: vec![],
decl_id: self.decl_id.map(|id| id_from_def_id(id)),
docs: self.docs,
- sig: Some(self.sig.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
children: vec![],
decl_id: None,
docs: String::new(),
- sig: self.sig.map(|s| s.into()),
+ sig: self.sig,
attributes: self.attributes,
}
}
}
}
}
-
-impl Into<Signature> for external_data::Signature {
- fn into(self) -> Signature {
- Signature {
- span: self.span,
- text: self.text,
- ident_start: self.ident_start,
- ident_end: self.ident_end,
- defs: self.defs.into_iter().map(|s| s.into()).collect(),
- refs: self.refs.into_iter().map(|s| s.into()).collect(),
- }
- }
-}
-
-impl Into<SigElement> for data::SigElement {
- fn into(self) -> SigElement {
- SigElement {
- id: id_from_def_id(self.id),
- start: self.start,
- end: self.end,
- }
- }
-}
pub mod external_data;
#[macro_use]
pub mod span_utils;
+mod sig;
use rustc::hir;
use rustc::hir::def::Def;
visibility: From::from(&item.vis),
parent: None,
docs: docs_for_attrs(&item.attrs),
- sig: self.sig_base_extern(item),
+ sig: sig::foreign_item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
type_value: ty_to_string(ty),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: Some(self.sig_base_extern(item)),
+ sig: sig::foreign_item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
visibility: From::from(&item.vis),
parent: None,
docs: docs_for_attrs(&item.attrs),
- sig: self.sig_base(item),
+ sig: sig::item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
type_value: ty_to_string(&typ),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: Some(self.sig_base(item)),
+ sig: sig::item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
type_value: ty_to_string(&typ),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: Some(self.sig_base(item)),
+ sig: sig::item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
items: m.items.iter().map(|i| i.id).collect(),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: Some(self.sig_base(item)),
+ sig: sig::item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
variants: def.variants.iter().map(|v| v.node.data.id()).collect(),
visibility: From::from(&item.vis),
docs: docs_for_attrs(&item.attrs),
- sig: self.sig_base(item),
+ sig: sig::item_signature(item, self),
attributes: item.attrs.clone(),
}))
}
let def_id = self.tcx.hir.local_def_id(field.id);
let typ = self.tcx.type_of(def_id).to_string();
- let span = field.span;
- let text = self.span_utils.snippet(field.span);
- let ident_start = text.find(&name).unwrap();
- let ident_end = ident_start + name.len();
- let sig = Signature {
- span: span,
- text: text,
- ident_start: ident_start,
- ident_end: ident_end,
- defs: vec![],
- refs: vec![],
- };
Some(VariableData {
id: field.id,
kind: VariableKind::Field,
type_value: typ,
visibility: From::from(&field.vis),
docs: docs_for_attrs(&field.attrs),
- sig: Some(sig),
+ sig: sig::field_signature(field, self),
attributes: field.attrs.clone(),
})
} else {
// FIXME would be nice to take a MethodItem here, but the ast provides both
// trait and impl flavours, so the caller must do the disassembly.
- pub fn get_method_data(&self, id: ast::NodeId,
- name: ast::Name, span: Span) -> Option<FunctionData> {
+ pub fn get_method_data(&self,
+ id: ast::NodeId,
+ name: ast::Name,
+ span: Span)
+ -> Option<FunctionData> {
// The qualname for a method is the trait name or name of the struct in an impl in
// which the method is declared in, followed by the method's name.
let (qualname, parent_scope, decl_id, vis, docs, attributes) =
let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn);
filter!(self.span_utils, sub_span, span, None);
- let name = name.to_string();
- let text = self.span_utils.signature_string_for_span(span);
- let ident_start = text.find(&name).unwrap();
- let ident_end = ident_start + name.len();
- let sig = Signature {
- span: span,
- text: text,
- ident_start: ident_start,
- ident_end: ident_end,
- defs: vec![],
- refs: vec![],
- };
-
Some(FunctionData {
id: id,
- name: name,
+ name: name.to_string(),
qualname: qualname,
declaration: decl_id,
span: sub_span.unwrap(),
visibility: vis,
parent: parent_scope,
docs: docs,
- sig: sig,
+ sig: None,
attributes: attributes,
})
}
}
}
- fn sig_base(&self, item: &ast::Item) -> Signature {
- let text = self.span_utils.signature_string_for_span(item.span);
- let name = item.ident.to_string();
- let ident_start = text.find(&name).expect("Name not in signature?");
- let ident_end = ident_start + name.len();
- Signature {
- span: Span { hi: item.span.lo + BytePos(text.len() as u32), ..item.span },
- text: text,
- ident_start: ident_start,
- ident_end: ident_end,
- defs: vec![],
- refs: vec![],
- }
- }
-
- fn sig_base_extern(&self, item: &ast::ForeignItem) -> Signature {
- let text = self.span_utils.signature_string_for_span(item.span);
- let name = item.ident.to_string();
- let ident_start = text.find(&name).expect("Name not in signature?");
- let ident_end = ident_start + name.len();
- Signature {
- span: Span { hi: item.span.lo + BytePos(text.len() as u32), ..item.span },
- text: text,
- ident_start: ident_start,
- ident_end: ident_end,
- defs: vec![],
- refs: vec![],
- }
- }
-
#[inline]
pub fn enclosing_scope(&self, id: NodeId) -> NodeId {
self.tcx.hir.get_enclosing_scope(id).unwrap_or(CRATE_NODE_ID)
// Helper function to determine if a span came from a
// macro expansion or syntax extension.
-pub fn generated_code(span: Span) -> bool {
+fn generated_code(span: Span) -> bool {
span.ctxt != NO_EXPANSION || span == DUMMY_SP
}
+
+// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore
+// we use our own Id which is the same, but without the newtype.
+fn id_from_def_id(id: DefId) -> rls_data::Id {
+ rls_data::Id {
+ krate: id.krate.as_u32(),
+ index: id.index.as_u32(),
+ }
+}
+
+fn id_from_node_id(id: NodeId, scx: &SaveContext) -> rls_data::Id {
+ let def_id = scx.tcx.hir.local_def_id(id);
+ id_from_def_id(def_id)
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A signature is a string representation of an item's type signature, excluding
+// any body. It also includes ids for any defs or refs in the signature. For
+// example:
+//
+// ```
+// fn foo(x: String) {
+// println!("{}", x);
+// }
+// ```
+// The signature string is something like "fn foo(x: String) {}" and the signature
+// will have defs for `foo` and `x` and a ref for `String`.
+//
+// All signature text should parse in the correct context (i.e., in a module or
+// impl, etc.). Clients may want to trim trailing `{}` or `;`. The text of a
+// signature is not guaranteed to be stable (it may improve or change as the
+// syntax changes, or whitespace or punctuation may change). It is also likely
+// not to be pretty - no attempt is made to prettify the text. It is recommended
+// that clients run the text through Rustfmt.
+//
+// This module generates Signatures for items by walking the AST and looking up
+// references.
+//
+// Signatures do not include visibility info. I'm not sure if this is a feature
+// or an omission (FIXME).
+//
+// FIXME where clauses need implementing, defs/refs in generics are mostly missing.
+
+use {SaveContext, id_from_def_id, id_from_node_id};
+
+use rls_data::{Signature, SigElement};
+
+use rustc::hir::def::Def;
+use syntax::ast::{self, NodeId};
+use syntax::print::pprust;
+
+
+pub fn item_signature(item: &ast::Item, scx: &SaveContext) -> Option<Signature> {
+ item.make(0, None, scx).ok()
+}
+
+pub fn foreign_item_signature(item: &ast::ForeignItem, scx: &SaveContext) -> Option<Signature> {
+ item.make(0, None, scx).ok()
+}
+
+/// Signature for a struct or tuple field declaration.
+/// Does not include a trailing comma.
+pub fn field_signature(field: &ast::StructField, scx: &SaveContext) -> Option<Signature> {
+ field.make(0, None, scx).ok()
+}
+
+/// Does not include a trailing comma.
+pub fn variant_signature(variant: &ast::Variant, scx: &SaveContext) -> Option<Signature> {
+ variant.node.make(0, None, scx).ok()
+}
+
+pub fn method_signature(id: NodeId,
+ ident: ast::Ident,
+ m: &ast::MethodSig,
+ scx: &SaveContext)
+ -> Option<Signature> {
+ make_method_signature(id, ident, m, scx).ok()
+}
+
+pub fn assoc_const_signature(id: NodeId,
+ ident: ast::Name,
+ ty: &ast::Ty,
+ default: Option<&ast::Expr>,
+ scx: &SaveContext)
+ -> Option<Signature> {
+ make_assoc_const_signature(id, ident, ty, default, scx).ok()
+}
+
+pub fn assoc_type_signature(id: NodeId,
+ ident: ast::Ident,
+ bounds: Option<&ast::TyParamBounds>,
+ default: Option<&ast::Ty>,
+ scx: &SaveContext)
+ -> Option<Signature> {
+ make_assoc_type_signature(id, ident, bounds, default, scx).ok()
+}
+
+type Result = ::std::result::Result<Signature, &'static str>;
+
+trait Sig {
+ fn make(&self, offset: usize, id: Option<NodeId>, scx: &SaveContext) -> Result;
+}
+
+fn extend_sig(mut sig: Signature,
+ text: String,
+ defs: Vec<SigElement>,
+ refs: Vec<SigElement>)
+ -> Signature {
+ sig.text = text;
+ sig.defs.extend(defs.into_iter());
+ sig.refs.extend(refs.into_iter());
+ sig
+}
+
+fn replace_text(mut sig: Signature, text: String) -> Signature {
+ sig.text = text;
+ sig
+}
+
+fn merge_sigs(text: String, sigs: Vec<Signature>) -> Signature {
+ let mut result = Signature {
+ text,
+ defs: vec![],
+ refs: vec![],
+ };
+
+ let (defs, refs): (Vec<_>, Vec<_>) = sigs.into_iter().map(|s| (s.defs, s.refs)).unzip();
+
+ result.defs.extend(defs.into_iter().flat_map(|ds| ds.into_iter()));
+ result.refs.extend(refs.into_iter().flat_map(|rs| rs.into_iter()));
+
+ result
+}
+
+fn text_sig(text: String) -> Signature {
+ Signature {
+ text: text,
+ defs: vec![],
+ refs: vec![],
+ }
+}
+
+impl Sig for ast::Ty {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let id = Some(self.id);
+ match self.node {
+ ast::TyKind::Slice(ref ty) => {
+ let nested = ty.make(offset + 1, id, scx)?;
+ let text = format!("[{}]", nested.text);
+ Ok(replace_text(nested, text))
+ }
+ ast::TyKind::Ptr(ref mt) => {
+ let prefix = match mt.mutbl {
+ ast::Mutability::Mutable => "*mut ",
+ ast::Mutability::Immutable => "*const ",
+ };
+ let nested = mt.ty.make(offset + prefix.len(), id, scx)?;
+ let text = format!("{}{}", prefix, nested.text);
+ Ok(replace_text(nested, text))
+ }
+ ast::TyKind::Rptr(ref lifetime, ref mt) => {
+ let mut prefix = "&".to_owned();
+ if let &Some(ref l) = lifetime {
+ prefix.push_str(&l.ident.to_string());
+ prefix.push(' ');
+ }
+ if let ast::Mutability::Mutable = mt.mutbl {
+ prefix.push_str("mut ");
+ };
+
+ let nested = mt.ty.make(offset + prefix.len(), id, scx)?;
+ let text = format!("{}{}", prefix, nested.text);
+ Ok(replace_text(nested, text))
+ }
+ ast::TyKind::Never => {
+ Ok(text_sig("!".to_owned()))
+ },
+ ast::TyKind::Tup(ref ts) => {
+ let mut text = "(".to_owned();
+ let mut defs = vec![];
+ let mut refs = vec![];
+ for t in ts {
+ let nested = t.make(offset + text.len(), id, scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+ text.push(')');
+ Ok(Signature { text, defs, refs })
+ }
+ ast::TyKind::Paren(ref ty) => {
+ let nested = ty.make(offset + 1, id, scx)?;
+ let text = format!("({})", nested.text);
+ Ok(replace_text(nested, text))
+ }
+ ast::TyKind::BareFn(ref f) => {
+ let mut text = String::new();
+ if !f.lifetimes.is_empty() {
+ // FIXME defs, bounds on lifetimes
+ text.push_str("for<");
+ text.push_str(&f.lifetimes.iter().map(|l|
+ l.lifetime.ident.to_string()).collect::<Vec<_>>().join(", "));
+ text.push('>');
+ }
+
+ if f.unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ if f.abi != ::syntax::abi::Abi::Rust {
+ text.push_str("extern");
+ text.push_str(&f.abi.to_string());
+ text.push(' ');
+ }
+ text.push_str("fn(");
+
+ let mut defs = vec![];
+ let mut refs = vec![];
+ for i in &f.decl.inputs {
+ let nested = i.ty.make(offset + text.len(), Some(i.id), scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+ text.push(')');
+ if let ast::FunctionRetTy::Ty(ref t) = f.decl.output {
+ text.push_str(" -> ");
+ let nested = t.make(offset + text.len(), None, scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+
+ Ok(Signature { text, defs, refs })
+ }
+ ast::TyKind::Path(None, ref path) => {
+ path.make(offset, id, scx)
+ }
+ ast::TyKind::Path(Some(ref qself), ref path) => {
+ let nested_ty = qself.ty.make(offset + 1, id, scx)?;
+ let prefix = if qself.position == 0 {
+ format!("<{}>::", nested_ty.text)
+ } else if qself.position == 1 {
+ let first = pprust::path_segment_to_string(&path.segments[0]);
+ format!("<{} as {}>::", nested_ty.text, first)
+ } else {
+                    // FIXME handle path instead of ellipses.
+ format!("<{} as ...>::", nested_ty.text)
+ };
+
+ let name = pprust::path_segment_to_string(path.segments.last().ok_or("Bad path")?);
+ let def = scx.get_path_def(id.ok_or("Missing id for Path")?);
+ let id = id_from_def_id(def.def_id());
+ if path.segments.len() - qself.position == 1 {
+ let start = offset + prefix.len();
+ let end = start + name.len();
+
+ Ok(Signature {
+ text: prefix + &name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ } else {
+ let start = offset + prefix.len() + 5;
+ let end = start + name.len();
+                    // FIXME should put the proper path in there, not ellipses.
+ Ok(Signature {
+ text: prefix + "...::" + &name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ }
+ }
+ ast::TyKind::TraitObject(ref bounds) => {
+ // FIXME recurse into bounds
+ let nested = pprust::bounds_to_string(bounds);
+ Ok(text_sig(nested))
+ }
+ ast::TyKind::ImplTrait(ref bounds) => {
+ // FIXME recurse into bounds
+ let nested = pprust::bounds_to_string(bounds);
+ Ok(text_sig(format!("impl {}", nested)))
+ }
+ ast::TyKind::Array(ref ty, ref v) => {
+ let nested_ty = ty.make(offset + 1, id, scx)?;
+ let expr = pprust::expr_to_string(v).replace('\n', " ");
+ let text = format!("[{}; {}]", nested_ty.text, expr);
+ Ok(replace_text(nested_ty, text))
+ }
+ ast::TyKind::Typeof(_) |
+ ast::TyKind::Infer |
+ ast::TyKind::Err |
+ ast::TyKind::ImplicitSelf |
+ ast::TyKind::Mac(_) => Err("Ty"),
+ }
+ }
+}
+
+impl Sig for ast::Item {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let id = Some(self.id);
+
+ match self.node {
+ ast::ItemKind::Static(ref ty, m, ref expr) => {
+ let mut text = "static ".to_owned();
+ if m == ast::Mutability::Mutable {
+ text.push_str("mut ");
+ }
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_node_id(self.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty = ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty.text);
+ text.push_str(" = ");
+
+ let expr = pprust::expr_to_string(expr).replace('\n', " ");
+ text.push_str(&expr);
+ text.push(';');
+
+ Ok(extend_sig(ty, text, defs, vec![]))
+ }
+ ast::ItemKind::Const(ref ty, ref expr) => {
+ let mut text = "const ".to_owned();
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_node_id(self.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty = ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty.text);
+ text.push_str(" = ");
+
+ let expr = pprust::expr_to_string(expr).replace('\n', " ");
+ text.push_str(&expr);
+ text.push(';');
+
+ Ok(extend_sig(ty, text, defs, vec![]))
+ }
+ ast::ItemKind::Fn(ref decl, unsafety, constness, abi, ref generics, _) => {
+ let mut text = String::new();
+ if constness.node == ast::Constness::Const {
+ text.push_str("const ");
+ }
+ if unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ if abi != ::syntax::abi::Abi::Rust {
+ text.push_str("extern");
+ text.push_str(&abi.to_string());
+ text.push(' ');
+ }
+ text.push_str("fn ");
+
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+
+ sig.text.push('(');
+ for i in &decl.inputs {
+                // FIXME should descend into patterns to add defs.
+ sig.text.push_str(&pprust::pat_to_string(&i.pat));
+ sig.text.push_str(": ");
+ let nested = i.ty.make(offset + sig.text.len(), Some(i.id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let ast::FunctionRetTy::Ty(ref t) = decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(offset + sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+ }
+ ast::ItemKind::Mod(ref _mod) => {
+ let mut text = "mod ".to_owned();
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_node_id(self.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+            // Could be either `mod foo;` or `mod foo { ... }`, but we'll just pick one.
+ text.push(';');
+
+ Ok(Signature {
+ text,
+ defs,
+ refs: vec![],
+ })
+ }
+ ast::ItemKind::Ty(ref ty, ref generics) => {
+ let text = "type ".to_owned();
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+
+ sig.text.push_str(" = ");
+ let ty = ty.make(offset + sig.text.len(), id, scx)?;
+ sig.text.push_str(&ty.text);
+ sig.text.push(';');
+
+ Ok(merge_sigs(sig.text.clone(), vec![sig, ty]))
+ }
+ ast::ItemKind::Enum(_, ref generics) => {
+ let text = "enum ".to_owned();
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ ast::ItemKind::Struct(_, ref generics) => {
+ let text = "struct ".to_owned();
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ ast::ItemKind::Union(_, ref generics) => {
+ let text = "union ".to_owned();
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ ast::ItemKind::Trait(unsafety, ref generics, ref bounds, _) => {
+ let mut text = String::new();
+ if unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ text.push_str("trait ");
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+
+ if !bounds.is_empty() {
+ sig.text.push_str(": ");
+ sig.text.push_str(&pprust::bounds_to_string(bounds));
+ }
+ // FIXME where clause
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+ }
+ ast::ItemKind::DefaultImpl(unsafety, ref trait_ref) => {
+ let mut text = String::new();
+ if unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ text.push_str("impl ");
+ let trait_sig = trait_ref.path.make(offset + text.len(), id, scx)?;
+ text.push_str(&trait_sig.text);
+ text.push_str(" for .. {}");
+ Ok(replace_text(trait_sig, text))
+ }
+ ast::ItemKind::Impl(unsafety,
+ polarity,
+ defaultness,
+ ref generics,
+ ref opt_trait,
+ ref ty,
+ _) => {
+ let mut text = String::new();
+ if let ast::Defaultness::Default = defaultness {
+ text.push_str("default ");
+ }
+ if unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ text.push_str("impl");
+
+ let generics_sig = generics.make(offset + text.len(), id, scx)?;
+ text.push_str(&generics_sig.text);
+
+ text.push(' ');
+
+ let trait_sig = if let Some(ref t) = *opt_trait {
+ if polarity == ast::ImplPolarity::Negative {
+ text.push('!');
+ }
+ let trait_sig = t.path.make(offset + text.len(), id, scx)?;
+ text.push_str(&trait_sig.text);
+ text.push_str(" for ");
+ trait_sig
+ } else {
+ text_sig(String::new())
+ };
+
+ let ty_sig = ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty_sig.text);
+
+ text.push_str(" {}");
+
+ Ok(merge_sigs(text, vec![generics_sig, trait_sig, ty_sig]))
+
+ // FIXME where clause
+ }
+ ast::ItemKind::ForeignMod(_) => Err("extern mod"),
+            ast::ItemKind::GlobalAsm(_) => Err("global asm"),
+ ast::ItemKind::ExternCrate(_) => Err("extern crate"),
+ // FIXME should implement this (e.g., pub use).
+ ast::ItemKind::Use(_) => Err("import"),
+ ast::ItemKind::Mac(..) |
+ ast::ItemKind::MacroDef(_) => Err("Macro"),
+ }
+ }
+}
+
+impl Sig for ast::Path {
+ fn make(&self, offset: usize, id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let def = scx.get_path_def(id.ok_or("Missing id for Path")?);
+
+ let (name, start, end) = match def {
+ Def::Label(..) |
+ Def::PrimTy(..) |
+ Def::SelfTy(..) |
+ Def::Err => {
+ return Ok(Signature {
+ text: pprust::path_to_string(self),
+ defs: vec![],
+ refs: vec![],
+ })
+ }
+ Def::AssociatedConst(..) |
+ Def::Variant(..) |
+ Def::VariantCtor(..) => {
+ let len = self.segments.len();
+ if len < 2 {
+ return Err("Bad path");
+ }
+ // FIXME: really we should descend into the generics here and add SigElements for
+ // them.
+ // FIXME: would be nice to have a def for the first path segment.
+ let seg1 = pprust::path_segment_to_string(&self.segments[len - 2]);
+ let seg2 = pprust::path_segment_to_string(&self.segments[len - 1]);
+ let start = offset + seg1.len() + 2;
+ (format!("{}::{}", seg1, seg2), start, start + seg2.len())
+ }
+ _ => {
+ let name = pprust::path_segment_to_string(self.segments.last().ok_or("Bad path")?);
+ let end = offset + name.len();
+ (name, offset, end)
+ }
+ };
+
+ let id = id_from_def_id(def.def_id());
+ Ok(Signature {
+ text: name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ }
+}
+
+// This does not cover the where clause, which must be processed separately.
+impl Sig for ast::Generics {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let total = self.lifetimes.len() + self.ty_params.len();
+ if total == 0 {
+ return Ok(text_sig(String::new()));
+ }
+
+ let mut text = "<".to_owned();
+
+ let mut defs = vec![];
+ for l in &self.lifetimes {
+ let mut l_text = l.lifetime.ident.to_string();
+ defs.push(SigElement {
+ id: id_from_node_id(l.lifetime.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + l_text.len(),
+ });
+
+ if !l.bounds.is_empty() {
+ l_text.push_str(": ");
+ let bounds = l.bounds.iter().map(|l| {
+ l.ident.to_string()
+ }).collect::<Vec<_>>().join(" + ");
+ l_text.push_str(&bounds);
+ // FIXME add lifetime bounds refs.
+ }
+ text.push_str(&l_text);
+ text.push(',');
+ }
+ for t in &self.ty_params {
+ let mut t_text = t.ident.to_string();
+ defs.push(SigElement {
+ id: id_from_node_id(t.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + t_text.len(),
+ });
+
+ if !t.bounds.is_empty() {
+ t_text.push_str(": ");
+ t_text.push_str(&pprust::bounds_to_string(&t.bounds));
+ // FIXME descend properly into bounds.
+ }
+ text.push_str(&t_text);
+ text.push(',');
+ }
+
+ text.push('>');
+ Ok(Signature {text, defs, refs: vec![] })
+ }
+}
+
+impl Sig for ast::StructField {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let mut text = String::new();
+ let mut defs = None;
+ if let Some(ref ident) = self.ident {
+ text.push_str(&ident.to_string());
+ defs = Some(SigElement {
+ id: id_from_node_id(self.id, scx),
+ start: offset,
+ end: offset + text.len(),
+ });
+ text.push_str(": ");
+ }
+
+ let mut ty_sig = self.ty.make(offset + text.len(), Some(self.id), scx)?;
+ text.push_str(&ty_sig.text);
+ ty_sig.text = text;
+ ty_sig.defs.extend(defs.into_iter());
+ Ok(ty_sig)
+ }
+}
+
+
+impl Sig for ast::Variant_ {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let mut text = self.name.to_string();
+ match self.data {
+ ast::VariantData::Struct(ref fields, id) => {
+ let name_def = SigElement {
+ id: id_from_node_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ text.push_str(" { ");
+ let mut defs = vec![name_def];
+ let mut refs = vec![];
+ for f in fields {
+ let field_sig = f.make(offset + text.len(), Some(id), scx)?;
+ text.push_str(&field_sig.text);
+ text.push_str(", ");
+ defs.extend(field_sig.defs.into_iter());
+ refs.extend(field_sig.refs.into_iter());
+ }
+ text.push('}');
+ Ok(Signature {
+ text,
+ defs: defs,
+ refs: refs,
+ })
+ }
+ ast::VariantData::Tuple(ref fields, id) => {
+ let name_def = SigElement {
+ id: id_from_node_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ text.push('(');
+ let mut defs = vec![name_def];
+ let mut refs = vec![];
+ for f in fields {
+ let field_sig = f.make(offset + text.len(), Some(id), scx)?;
+ text.push_str(&field_sig.text);
+ text.push_str(", ");
+ defs.extend(field_sig.defs.into_iter());
+ refs.extend(field_sig.refs.into_iter());
+ }
+ text.push(')');
+ Ok(Signature {
+ text,
+ defs: defs,
+ refs: refs,
+ })
+ }
+ ast::VariantData::Unit(id) => {
+ let name_def = SigElement {
+ id: id_from_node_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ Ok(Signature {
+ text,
+ defs: vec![name_def],
+ refs: vec![],
+ })
+ }
+ }
+ }
+}
+
+impl Sig for ast::ForeignItem {
+ fn make(&self, offset: usize, _parent_id: Option<NodeId>, scx: &SaveContext) -> Result {
+ let id = Some(self.id);
+ match self.node {
+ ast::ForeignItemKind::Fn(ref decl, ref generics) => {
+ let mut text = String::new();
+ text.push_str("fn ");
+
+ let mut sig = name_and_generics(text,
+ offset,
+ generics,
+ self.id,
+ self.ident,
+ scx)?;
+
+ sig.text.push('(');
+ for i in &decl.inputs {
+ // FIXME should descend into patterns to add defs.
+ sig.text.push_str(&pprust::pat_to_string(&i.pat));
+ sig.text.push_str(": ");
+ let nested = i.ty.make(offset + sig.text.len(), Some(i.id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let ast::FunctionRetTy::Ty(ref t) = decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(offset + sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(';');
+
+ Ok(sig)
+ }
+ ast::ForeignItemKind::Static(ref ty, m) => {
+ let mut text = "static ".to_owned();
+ if m {
+ text.push_str("mut ");
+ }
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_node_id(self.id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty_sig = ty.make(offset + text.len(), id, scx)?;
+ text.push(';');
+
+ Ok(extend_sig(ty_sig, text, defs, vec![]))
+ }
+ }
+ }
+}
+
+fn name_and_generics(mut text: String,
+ offset: usize,
+ generics: &ast::Generics,
+ id: NodeId,
+ name: ast::Ident,
+ scx: &SaveContext)
+ -> Result {
+ let name = name.to_string();
+ let def = SigElement {
+ id: id_from_node_id(id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ };
+ text.push_str(&name);
+ let generics: Signature = generics.make(offset + text.len(), Some(id), scx)?;
+ // FIXME where clause
+ let text = format!("{}{}", text, generics.text);
+ Ok(extend_sig(generics, text, vec![def], vec![]))
+}
+
+
+fn make_assoc_type_signature(id: NodeId,
+ ident: ast::Ident,
+ bounds: Option<&ast::TyParamBounds>,
+ default: Option<&ast::Ty>,
+ scx: &SaveContext)
+ -> Result {
+ let mut text = "type ".to_owned();
+ let name = ident.to_string();
+ let mut defs = vec![SigElement {
+ id: id_from_node_id(id, scx),
+ start: text.len(),
+ end: text.len() + name.len(),
+ }];
+ let mut refs = vec![];
+ text.push_str(&name);
+ if let Some(bounds) = bounds {
+ text.push_str(": ");
+ // FIXME should descend into bounds
+ text.push_str(&pprust::bounds_to_string(bounds));
+ }
+ if let Some(default) = default {
+ text.push_str(" = ");
+ let ty_sig = default.make(text.len(), Some(id), scx)?;
+ text.push_str(&ty_sig.text);
+ defs.extend(ty_sig.defs.into_iter());
+ refs.extend(ty_sig.refs.into_iter());
+ }
+ text.push(';');
+ Ok(Signature { text, defs, refs })
+}
+
+fn make_assoc_const_signature(id: NodeId,
+ ident: ast::Name,
+ ty: &ast::Ty,
+ default: Option<&ast::Expr>,
+ scx: &SaveContext)
+ -> Result {
+ let mut text = "const ".to_owned();
+ let name = ident.to_string();
+ let mut defs = vec![SigElement {
+ id: id_from_node_id(id, scx),
+ start: text.len(),
+ end: text.len() + name.len(),
+ }];
+ let mut refs = vec![];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty_sig = ty.make(text.len(), Some(id), scx)?;
+ text.push_str(&ty_sig.text);
+ defs.extend(ty_sig.defs.into_iter());
+ refs.extend(ty_sig.refs.into_iter());
+
+ if let Some(default) = default {
+ text.push_str(" = ");
+ text.push_str(&pprust::expr_to_string(default));
+ }
+ text.push(';');
+ Ok(Signature { text, defs, refs })
+}
+
+fn make_method_signature(id: NodeId,
+ ident: ast::Ident,
+ m: &ast::MethodSig,
+ scx: &SaveContext)
+ -> Result {
+ // FIXME code dup with function signature
+ let mut text = String::new();
+ if m.constness.node == ast::Constness::Const {
+ text.push_str("const ");
+ }
+ if m.unsafety == ast::Unsafety::Unsafe {
+ text.push_str("unsafe ");
+ }
+ if m.abi != ::syntax::abi::Abi::Rust {
+ text.push_str("extern");
+ text.push_str(&m.abi.to_string());
+ text.push(' ');
+ }
+ text.push_str("fn ");
+
+ let mut sig = name_and_generics(text,
+ 0,
+ &m.generics,
+ id,
+ ident,
+ scx)?;
+
+ sig.text.push('(');
+ for i in &m.decl.inputs {
+        // FIXME should descend into patterns to add defs.
+ sig.text.push_str(&pprust::pat_to_string(&i.pat));
+ sig.text.push_str(": ");
+ let nested = i.ty.make(sig.text.len(), Some(i.id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let ast::FunctionRetTy::Ty(ref t) = m.decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+}
use CrateTranslation;
use rustc::util::common::time;
use rustc::util::fs::fix_windows_verbatim_for_gcc;
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::{DepKind, DepNode};
use rustc::hir::def_id::CrateNum;
use rustc::hir::svh::Svh;
use rustc_back::tempdir::TempDir;
}
pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap) -> LinkMeta {
+ let krate_dep_node = &DepNode::new_no_params(DepKind::Krate);
let r = LinkMeta {
- crate_hash: Svh::new(incremental_hashes_map[&DepNode::Krate].to_smaller_hash()),
+ crate_hash: Svh::new(incremental_hashes_map[krate_dep_node].to_smaller_hash()),
};
info!("{:?}", r);
return r;
}
-// The third parameter is for an env vars, used to set up the path for MSVC
-// to find its DLLs
+// The third parameter is for env vars, used on windows to set up the
+// path for MSVC to find its DLLs, and gcc to find its bundled
+// toolchain
pub fn get_linker(sess: &Session) -> (String, Command, Vec<(OsString, OsString)>) {
+ let envs = vec![("PATH".into(), command_path(sess))];
+
if let Some(ref linker) = sess.opts.cg.linker {
- (linker.clone(), Command::new(linker), vec![])
+ (linker.clone(), Command::new(linker), envs)
} else if sess.target.target.options.is_like_msvc {
let (cmd, envs) = msvc_link_exe_cmd(sess);
("link.exe".to_string(), cmd, envs)
} else {
- (sess.target.target.options.linker.clone(),
- Command::new(&sess.target.target.options.linker), vec![])
+ let linker = &sess.target.target.options.linker;
+ (linker.clone(), Command::new(&linker), envs)
}
}
})
}
-fn command_path(sess: &Session, extra: Option<PathBuf>) -> OsString {
+fn command_path(sess: &Session) -> OsString {
// The compiler's sysroot often has some bundled tools, so add it to the
// PATH for the child.
let mut new_path = sess.host_filesearch(PathKind::All)
if let Some(path) = env::var_os("PATH") {
new_path.extend(env::split_paths(&path));
}
- new_path.extend(extra);
env::join_paths(new_path).unwrap()
}
src: input.map(|p| p.to_path_buf()),
lib_search_paths: archive_search_paths(sess),
ar_prog: get_ar_prog(sess),
- command_path: command_path(sess, None),
+ command_path: command_path(sess),
}
}
// The invocations of cc share some flags across platforms
let (pname, mut cmd, envs) = get_linker(sess);
- // This will set PATH on MSVC
+ // This will set PATH on windows
cmd.envs(envs);
let root = sess.target_filesearch(PathKind::Native).get_lib_path();
};
bcx.call(expect, &[llargs[0], C_i32(ccx, rw), llargs[1], C_i32(ccx, cache_type)], None)
},
- "ctlz" | "cttz" | "ctpop" | "bswap" |
+ "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
"overflowing_add" | "overflowing_sub" | "overflowing_mul" |
"unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
bcx.call(llfn, &[llargs[0], y], None)
}
+ "ctlz_nonzero" | "cttz_nonzero" => {
+ let y = C_bool(bcx.ccx, true);
+ let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
+ let llfn = ccx.get_intrinsic(llvm_name);
+ bcx.call(llfn, &[llargs[0], y], None)
+ }
"ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, None),
"bswap" => {
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
- bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
- bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
+
+ // See comment above about zero-sized values.
+ let (a_zst, b_zst) = common::type_pair_fields(bcx.ccx, operand.ty)
+ .map_or((false, false), |[a_ty, b_ty]| {
+ (common::type_is_zero_size(bcx.ccx, a_ty),
+ common::type_is_zero_size(bcx.ccx, b_ty))
+ });
+
+ if !a_zst {
+ bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
+ }
+ if !b_zst {
+ bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
+ }
}
}
}
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc_incremental::IchHasher;
use std::hash::Hash;
-use std::sync::Arc;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, InstantiationMode};
&self.items
}
- pub fn work_product_id(&self) -> Arc<WorkProductId> {
- Arc::new(WorkProductId(self.name().to_string()))
+ pub fn work_product_id(&self) -> WorkProductId {
+ WorkProductId::from_cgu_name(self.name())
}
- pub fn work_product_dep_node(&self) -> DepNode<DefId> {
- DepNode::WorkProduct(self.work_product_id())
+ pub fn work_product_dep_node(&self) -> DepNode {
+ self.work_product_id().to_dep_node()
}
pub fn compute_symbol_name_hash<'a>(&self,
use declare;
use llvm;
use monomorphize::Instance;
-use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
self.to_raw_string(),
ccx.codegen_unit().name());
- // (*) This code executes in the context of a dep-node for the
- // entire CGU. In some cases, we introduce dep-nodes for
- // particular items that we are translating (these nodes will
- // have read edges coming into the CGU node). These smaller
- // nodes are not needed for correctness -- we always
- // invalidate an entire CGU at a time -- but they enable
- // finer-grained testing, since you can write tests that check
- // that the incoming edges to a particular fn are from a
- // particular set.
-
match *self {
TransItem::Static(node_id) => {
- let def_id = ccx.tcx().hir.local_def_id(node_id);
- let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*)
- let item = ccx.tcx().hir.expect_item(node_id);
+ let tcx = ccx.tcx();
+ let item = tcx.hir.expect_item(node_id);
if let hir::ItemStatic(_, m, _) = item.node {
match consts::trans_static(&ccx, m, item.id, &item.attrs) {
Ok(_) => { /* Cool, everything's alright. */ },
Err(err) => {
- err.report(ccx.tcx(), item.span, "static");
+ err.report(tcx, item.span, "static");
}
};
} else {
}
}
TransItem::Fn(instance) => {
- let _task = ccx.tcx().dep_graph.in_task(
- DepNode::TransCrateItem(instance.def_id())); // (*)
-
base::trans_instance(&ccx, instance);
}
}
param_env,
normalize_cause.clone());
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let inh = Inherited::new(infcx, impl_m.def_id);
let infcx = &inh.infcx;
impl_trait_ref: ty::TraitRef<'tcx>) {
debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty(Reveal::UserFacing);
let inh = Inherited::new(infcx, impl_c.def_id);
let infcx = &inh.infcx;
// check that the impl type can be made to match the trait type.
- tcx.infer_ctxt(()).enter(|ref infcx| {
+ tcx.infer_ctxt().enter(|ref infcx| {
let impl_param_env = tcx.param_env(self_type_did);
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
"volatile_store" =>
(1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_nil()),
- "ctpop" | "ctlz" | "cttz" | "bswap" => (1, vec![param(0)], param(0)),
+ "ctpop" | "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "bswap" =>
+ (1, vec![param(0)], param(0)),
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" =>
(1, vec![param(0), param(0)],
// overloaded lvalue ops, and will be fixed by them in order to get
// the correct region.
let mut source = self.node_ty(expr.id);
- if let Some(adjustments) = self.tables.borrow_mut().adjustments.get_mut(&expr.id) {
+ // Do not mutate adjustments in place, but rather take them,
+ // and replace them after mutating them, to avoid having the
+ // tables borrowed during (`deref_mut`) method resolution.
+ let previous_adjustments = self.tables.borrow_mut().adjustments.remove(&expr.id);
+ if let Some(mut adjustments) = previous_adjustments {
let pref = LvaluePreference::PreferMutLvalue;
- for adjustment in adjustments {
+ for adjustment in &mut adjustments {
if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind {
if let Some(ok) = self.try_overloaded_deref(expr.span, source, pref) {
let method = self.register_infer_ok_obligations(ok);
}
source = adjustment.target;
}
+ self.tables.borrow_mut().adjustments.insert(expr.id, adjustments);
}
match expr.node {
if !external_mods.insert(def_id) {
return;
}
- for child in tcx.sess.cstore.item_children(def_id) {
+ for child in tcx.sess.cstore.item_children(def_id, tcx.sess) {
handle_external_def(tcx, traits, external_mods, child.def)
}
}
use util::common::{ErrorReported, indenter};
use util::nodemap::{DefIdMap, FxHashMap, NodeMap};
-use std::cell::{Cell, RefCell};
+use std::cell::{Cell, RefCell, Ref, RefMut};
use std::collections::hash_map::Entry;
use std::cmp;
use std::mem::replace;
use syntax::ptr::P;
use syntax::symbol::{Symbol, InternedString, keywords};
use syntax::util::lev_distance::find_best_match_for_name;
-use syntax_pos::{self, BytePos, Span, DUMMY_SP};
+use syntax_pos::{self, BytePos, Span};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
mod intrinsic;
mod op;
+/// A wrapper for InferCtxt's `in_progress_tables` field.
+#[derive(Copy, Clone)]
+struct MaybeInProgressTables<'a, 'tcx: 'a> {
+ maybe_tables: Option<&'a RefCell<ty::TypeckTables<'tcx>>>,
+}
+
+impl<'a, 'tcx> MaybeInProgressTables<'a, 'tcx> {
+ fn borrow(self) -> Ref<'a, ty::TypeckTables<'tcx>> {
+ match self.maybe_tables {
+ Some(tables) => tables.borrow(),
+ None => {
+ bug!("MaybeInProgressTables: inh/fcx.tables.borrow() with no tables")
+ }
+ }
+ }
+
+ fn borrow_mut(self) -> RefMut<'a, ty::TypeckTables<'tcx>> {
+ match self.maybe_tables {
+ Some(tables) => tables.borrow_mut(),
+ None => {
+ bug!("MaybeInProgressTables: inh/fcx.tables.borrow_mut() with no tables")
+ }
+ }
+ }
+}
+
+
/// closures defined within the function. For example:
///
/// fn foo() {
pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: InferCtxt<'a, 'gcx, 'tcx>,
+ tables: MaybeInProgressTables<'a, 'tcx>,
+
locals: RefCell<NodeMap<Ty<'tcx>>>,
fulfillment_cx: RefCell<traits::FulfillmentContext<'tcx>>,
impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
pub fn build(tcx: TyCtxt<'a, 'gcx, 'gcx>, def_id: DefId)
-> InheritedBuilder<'a, 'gcx, 'tcx> {
- let tables = ty::TypeckTables::empty();
InheritedBuilder {
- infcx: tcx.infer_ctxt(tables),
+ infcx: tcx.infer_ctxt().with_fresh_in_progress_tables(),
def_id,
}
}
});
Inherited {
+ tables: MaybeInProgressTables {
+ maybe_tables: infcx.in_progress_tables,
+ },
infcx: infcx,
fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()),
locals: RefCell::new(NodeMap()),
value)
}
- pub fn write_nil(&self, node_id: ast::NodeId) {
- self.write_ty(node_id, self.tcx.mk_nil());
- }
-
- pub fn write_error(&self, node_id: ast::NodeId) {
- self.write_ty(node_id, self.tcx.types.err);
- }
-
pub fn require_type_meets(&self,
ty: Ty<'tcx>,
span: Span,
.emit();
self.tcx().types.err
} else {
- let mut err = self.type_error_struct(field.span, |actual| {
- format!("no field `{}` on type `{}`",
- field.node, actual)
- }, expr_t);
- match expr_t.sty {
- ty::TyAdt(def, _) if !def.is_enum() => {
- if let Some(suggested_field_name) =
- Self::suggest_field_name(def.struct_variant(), field, vec![]) {
- err.span_label(field.span,
- format!("did you mean `{}`?", suggested_field_name));
- } else {
- err.span_label(field.span,
- "unknown field");
- };
- }
- ty::TyRawPtr(..) => {
- err.note(&format!("`{0}` is a native pointer; perhaps you need to deref with \
- `(*{0}).{1}`",
- self.tcx.hir.node_to_pretty_string(base.id),
- field.node));
+ if !expr_t.is_primitive_ty() {
+ let mut err = type_error_struct!(self.tcx().sess, field.span, expr_t, E0609,
+ "no field `{}` on type `{}`",
+ field.node, expr_t);
+ match expr_t.sty {
+ ty::TyAdt(def, _) if !def.is_enum() => {
+ if let Some(suggested_field_name) =
+ Self::suggest_field_name(def.struct_variant(), field, vec![]) {
+ err.span_label(field.span,
+ format!("did you mean `{}`?", suggested_field_name));
+ } else {
+ err.span_label(field.span, "unknown field");
+ };
+ }
+ ty::TyRawPtr(..) => {
+ err.note(&format!("`{0}` is a native pointer; perhaps you need to deref \
+ with `(*{0}).{1}`",
+ self.tcx.hir.node_to_pretty_string(base.id),
+ field.node));
+ }
+ _ => {}
}
- _ => {}
- }
- err.emit();
+ err
+ } else {
+ type_error_struct!(self.tcx().sess, field.span, expr_t, E0610,
+ "`{}` is a primitive type and therefore doesn't have fields",
+ expr_t)
+ }.emit();
self.tcx().types.err
}
}
self.check_expr_has_type(base_expr, struct_ty);
match struct_ty.sty {
ty::TyAdt(adt, substs) if adt.is_struct() => {
- self.tables.borrow_mut().fru_field_types.insert(
- expr.id,
- adt.struct_variant().fields.iter().map(|f| {
- self.normalize_associated_types_in(
- expr.span, &f.ty(self.tcx, substs)
- )
- }).collect()
- );
+ let fru_field_types = adt.struct_variant().fields.iter().map(|f| {
+ self.normalize_associated_types_in(expr.span, &f.ty(self.tcx, substs))
+ }).collect();
+ self.tables.borrow_mut().fru_field_types.insert(expr.id, fru_field_types);
}
_ => {
span_err!(self.tcx.sess, base_expr.span, E0436,
pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) {
// Don't do all the complex logic below for DeclItem.
match stmt.node {
- hir::StmtDecl(ref decl, id) => {
+ hir::StmtDecl(ref decl, _) => {
match decl.node {
hir::DeclLocal(_) => {}
hir::DeclItem(_) => {
- self.write_nil(id);
return;
}
}
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
- let (node_id, _span) = match stmt.node {
- hir::StmtDecl(ref decl, id) => {
- let span = match decl.node {
+ match stmt.node {
+ hir::StmtDecl(ref decl, _) => {
+ match decl.node {
hir::DeclLocal(ref l) => {
self.check_decl_local(&l);
- l.span
}
- hir::DeclItem(_) => {/* ignore for now */
- DUMMY_SP
- }
- };
- (id, span)
+ hir::DeclItem(_) => {/* ignore for now */}
+ }
}
- hir::StmtExpr(ref expr, id) => {
+ hir::StmtExpr(ref expr, _) => {
// Check with expected type of ()
self.check_expr_has_type(&expr, self.tcx.mk_nil());
- (id, expr.span)
}
- hir::StmtSemi(ref expr, id) => {
+ hir::StmtSemi(ref expr, _) => {
self.check_expr(&expr);
- (id, expr.span)
}
- };
-
- if self.has_errors.get() {
- self.write_error(node_id);
- } else {
- self.write_nil(node_id);
}
// Combine the diverging and has_error flags.
hir::StmtSemi(ref e, _) => e,
_ => return,
};
- let last_expr_ty = self.expr_ty(last_expr);
+ let last_expr_ty = self.node_ty(last_expr.id);
if self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err() {
return;
}
}
}
+ /// Create a temporary `MemCategorizationContext` and pass it to the closure.
+ fn with_mc<F, R>(&self, f: F) -> R
+ where F: for<'b> FnOnce(mc::MemCategorizationContext<'b, 'gcx, 'tcx>) -> R
+ {
+ f(mc::MemCategorizationContext::with_infer(&self.infcx,
+ &self.region_maps,
+ &self.tables.borrow()))
+ }
+
/// Invoked on any adjustments that occur. Checks that if this is a region pointer being
/// dereferenced, the lifetime of the pointer includes the deref expr.
fn constrain_adjustments(&mut self, expr: &hir::Expr) -> mc::McResult<mc::cmt<'tcx>> {
debug!("constrain_adjustments(expr={:?})", expr);
- let mut cmt = {
- let mc = mc::MemCategorizationContext::new(self, &self.region_maps);
- mc.cat_expr_unadjusted(expr)?
- };
+ let mut cmt = self.with_mc(|mc| mc.cat_expr_unadjusted(expr))?;
- //NOTE(@jroesch): mixed RefCell borrow causes crash
- let adjustments = self.tables.borrow().expr_adjustments(&expr).to_vec();
+ let tables = self.tables.borrow();
+ let adjustments = tables.expr_adjustments(&expr);
if adjustments.is_empty() {
return Ok(cmt);
}
expr.id, expr_region);
}
- {
- let mc = mc::MemCategorizationContext::new(self, &self.region_maps);
- cmt = mc.cat_expr_adjusted(expr, cmt, &adjustment)?;
- }
+ cmt = self.with_mc(|mc| mc.cat_expr_adjusted(expr, cmt, &adjustment))?;
if let Categorization::Deref(_, mc::BorrowedPtr(_, r_ptr)) = cmt.cat {
self.mk_subregion_due_to_dereference(expr.span,
mutability: hir::Mutability, base: &hir::Expr) {
debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
- let cmt = {
- let mc = mc::MemCategorizationContext::new(self, &self.region_maps);
- ignore_err!(mc.cat_expr(base))
- };
+ let cmt = ignore_err!(self.with_mc(|mc| mc.cat_expr(base)));
debug!("link_addr_of: cmt={:?}", cmt);
None => { return; }
Some(ref expr) => &**expr,
};
- let mc = &mc::MemCategorizationContext::new(self, &self.region_maps);
- let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
- self.link_pattern(mc, discr_cmt, &local.pat);
+ let discr_cmt = ignore_err!(self.with_mc(|mc| mc.cat_expr(init_expr)));
+ self.link_pattern(discr_cmt, &local.pat);
}
/// Computes the guarantors for any ref bindings in a match and
/// linked to the lifetime of its guarantor (if any).
fn link_match(&self, discr: &hir::Expr, arms: &[hir::Arm]) {
debug!("regionck::for_match()");
- let mc = &mc::MemCategorizationContext::new(self, &self.region_maps);
- let discr_cmt = ignore_err!(mc.cat_expr(discr));
+ let discr_cmt = ignore_err!(self.with_mc(|mc| mc.cat_expr(discr)));
debug!("discr_cmt={:?}", discr_cmt);
for arm in arms {
for root_pat in &arm.pats {
- self.link_pattern(mc, discr_cmt.clone(), &root_pat);
+ self.link_pattern(discr_cmt.clone(), &root_pat);
}
}
}
/// linked to the lifetime of its guarantor (if any).
fn link_fn_args(&self, body_scope: CodeExtent, args: &[hir::Arg]) {
debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
- let mc = &mc::MemCategorizationContext::new(self, &self.region_maps);
for arg in args {
let arg_ty = self.node_ty(arg.id);
let re_scope = self.tcx.mk_region(ty::ReScope(body_scope));
- let arg_cmt = mc.cat_rvalue(
- arg.id, arg.pat.span, re_scope, arg_ty);
+ let arg_cmt = self.with_mc(|mc| {
+ mc.cat_rvalue(arg.id, arg.pat.span, re_scope, arg_ty)
+ });
debug!("arg_ty={:?} arg_cmt={:?} arg={:?}",
arg_ty,
arg_cmt,
arg);
- self.link_pattern(mc, arg_cmt, &arg.pat);
+ self.link_pattern(arg_cmt, &arg.pat);
}
}
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found
/// in the discriminant, if needed.
- fn link_pattern<'t>(&self,
- mc: &mc::MemCategorizationContext<'a, 'gcx, 'tcx>,
- discr_cmt: mc::cmt<'tcx>,
- root_pat: &hir::Pat) {
+ fn link_pattern(&self, discr_cmt: mc::cmt<'tcx>, root_pat: &hir::Pat) {
debug!("link_pattern(discr_cmt={:?}, root_pat={:?})",
discr_cmt,
root_pat);
- let _ = mc.cat_pattern(discr_cmt, root_pat, |_, sub_cmt, sub_pat| {
+ let _ = self.with_mc(|mc| {
+ mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, sub_pat| {
match sub_pat.node {
// `ref x` pattern
PatKind::Binding(hir::BindByRef(mutbl), ..) => {
}
_ => {}
}
- });
+ })
+ });
}
/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
// Detect by-ref upvar `x`:
let cause = match note {
mc::NoteUpvarRef(ref upvar_id) => {
- let upvar_capture_map = &self.tables.borrow_mut().upvar_capture_map;
- match upvar_capture_map.get(upvar_id) {
+ match self.tables.borrow().upvar_capture_map.get(upvar_id) {
Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
// The mutability of the upvar may have been modified
// by the above adjustment, so update our local variable.
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::util::nodemap::NodeMap;
-///////////////////////////////////////////////////////////////////////////
-// PUBLIC ENTRY POINTS
+use std::collections::hash_map::Entry;
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
pub fn closure_analyze(&self, body: &'gcx hir::Body) {
- let mut seed = SeedBorrowKind::new(self);
- seed.visit_body(body);
-
- let mut adjust = AdjustBorrowKind::new(self, seed.temp_closure_kinds);
- adjust.visit_body(body);
+ InferBorrowKindVisitor { fcx: self }.visit_body(body);
// it's our job to process these.
assert!(self.deferred_call_resolutions.borrow().is_empty());
}
}
-///////////////////////////////////////////////////////////////////////////
-// SEED BORROW KIND
-
-struct SeedBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+struct InferBorrowKindVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- temp_closure_kinds: NodeMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>,
}
-impl<'a, 'gcx, 'tcx> Visitor<'gcx> for SeedBorrowKind<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> Visitor<'gcx> for InferBorrowKindVisitor<'a, 'gcx, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> {
NestedVisitorMap::None
}
hir::ExprClosure(cc, _, body_id, _) => {
let body = self.fcx.tcx.hir.body(body_id);
self.visit_body(body);
- self.check_closure(expr, cc);
+ self.fcx.analyze_closure(expr.id, expr.span, body, cc);
}
_ => { }
}
}
-impl<'a, 'gcx, 'tcx> SeedBorrowKind<'a, 'gcx, 'tcx> {
- fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>) -> SeedBorrowKind<'a, 'gcx, 'tcx> {
- SeedBorrowKind { fcx: fcx, temp_closure_kinds: NodeMap() }
- }
+impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
+ fn analyze_closure(&self,
+ id: ast::NodeId,
+ span: Span,
+ body: &hir::Body,
+ capture_clause: hir::CaptureClause) {
+ /*!
+ * Analysis starting point.
+ */
- fn check_closure(&mut self,
- expr: &hir::Expr,
- capture_clause: hir::CaptureClause)
- {
- if !self.fcx.tables.borrow().closure_kinds.contains_key(&expr.id) {
- self.temp_closure_kinds.insert(expr.id, (ty::ClosureKind::Fn, None));
- debug!("check_closure: adding closure {:?} as Fn", expr.id);
- }
+ debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id());
- self.fcx.tcx.with_freevars(expr.id, |freevars| {
+ let infer_kind = match self.tables.borrow_mut().closure_kinds.entry(id) {
+ Entry::Occupied(_) => false,
+ Entry::Vacant(entry) => {
+ debug!("check_closure: adding closure {:?} as Fn", id);
+ entry.insert((ty::ClosureKind::Fn, None));
+ true
+ }
+ };
+
+ self.tcx.with_freevars(id, |freevars| {
for freevar in freevars {
let def_id = freevar.def.def_id();
- let var_node_id = self.fcx.tcx.hir.as_local_node_id(def_id).unwrap();
+ let var_node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let upvar_id = ty::UpvarId { var_id: var_node_id,
- closure_expr_id: expr.id };
+ closure_expr_id: id };
debug!("seed upvar_id {:?}", upvar_id);
let capture_kind = match capture_clause {
ty::UpvarCapture::ByValue
}
hir::CaptureByRef => {
- let origin = UpvarRegion(upvar_id, expr.span);
- let freevar_region = self.fcx.next_region_var(origin);
+ let origin = UpvarRegion(upvar_id, span);
+ let freevar_region = self.next_region_var(origin);
let upvar_borrow = ty::UpvarBorrow { kind: ty::ImmBorrow,
region: freevar_region };
ty::UpvarCapture::ByRef(upvar_borrow)
}
};
- self.fcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind);
+ self.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind);
}
});
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// ADJUST BORROW KIND
-
-struct AdjustBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- temp_closure_kinds: NodeMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>,
-}
-
-impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> {
- fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- temp_closure_kinds: NodeMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>)
- -> AdjustBorrowKind<'a, 'gcx, 'tcx> {
- AdjustBorrowKind { fcx: fcx, temp_closure_kinds: temp_closure_kinds }
- }
-
- fn analyze_closure(&mut self,
- id: ast::NodeId,
- span: Span,
- body: &hir::Body) {
- /*!
- * Analysis starting point.
- */
-
- debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id());
{
- let body_owner_def_id = self.fcx.tcx.hir.body_owner_def_id(body.id());
- let region_maps = &self.fcx.tcx.region_maps(body_owner_def_id);
- let param_env = self.fcx.param_env;
- let mut euv =
- euv::ExprUseVisitor::with_options(self,
- self.fcx,
- param_env,
- region_maps,
- mc::MemCategorizationOptions {
- during_closure_kind_inference: true
- });
- euv.consume_body(body);
+ let body_owner_def_id = self.tcx.hir.body_owner_def_id(body.id());
+ let region_maps = &self.tcx.region_maps(body_owner_def_id);
+ let mut delegate = InferBorrowKind {
+ fcx: self,
+ adjust_closure_kinds: NodeMap(),
+ adjust_upvar_captures: ty::UpvarCaptureMap::default(),
+ };
+ euv::ExprUseVisitor::with_infer(&mut delegate,
+ &self.infcx,
+ self.param_env,
+ region_maps,
+ &self.tables.borrow())
+ .consume_body(body);
+
+ // Write the adjusted values back into the main tables.
+ if infer_kind {
+ if let Some(kind) = delegate.adjust_closure_kinds.remove(&id) {
+ self.tables.borrow_mut().closure_kinds.insert(id, kind);
+ }
+ }
+ self.tables.borrow_mut().upvar_capture_map.extend(
+ delegate.adjust_upvar_captures);
}
// Now that we've analyzed the closure, we know how each
// inference algorithm will reject it).
// Extract the type variables UV0...UVn.
- let (def_id, closure_substs) = match self.fcx.node_ty(id).sty {
+ let (def_id, closure_substs) = match self.node_ty(id).sty {
ty::TyClosure(def_id, substs) => (def_id, substs),
ref t => {
span_bug!(
debug!("analyze_closure: id={:?} closure_substs={:?} final_upvar_tys={:?}",
id, closure_substs, final_upvar_tys);
for (upvar_ty, final_upvar_ty) in
- closure_substs.upvar_tys(def_id, self.fcx.tcx).zip(final_upvar_tys)
+ closure_substs.upvar_tys(def_id, self.tcx).zip(final_upvar_tys)
{
- self.fcx.demand_eqtype(span, final_upvar_ty, upvar_ty);
+ self.demand_eqtype(span, final_upvar_ty, upvar_ty);
}
- // If we are also inferred the closure kind here, update the
- // main table and process any deferred resolutions.
- if let Some(&(kind, context)) = self.temp_closure_kinds.get(&id) {
- self.fcx.tables.borrow_mut().closure_kinds.insert(id, (kind, context));
- let closure_def_id = self.fcx.tcx.hir.local_def_id(id);
- debug!("closure_kind({:?}) = {:?}", closure_def_id, kind);
-
+        // If we also inferred the closure kind here,
+        // process any deferred resolutions.
+ if infer_kind {
+ let closure_def_id = self.tcx.hir.local_def_id(id);
let deferred_call_resolutions =
- self.fcx.remove_deferred_call_resolutions(closure_def_id);
+ self.remove_deferred_call_resolutions(closure_def_id);
for deferred_call_resolution in deferred_call_resolutions {
- deferred_call_resolution.resolve(self.fcx);
+ deferred_call_resolution.resolve(self);
}
}
}
// Returns a list of `ClosureUpvar`s for each upvar.
- fn final_upvar_tys(&mut self, closure_id: ast::NodeId) -> Vec<Ty<'tcx>> {
+ fn final_upvar_tys(&self, closure_id: ast::NodeId) -> Vec<Ty<'tcx>> {
// Presently an unboxed closure type cannot "escape" out of a
// function, so we will only encounter ones that originated in the
// local crate or were inlined into it along with some function.
// This may change if abstract return types of some sort are
// implemented.
- let tcx = self.fcx.tcx;
+ let tcx = self.tcx;
tcx.with_freevars(closure_id, |freevars| {
freevars.iter().map(|freevar| {
let def_id = freevar.def.def_id();
let var_id = tcx.hir.as_local_node_id(def_id).unwrap();
- let freevar_ty = self.fcx.node_ty(var_id);
+ let freevar_ty = self.node_ty(var_id);
let upvar_id = ty::UpvarId {
var_id: var_id,
closure_expr_id: closure_id
};
- let capture = self.fcx.upvar_capture(upvar_id).unwrap();
+ let capture = self.tables.borrow().upvar_capture(upvar_id);
debug!("var_id={:?} freevar_ty={:?} capture={:?}",
var_id, freevar_ty, capture);
}).collect()
})
}
+}
+struct InferBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
+ adjust_closure_kinds: NodeMap<(ty::ClosureKind, Option<(Span, ast::Name)>)>,
+ adjust_upvar_captures: ty::UpvarCaptureMap<'tcx>,
+}
+
+impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> {
fn adjust_upvar_borrow_kind_for_consume(&mut self,
cmt: mc::cmt<'tcx>,
mode: euv::ConsumeMode)
guarantor.span,
tcx.hir.name(upvar_id.var_id));
- let upvar_capture_map =
- &mut self.fcx.tables.borrow_mut().upvar_capture_map;
- upvar_capture_map.insert(upvar_id, ty::UpvarCapture::ByValue);
+ self.adjust_upvar_captures.insert(upvar_id, ty::UpvarCapture::ByValue);
}
mc::NoteClosureEnv(upvar_id) => {
// we get just a closureenv ref if this is a
// upvar, then we need to modify the
// borrow_kind of the upvar to make sure it
// is inferred to mutable if necessary
- {
- let upvar_capture_map = &mut self.fcx.tables.borrow_mut().upvar_capture_map;
- let ub = upvar_capture_map.get_mut(&upvar_id).unwrap();
- self.adjust_upvar_borrow_kind(upvar_id, ub, borrow_kind);
- }
+ self.adjust_upvar_borrow_kind(upvar_id, borrow_kind);
// also need to be in an FnMut closure since this is not an ImmBorrow
self.adjust_closure_kind(upvar_id.closure_expr_id,
/// some particular use.
fn adjust_upvar_borrow_kind(&mut self,
upvar_id: ty::UpvarId,
- upvar_capture: &mut ty::UpvarCapture,
kind: ty::BorrowKind) {
+ let upvar_capture = self.adjust_upvar_captures.get(&upvar_id).cloned()
+ .unwrap_or_else(|| self.fcx.tables.borrow().upvar_capture(upvar_id));
debug!("adjust_upvar_borrow_kind(upvar_id={:?}, upvar_capture={:?}, kind={:?})",
upvar_id, upvar_capture, kind);
- match *upvar_capture {
+ match upvar_capture {
ty::UpvarCapture::ByValue => {
// Upvar is already by-value, the strongest criteria.
}
- ty::UpvarCapture::ByRef(ref mut upvar_borrow) => {
+ ty::UpvarCapture::ByRef(mut upvar_borrow) => {
match (upvar_borrow.kind, kind) {
// Take RHS:
(ty::ImmBorrow, ty::UniqueImmBorrow) |
(ty::ImmBorrow, ty::MutBorrow) |
(ty::UniqueImmBorrow, ty::MutBorrow) => {
upvar_borrow.kind = kind;
+ self.adjust_upvar_captures.insert(upvar_id,
+ ty::UpvarCapture::ByRef(upvar_borrow));
}
// Take LHS:
(ty::ImmBorrow, ty::ImmBorrow) |
debug!("adjust_closure_kind(closure_id={}, new_kind={:?}, upvar_span={:?}, var_name={})",
closure_id, new_kind, upvar_span, var_name);
- if let Some(&(existing_kind, _)) = self.temp_closure_kinds.get(&closure_id) {
+ let closure_kind = self.adjust_closure_kinds.get(&closure_id).cloned()
+ .or_else(|| self.fcx.tables.borrow().closure_kinds.get(&closure_id).cloned());
+ if let Some((existing_kind, _)) = closure_kind {
debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}",
closure_id, existing_kind, new_kind);
(ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
// new kind is stronger than the old kind
- self.temp_closure_kinds.insert(
+ self.adjust_closure_kinds.insert(
closure_id,
(new_kind, Some((upvar_span, var_name)))
);
}
}
-impl<'a, 'gcx, 'tcx> Visitor<'gcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> {
- fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> {
- NestedVisitorMap::None
- }
-
- fn visit_fn(&mut self,
- fn_kind: intravisit::FnKind<'gcx>,
- decl: &'gcx hir::FnDecl,
- body: hir::BodyId,
- span: Span,
- id: ast::NodeId)
- {
- intravisit::walk_fn(self, fn_kind, decl, body, span, id);
-
- let body = self.fcx.tcx.hir.body(body);
- self.visit_body(body);
- self.analyze_closure(id, span, body);
- }
-}
-
-impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> {
fn consume(&mut self,
_consume_id: ast::NodeId,
_consume_span: Span,
NestedVisitorMap::None
}
- fn visit_stmt(&mut self, s: &'gcx hir::Stmt) {
- self.visit_node_id(s.span, s.node.id());
- intravisit::walk_stmt(self, s);
- }
-
fn visit_expr(&mut self, e: &'gcx hir::Expr) {
self.fix_scalar_builtin_expr(e);
source,
target);
- tcx.infer_ctxt(()).enter(|infcx| {
+ tcx.infer_ctxt().enter(|infcx| {
let cause = ObligationCause::misc(span, impl_node_id);
let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>,
mt_b: ty::TypeAndMut<'tcx>,
//! `tcx.inherent_impls(def_id)`). That value, however,
//! is computed by selecting an idea from this table.
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::DepKind;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::hir;
use rustc::hir::itemlikevisit::ItemLikeVisitor;
});
for &impl_def_id in &result[..] {
- tcx.dep_graph.read(DepNode::Hir(impl_def_id));
+ let def_path_hash = tcx.def_path_hash(impl_def_id);
+ tcx.dep_graph.read(def_path_hash.to_dep_node(DepKind::Hir));
}
result
for (i, &impl1_def_id) in impls.iter().enumerate() {
for &impl2_def_id in &impls[(i + 1)..] {
- self.tcx.infer_ctxt(()).enter(|infcx| {
+ self.tcx.infer_ctxt().enter(|infcx| {
if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() {
self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id)
}
use rustc::traits;
use rustc::ty::{self, TyCtxt, TypeFoldable};
use syntax::ast;
-use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::itemlikevisit::ItemLikeVisitor;
return
}
- let _task =
- tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id));
-
// Trigger building the specialization graph for the trait of this impl.
// This will detect any overlap errors.
tcx.specialization_graph_of(trait_def_id);
```
"##,
+E0609: r##"
+Attempted to access a non-existent field in a struct.
+
+Erroneous code example:
+
+```compile_fail,E0609
+struct StructWithFields {
+ x: u32,
+}
+
+let s = StructWithFields { x: 0 };
+println!("{}", s.foo); // error: no field `foo` on type `StructWithFields`
+```
+
+To fix this error, check that you didn't misspell the field's name or that the
+field actually exists. Example:
+
+```
+struct StructWithFields {
+ x: u32,
+}
+
+let s = StructWithFields { x: 0 };
+println!("{}", s.x); // ok!
+```
+"##,
+
+E0610: r##"
+Attempted to access a field on a primitive type.
+
+Erroneous code example:
+
+```compile_fail,E0610
+let x: u32 = 0;
+println!("{}", x.foo); // error: `{integer}` is a primitive type, therefore
+ // doesn't have fields
+```
+
+Primitive types are the most basic types available in Rust and don't have
+fields. To access data via named fields, struct types are used. Example:
+
+```
+// We declare a struct called `Foo` containing two fields:
+struct Foo {
+ x: u32,
+ y: i64,
+}
+
+// We create an instance of this struct:
+let variable = Foo { x: 0, y: -12 };
+// And we can now access its fields:
+println!("x: {}, y: {}", variable.x, variable.y);
+```
+
+For more information see The Rust Book: https://doc.rust-lang.org/book/
+"##,
+
}
register_diagnostics! {
expected: Ty<'tcx>,
actual: Ty<'tcx>)
-> bool {
- tcx.infer_ctxt(()).enter(|ref infcx| {
+ tcx.infer_ctxt().enter(|ref infcx| {
let param_env = ty::ParamEnv::empty(Reveal::UserFacing);
let mut fulfill_cx = FulfillmentContext::new();
match infcx.at(&cause, param_env).eq(expected, actual) {
use hir::def_id::DefId;
use middle::resolve_lifetime as rl;
-use rustc::dep_graph::{AssertDepGraphSafe, DepNode};
+use rustc::dep_graph::{AssertDepGraphSafe, DepKind};
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::hir::map as hir_map;
hir::ItemEnum(..) |
hir::ItemStruct(..) |
hir::ItemUnion(..) => {
- tcx.dep_graph.with_task(DepNode::ItemVarianceConstraints(def_id),
+ let dep_node = def_id.to_dep_node(tcx, DepKind::ItemVarianceConstraints);
+ tcx.dep_graph.with_task(dep_node,
AssertDepGraphSafe(self),
def_id,
visit_item_task);
//! parameters. See README.md for details.
use arena;
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::DepKind;
use rustc::hir;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::ty::{self, CrateVariancesMap, TyCtxt};
// Lacking red/green, we read the variances for all items here
// but ignore the dependencies, then re-synthesize the ones we need.
let crate_map = tcx.dep_graph.with_ignore(|| tcx.crate_variances(LOCAL_CRATE));
- tcx.dep_graph.read(DepNode::ItemVarianceConstraints(item_def_id));
+ let dep_node = item_def_id.to_dep_node(tcx, DepKind::ItemVarianceConstraints);
+ tcx.dep_graph.read(dep_node);
for &dep_def_id in crate_map.dependencies.less_than(&item_def_id) {
if dep_def_id.is_local() {
- tcx.dep_graph.read(DepNode::ItemVarianceConstraints(dep_def_id));
+ let dep_node = dep_def_id.to_dep_node(tcx, DepKind::ItemVarianceConstraints);
+ tcx.dep_graph.read(dep_node);
} else {
- tcx.dep_graph.read(DepNode::ItemVariances(dep_def_id));
+ let dep_node = dep_def_id.to_dep_node(tcx, DepKind::ItemVariances);
+ tcx.dep_graph.read(dep_node);
}
}
ret.extend(build_impls(cx, did));
clean::EnumItem(build_enum(cx, did))
}
- // Assume that the enum type is reexported next to the variant, and
- // variants don't show up in documentation specially.
- // Similarly, consider that struct type is reexported next to its constructor.
- Def::Variant(..) |
+ // Never inline enum variants but leave them shown as reexports.
+ Def::Variant(..) => return None,
+ // Assume that enum variants and struct types are reexported next to
+ // their constructors.
Def::VariantCtor(..) |
Def::StructCtor(..) => return Some(Vec::new()),
Def::Mod(did) => {
// two namespaces, so the target may be listed twice. Make sure we only
// visit each node at most once.
let mut visited = FxHashSet();
- for item in cx.tcx.sess.cstore.item_children(did) {
+ for item in cx.tcx.sess.cstore.item_children(did, cx.tcx.sess) {
let def_id = item.def.def_id();
if cx.tcx.sess.cstore.visibility(def_id) == ty::Visibility::Public {
if !visited.insert(def_id) { continue }
}
}).collect()
} else {
- cx.tcx.sess.cstore.item_children(root).iter().map(|item| item.def)
+ cx.tcx.sess.cstore.item_children(root, cx.tcx.sess).iter().map(|item| item.def)
.filter_map(as_primitive).collect()
};
pub fn is_ty_method(&self) -> bool {
self.type_() == ItemType::TyMethod
}
+ pub fn is_typedef(&self) -> bool {
+ self.type_() == ItemType::Typedef
+ }
pub fn is_primitive(&self) -> bool {
self.type_() == ItemType::Primitive
}
/// rendering function with the necessary arguments for linking to a local path.
fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path,
print_all: bool, use_absolute: bool) -> fmt::Result {
- let empty = clean::PathSegment {
- name: String::new(),
- params: clean::PathParameters::Parenthesized {
- inputs: Vec::new(),
- output: None,
- }
- };
- let last = path.segments.last()
- .unwrap_or(&empty);
- let rel_root = if path.segments.is_empty() {
- None
- } else {
- match &*path.segments[0].name {
- "self" => Some("./".to_string()),
- _ => None,
- }
+ let last = path.segments.last().unwrap();
+ let rel_root = match &*path.segments[0].name {
+ "self" => Some("./".to_string()),
+ _ => None,
};
if print_all {
Some((_, _, fqp)) => {
format!("{}::{}",
fqp[..fqp.len() - 1].join("::"),
- HRef::new(did, fqp.last().unwrap_or(&String::new())))
+ HRef::new(did, fqp.last().unwrap()))
}
None => format!("{}", HRef::new(did, &last.name)),
}
}
clean::QPath { ref name, ref self_type, ref trait_ } => {
let should_show_cast = match *trait_ {
- box clean::ResolvedPath { .. } => {
- let path = clean::Path::singleton(name.clone());
- !path.segments.is_empty() && &format!("{:#}", trait_) != "()" &&
- &format!("{:#}", self_type) != "Self"
+ box clean::ResolvedPath { ref path, .. } => {
+ !path.segments.is_empty() && !self_type.is_self_type()
}
_ => true,
};
// everything comes in as a fully resolved QPath (hard to
// look at).
box clean::ResolvedPath { did, ref typarams, .. } => {
- let path = clean::Path::singleton(name.clone());
- resolved_path(f, did, &path, true, use_absolute)?;
+ match href(did) {
+ Some((ref url, _, ref path)) if !f.alternate() => {
+ write!(f,
+ "<a class=\"type\" href=\"{url}#{shortty}.{name}\" \
+ title=\"type {path}::{name}\">{name}</a>",
+ url = url,
+ shortty = ItemType::AssociatedType,
+ name = name,
+ path = path.join("::"))?;
+ }
+ _ => write!(f, "{}", name)?,
+ }
// FIXME: `typarams` are not rendered, and this seems bad?
drop(typarams);
}
}
}
- try_err!(mkdir(&dst), &dst);
+ try_err!(fs::create_dir_all(&dst), &dst);
krate = render_sources(&dst, &mut scx, krate)?;
let cx = Context {
current: Vec::new(),
// Write out the shared files. Note that these are shared among all rustdoc
// docs placed in the output directory, so this needs to be a synchronized
// operation with respect to all other rustdocs running around.
- try_err!(mkdir(&cx.dst), &cx.dst);
+ try_err!(fs::create_dir_all(&cx.dst), &cx.dst);
let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true);
// Add all the static files. These may already exist, but we just
fn render_sources(dst: &Path, scx: &mut SharedContext,
krate: clean::Crate) -> Result<clean::Crate, Error> {
info!("emitting source files");
- let dst = dst.join("src");
- try_err!(mkdir(&dst), &dst);
- let dst = dst.join(&krate.name);
- try_err!(mkdir(&dst), &dst);
+ let dst = dst.join("src").join(&krate.name);
+ try_err!(fs::create_dir_all(&dst), &dst);
let mut folder = SourceCollector {
dst: dst,
scx: scx,
Ok(try_err!(try_err!(File::create(&dst), &dst).write_all(contents), &dst))
}
-/// Makes a directory on the filesystem, failing the thread if an error occurs
-/// and skipping if the directory already exists.
-///
-/// Note that this also handles races as rustdoc is likely to be run
-/// concurrently against another invocation.
-fn mkdir(path: &Path) -> io::Result<()> {
- match fs::create_dir(path) {
- Ok(()) => Ok(()),
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()),
- Err(e) => Err(e)
- }
-}
-
/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree. Each component in the cleaned path will be passed as an
let mut href = String::new();
clean_srcpath(&self.scx.src_root, &p, false, |component| {
cur.push(component);
- mkdir(&cur).unwrap();
+ fs::create_dir_all(&cur).unwrap();
root_path.push_str("../");
href.push_str(component);
href.push('/');
// these modules are recursed into, but not rendered normally
// (a flag on the context).
if !self.render_redirect_pages {
- self.render_redirect_pages = maybe_ignore_item(&item);
+ self.render_redirect_pages = item.is_stripped();
}
if item.is_mod() {
// BTreeMap instead of HashMap to get a sorted output
let mut map = BTreeMap::new();
for item in &m.items {
- if maybe_ignore_item(item) { continue }
+ if item.is_stripped() { continue }
let short = item.type_().css_class();
let myname = match item.name {
if let clean::DefaultImplItem(..) = items[*i].inner {
return false;
}
- !maybe_ignore_item(&items[*i])
+ !items[*i].is_stripped()
}).collect::<Vec<usize>>();
// the order of item types in the listing
Ok(())
}
-fn maybe_ignore_item(it: &clean::Item) -> bool {
- match it.inner {
- clean::StrippedItem(..) => true,
- clean::ModuleItem(ref m) => {
- it.doc_value().is_none() && m.items.is_empty()
- && it.visibility != Some(clean::Public)
- },
- _ => false,
- }
-}
-
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
let mut stability = vec![];
where_clause = WhereClause { gens: &t.generics, indent: 0, end_newline: true },
type_ = t.type_)?;
- document(w, cx, it)
+ document(w, cx, it)?;
+
+ // Render any items associated directly to this alias, as otherwise they
+ // won't be visible anywhere in the docs. It would be nice to also show
+ // associated items from the aliased type (see discussion in #32077), but
+ // we need #14072 to make sense of the generics.
+ render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}
impl<'a> fmt::Display for Sidebar<'a> {
let parentlen = cx.current.len() - if it.is_mod() {1} else {0};
if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union()
- || it.is_enum() || it.is_mod()
+ || it.is_enum() || it.is_mod() || it.is_typedef()
{
write!(fmt, "<p class='location'>")?;
match it.inner {
clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
clean::UnionItem(..) => write!(fmt, "Union ")?,
clean::EnumItem(..) => write!(fmt, "Enum ")?,
+ clean::TypedefItem(..) => write!(fmt, "Type Definition ")?,
clean::ModuleItem(..) => if it.is_crate() {
write!(fmt, "Crate ")?;
} else {
clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?,
clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?,
clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?,
+ clean::TypedefItem(ref t, _) => sidebar_typedef(fmt, it, t)?,
clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?,
_ => (),
}
Ok(())
}
+fn sidebar_typedef(fmt: &mut fmt::Formatter, it: &clean::Item,
+ _t: &clean::Typedef) -> fmt::Result {
+ let sidebar = sidebar_assoc_items(it);
+
+ if !sidebar.is_empty() {
+ write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
+ }
+ Ok(())
+}
+
fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item,
u: &clean::Union) -> fmt::Result {
let mut sidebar = String::new();
if let clean::DefaultImplItem(..) = it.inner {
false
} else {
- !maybe_ignore_item(it) && !it.is_stripped() && it.type_() == myty
+ !it.is_stripped() && it.type_() == myty
}
}) {
let (short, name) = match myty {
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+/*!
+ * Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+ * file at the top-level directory of this distribution and at
+ * http://rust-lang.org/COPYRIGHT.
+ *
+ * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+ * http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+ * <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+ * option. This file may not be copied, modified, or distributed
+ * except according to those terms.
+ */
/*jslint browser: true, es5: true */
/*globals $: true, rootPath: true */
self.fold_item_recur(i)
};
- i.and_then(|i| {
- match i.inner {
- // emptied modules have no need to exist
- clean::ModuleItem(ref m)
- if m.items.is_empty() &&
- i.doc_value().is_none() => None,
- _ => {
- if self.update_retained {
- self.retained.insert(i.def_id);
- }
- Some(i)
- }
+ if let Some(ref i) = i {
+ if self.update_retained {
+ self.retained.insert(i.def_id);
}
- })
+ }
+ i
}
}
if !self.view_item_stack.insert(def_node_id) { return false }
let ret = match tcx.hir.get(def_node_id) {
- hir_map::NodeItem(it) => {
+ hir_map::NodeItem(&hir::Item { node: hir::ItemMod(ref m), .. }) if glob => {
let prev = mem::replace(&mut self.inlining, true);
- if glob {
- match it.node {
- hir::ItemMod(ref m) => {
- for i in &m.item_ids {
- let i = self.cx.tcx.hir.expect_item(i.id);
- self.visit_item(i, None, om);
- }
- }
- hir::ItemEnum(..) => {}
- _ => { panic!("glob not mapped to a module or enum"); }
- }
- } else {
- self.visit_item(it, renamed, om);
+ for i in &m.item_ids {
+ let i = self.cx.tcx.hir.expect_item(i.id);
+ self.visit_item(i, None, om);
}
self.inlining = prev;
true
}
+ hir_map::NodeItem(it) if !glob => {
+ let prev = mem::replace(&mut self.inlining, true);
+ self.visit_item(it, renamed, om);
+ self.inlining = prev;
+ true
+ }
_ => false,
};
self.view_item_stack.remove(&def_node_id);
return;
}
- for item in self.cstore.item_children(def_id) {
+ for item in self.cstore.item_children(def_id, self.cx.tcx.sess) {
self.visit_item(item.def);
}
}
/// An iterator that splits an environment variable into paths according to
/// platform-specific conventions.
///
-/// This structure is created by the [`std::env::split_paths`] function See its
+/// This structure is created by the [`std::env::split_paths`] function. See its
/// documentation for more.
///
/// [`std::env::split_paths`]: fn.split_paths.html
os_imp::current_exe()
}
-/// An iterator over the arguments of a process, yielding a [`String`] value
-/// for each argument.
+/// An iterator over the arguments of a process, yielding a [`String`] value for
+/// each argument.
///
-/// This structure is created through the [`std::env::args`] function.
+/// This struct is created by the [`std::env::args`] function. See its
+/// documentation for more.
///
/// The first element is traditionally the path of the executable, but it can be
-/// set to arbitrary text, and may not even exist. This means this property should
-/// not be relied upon for security purposes.
+/// set to arbitrary text, and may not even exist. This means this property
+/// should not be relied upon for security purposes.
///
/// [`String`]: ../string/struct.String.html
/// [`std::env::args`]: ./fn.args.html
/// An iterator over the arguments of a process, yielding an [`OsString`] value
/// for each argument.
///
-/// This structure is created through the [`std::env::args_os`] function.
+/// This struct is created by the [`std::env::args_os`] function. See its
+/// documentation for more.
///
/// The first element is traditionally the path of the executable, but it can be
-/// set to arbitrary text, and may not even exist. This means this property should
-/// not be relied upon for security purposes.
+/// set to arbitrary text, and may not even exist. This means this property
+/// should not be relied upon for security purposes.
///
/// [`OsString`]: ../ffi/struct.OsString.html
/// [`std::env::args_os`]: ./fn.args_os.html
/// Failure to call [`from_raw`] will lead to a memory leak.
///
/// [`from_raw`]: #method.from_raw
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").unwrap();
+ ///
+ /// let ptr = c_string.into_raw();
+ ///
+ /// unsafe {
+ /// assert_eq!(b'f', *ptr as u8);
+ /// assert_eq!(b'o', *ptr.offset(1) as u8);
+ /// assert_eq!(b'o', *ptr.offset(2) as u8);
+ /// assert_eq!(b'\0', *ptr.offset(3) as u8);
+ ///
+ /// // retake pointer to free memory
+ /// let _ = CString::from_raw(ptr);
+ /// }
+ /// ```
#[stable(feature = "cstr_memory", since = "1.4.0")]
pub fn into_raw(self) -> *mut c_char {
Box::into_raw(self.into_inner()) as *mut c_char
///
/// The returned buffer does **not** contain the trailing nul separator and
/// it is guaranteed to not have any interior nul bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").unwrap();
+ /// let bytes = c_string.into_bytes();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o']);
+ /// ```
#[stable(feature = "cstring_into", since = "1.7.0")]
pub fn into_bytes(self) -> Vec<u8> {
let mut vec = self.into_inner().into_vec();
/// includes the trailing nul byte.
///
/// [`into_bytes`]: #method.into_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").unwrap();
+ /// let bytes = c_string.into_bytes_with_nul();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
+ /// ```
#[stable(feature = "cstring_into", since = "1.7.0")]
pub fn into_bytes_with_nul(self) -> Vec<u8> {
self.into_inner().into_vec()
///
/// The returned slice does **not** contain the trailing nul separator and
/// it is guaranteed to not have any interior nul bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").unwrap();
+ /// let bytes = c_string.as_bytes();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o']);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
/// includes the trailing nul byte.
///
/// [`as_bytes`]: #method.as_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").unwrap();
+ /// let bytes = c_string.as_bytes_with_nul();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
fn $test_name() {
#![test]
assert_eq!((0 as $T).checked_next_power_of_two(), Some(1));
- assert!(($T::MAX / 2).checked_next_power_of_two().is_some());
+ let smax = $T::MAX >> 1;
+ assert_eq!(smax.checked_next_power_of_two(), Some(smax+1));
+ assert_eq!((smax + 1).checked_next_power_of_two(), Some(smax + 1));
+ assert_eq!((smax + 2).checked_next_power_of_two(), None);
assert_eq!(($T::MAX - 1).checked_next_power_of_two(), None);
assert_eq!($T::MAX.checked_next_power_of_two(), None);
let mut next_power = 1;
/// This is a "helper marker trait" used to provide impl blocks for the
/// `UnwindSafe` trait, for more information see that documentation.
#[stable(feature = "catch_unwind", since = "1.9.0")]
-#[rustc_on_unimplemented = "the type {Self} contains interior mutability \
+#[rustc_on_unimplemented = "the type {Self} may contain interior mutability \
and a reference may not be safely transferrable \
across a catch_unwind boundary"]
pub trait RefUnwindSafe {}
use ffi::OsStr;
use fmt;
+use fs;
use io;
use path::Path;
use str;
/// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
- pub fn stdin(&mut self, cfg: Stdio) -> &mut Command {
- self.inner.stdin(cfg.0);
+ pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stdin(cfg.into().0);
self
}
/// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
- pub fn stdout(&mut self, cfg: Stdio) -> &mut Command {
- self.inner.stdout(cfg.0);
+ pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stdout(cfg.into().0);
self
}
/// .expect("ls command failed to start");
/// ```
#[stable(feature = "process", since = "1.0.0")]
- pub fn stderr(&mut self, cfg: Stdio) -> &mut Command {
- self.inner.stderr(cfg.0);
+ pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.inner.stderr(cfg.into().0);
self
}
}
}
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStdin> for Stdio {
+ fn from(child: ChildStdin) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStdout> for Stdio {
+ fn from(child: ChildStdout) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<ChildStderr> for Stdio {
+ fn from(child: ChildStderr) -> Stdio {
+ Stdio::from_inner(child.into_inner().into())
+ }
+}
+
+#[stable(feature = "stdio_from", since = "1.20.0")]
+impl From<fs::File> for Stdio {
+ fn from(file: fs::File) -> Stdio {
+ Stdio::from_inner(file.into_inner().into())
+ }
+}
+
/// Describes the result of a process after it has terminated.
///
/// This `struct` is used to represent the exit status of a child process.
}
}
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ Stdio::Fd(pipe.into_fd())
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(file: File) -> Stdio {
+ Stdio::Fd(file.into_fd())
+ }
+}
+
impl ChildStdio {
fn fd(&self) -> Option<usize> {
match *self {
use io;
use libc::{self, c_int};
use mem;
-use sys::{cvt, cvt_r};
+use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use sys::fd::FileDesc;
+use sys::{cvt, cvt_r};
////////////////////////////////////////////////////////////////////////////////
// Anonymous pipes
pub struct AnonPipe(FileDesc);
pub fn anon_pipe() -> io::Result<(AnonPipe, AnonPipe)> {
+ weak! { fn pipe2(*mut c_int, c_int) -> c_int }
+ static INVALID: AtomicBool = ATOMIC_BOOL_INIT;
+
let mut fds = [0; 2];
// Unfortunately the only known way right now to create atomically set the
target_os = "freebsd",
target_os = "linux",
target_os = "netbsd",
- target_os = "openbsd"))
+ target_os = "openbsd")) &&
+ !INVALID.load(Ordering::SeqCst)
{
- weak! { fn pipe2(*mut c_int, c_int) -> c_int }
+
if let Some(pipe) = pipe2.get() {
- cvt(unsafe { pipe(fds.as_mut_ptr(), libc::O_CLOEXEC) })?;
- return Ok((AnonPipe(FileDesc::new(fds[0])),
- AnonPipe(FileDesc::new(fds[1]))));
+ // Note that despite calling a glibc function here we may still
+ // get ENOSYS. Glibc has `pipe2` since 2.9 and doesn't try to
+ // emulate on older kernels, so if you happen to be running on
+ // an older kernel you may see `pipe2` as a symbol but still not
+ // see the syscall.
+ match cvt(unsafe { pipe(fds.as_mut_ptr(), libc::O_CLOEXEC) }) {
+ Ok(_) => {
+ return Ok((AnonPipe(FileDesc::new(fds[0])),
+ AnonPipe(FileDesc::new(fds[1]))));
+ }
+ Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => {
+ INVALID.store(true, Ordering::SeqCst);
+ }
+ Err(e) => return Err(e),
+ }
}
}
cvt(unsafe { libc::pipe(fds.as_mut_ptr()) })?;
}
}
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ Stdio::Fd(pipe.into_fd())
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(file: File) -> Stdio {
+ Stdio::Fd(file.into_fd())
+ }
+}
+
impl ChildStdio {
pub fn fd(&self) -> Option<c_int> {
match *self {
}
}
+impl From<AnonPipe> for Stdio {
+ fn from(pipe: AnonPipe) -> Stdio {
+ Stdio::Handle(pipe.into_handle())
+ }
+}
+
+impl From<File> for Stdio {
+ fn from(file: File) -> Stdio {
+ Stdio::Handle(file.into_handle())
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
// Processes
////////////////////////////////////////////////////////////////////////////////
})
}
+#[macro_export]
+macro_rules! type_error_struct {
+ ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({
+ if $typ.references_error() {
+ $session.diagnostic().struct_dummy()
+ } else {
+ struct_span_err!($session, $span, $code, $($message)*)
+ }
+ })
+}
+
#[macro_export]
macro_rules! struct_span_warn {
($session:expr, $span:expr, $code:ident, $($message:tt)*) => ({
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
- matches: Vec<Vec<Rc<NamedMatch>>>,
+ matches: Vec<Rc<Vec<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
+impl MatcherPos {
+ fn push_match(&mut self, idx: usize, m: NamedMatch) {
+ let matches = Rc::make_mut(&mut self.matches[idx]);
+ matches.push(m);
+ }
+}
+
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub fn count_names(ms: &[TokenTree]) -> usize {
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
+#[derive(Debug, Clone)]
pub enum NamedMatch {
- MatchedSeq(Vec<Rc<NamedMatch>>, syntax_pos::Span),
+ MatchedSeq(Rc<Vec<NamedMatch>>, syntax_pos::Span),
MatchedNonterminal(Rc<Nonterminal>)
}
-fn nameize<I: Iterator<Item=Rc<NamedMatch>>>(sess: &ParseSess, ms: &[TokenTree], mut res: I)
+fn nameize<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, ms: &[TokenTree], mut res: I)
-> NamedParseResult {
- fn n_rec<I: Iterator<Item=Rc<NamedMatch>>>(sess: &ParseSess, m: &TokenTree, mut res: &mut I,
+ fn n_rec<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, m: &TokenTree, mut res: &mut I,
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>)
-> Result<(), (syntax_pos::Span, String)> {
match *m {
TokenTree::MetaVarDecl(sp, bind_name, _) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
- spot.insert(res.next().unwrap());
+ // FIXME(simulacrum): Don't construct Rc here
+ spot.insert(Rc::new(res.next().unwrap()));
}
Occupied(..) => {
return Err((sp, format!("duplicated bind name: {}", bind_name)))
}
}
-fn create_matches(len: usize) -> Vec<Vec<Rc<NamedMatch>>> {
- (0..len).into_iter().map(|_| Vec::new()).collect()
+fn create_matches(len: usize) -> Vec<Rc<Vec<NamedMatch>>> {
+ (0..len).into_iter().map(|_| Rc::new(Vec::new())).collect()
}
fn inner_parse_loop(sess: &ParseSess,
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
- // I bet this is a perf problem: we're preemptively
- // doing a lot of array work that will get thrown away
- // most of the time.
-
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = ei.matches[idx].clone();
- new_pos.matches[idx]
- .push(Rc::new(MatchedSeq(sub, Span { lo: ei.sp_lo, ..span })));
+ new_pos.push_match(idx, MatchedSeq(sub, Span { lo: ei.sp_lo, ..span }));
}
new_pos.match_cur = ei.match_hi;
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
- new_ei.matches[idx].push(Rc::new(MatchedSeq(vec![], sp)));
+ new_ei.push_match(idx, MatchedSeq(Rc::new(vec![]), sp));
}
cur_eis.push(new_ei);
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&parser.token, &token::Eof) {
if eof_eis.len() == 1 {
- let matches = eof_eis[0].matches.iter_mut().map(|mut dv| dv.pop().unwrap());
+ let matches = eof_eis[0].matches.iter_mut().map(|mut dv| {
+ Rc::make_mut(dv).pop().unwrap()
+ });
return nameize(sess, ms, matches);
} else if eof_eis.len() > 1 {
return Error(parser.span, "ambiguity: multiple successful parses".to_string());
let mut ei = bb_eis.pop().unwrap();
if let TokenTree::MetaVarDecl(span, _, ident) = ei.top_elts.get_tt(ei.idx) {
let match_cur = ei.match_cur;
- ei.matches[match_cur].push(Rc::new(MatchedNonterminal(
- Rc::new(parse_nt(&mut parser, span, &ident.name.as_str())))));
+ ei.push_match(match_cur,
+ MatchedNonterminal(Rc::new(parse_nt(&mut parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
} else {
let lhses = match *argument_map[&lhs_nm] {
MatchedSeq(ref s, _) => {
s.iter().map(|m| {
- if let MatchedNonterminal(ref nt) = **m {
+ if let MatchedNonterminal(ref nt) = *m {
if let NtTT(ref tt) = **nt {
let tt = quoted::parse(tt.clone().into(), true, sess).pop().unwrap();
valid &= check_lhs_nt_follows(sess, features, &tt);
let rhses = match *argument_map[&rhs_nm] {
MatchedSeq(ref s, _) => {
s.iter().map(|m| {
- if let MatchedNonterminal(ref nt) = **m {
+ if let MatchedNonterminal(ref nt) = *m {
if let NtTT(ref tt) = **nt {
return quoted::parse(tt.clone().into(), false, sess).pop().unwrap();
}
repeats: &[(usize, usize)])
-> Option<Rc<NamedMatch>> {
interpolations.get(&ident).map(|matched| {
- repeats.iter().fold(matched.clone(), |ad, &(idx, _)| {
- match *ad {
- MatchedNonterminal(_) => {
- // end of the line; duplicate henceforth
- ad.clone()
- }
- MatchedSeq(ref ads, _) => ads[idx].clone()
+ let mut matched = matched.clone();
+ for &(idx, _) in repeats {
+ let m = matched.clone();
+ match *m {
+ MatchedNonterminal(_) => break,
+ MatchedSeq(ref ads, _) => matched = Rc::new(ads[idx].clone()),
}
- })
+ }
+
+ matched
})
}
let mac = respan(lo.to(self.prev_span), Mac_ { path: pth, tts: tts });
(keywords::Invalid.ident(), ast::TraitItemKind::Macro(mac))
} else {
- let (constness, unsafety, abi) = match self.parse_fn_front_matter() {
- Ok(cua) => cua,
- Err(e) => return Err(e),
- };
+ let (constness, unsafety, abi) = self.parse_fn_front_matter()?;
let ident = self.parse_ident()?;
let mut generics = self.parse_generics()?;
to_string(|s| s.print_path(p, false, 0, false))
}
+pub fn path_segment_to_string(p: &ast::PathSegment) -> String {
+ to_string(|s| s.print_path_segment(p, false))
+}
+
pub fn ident_to_string(id: ast::Ident) -> String {
to_string(|s| s.print_ident(id))
}
if i > 0 {
word(&mut self.s, "::")?
}
- if segment.identifier.name != keywords::CrateRoot.name() &&
- segment.identifier.name != "$crate" {
- self.print_ident(segment.identifier)?;
- if let Some(ref parameters) = segment.parameters {
- self.print_path_parameters(parameters, colons_before_params)?;
- }
- }
+ self.print_path_segment(segment, colons_before_params)?;
}
Ok(())
}
+ fn print_path_segment(&mut self,
+ segment: &ast::PathSegment,
+ colons_before_params: bool)
+ -> io::Result<()>
+ {
+ if segment.identifier.name != keywords::CrateRoot.name() &&
+ segment.identifier.name != "$crate" {
+ self.print_ident(segment.identifier)?;
+ if let Some(ref parameters) = segment.parameters {
+ self.print_path_parameters(parameters, colons_before_params)?;
+ }
+ }
+ Ok(())
+ }
+
fn print_qpath(&mut self,
path: &ast::Path,
qself: &ast::QSelf,
impl<T: Decodable> Decodable for P<[T]> {
fn decode<D: Decoder>(d: &mut D) -> Result<P<[T]>, D::Error> {
- Ok(P::from_vec(match Decodable::decode(d) {
- Ok(t) => t,
- Err(e) => return Err(e)
- }))
+ Ok(P::from_vec(Decodable::decode(d)?))
}
}
// The interner in thread-local, so `Symbol` shouldn't move between threads.
impl !Send for Symbol { }
+impl !Sync for Symbol { }
impl Symbol {
/// Maps a string to its interned representation.
#![crate_type = "lib"]
use std::marker::PhantomData;
-
+#[derive(Copy, Clone)]
struct Zst { phantom: PhantomData<Zst> }
// CHECK-LABEL: @mir
+// CHECK-NOT: store{{.*}}undef
#[no_mangle]
-fn mir(){
- // CHECK-NOT: getelementptr
- // CHECK-NOT: store{{.*}}undef
+fn mir() {
let x = Zst { phantom: PhantomData };
+ let y = (x, 0);
+ drop(y);
+ drop((0, x));
}
// except according to those terms.
use std::fmt::self; //~ ERROR E0429
- //~^ ERROR E0432
fn main () {
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Accessing a field that does not exist on a struct must be reported
+// as error E0609 (asserted by the `//~ ERROR` annotation below).
+
+struct Foo {
+    x: u32,
+}
+
+fn main() {
+    let x = Foo { x: 0 };
+    let _ = x.foo; //~ ERROR E0609
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Field access on a value of a primitive type (here `{integer}`) must be
+// reported as error E0610, not as a generic "no field" error.
+
+fn main() {
+    let x = 0;
+    let _ = x.foo; //~ ERROR E0610
+}
// Check that bogus field access is non-fatal
fn main() {
let x = 0;
- let _ = x.foo; //~ no field `foo` on type `{integer}`
- let _ = x.bar; //~ no field `bar` on type `{integer}`
+ let _ = x.foo; //~ `{integer}` is a primitive type and therefore doesn't have fields [E0610]
+ let _ = x.bar; //~ `{integer}` is a primitive type and therefore doesn't have fields [E0610]
}
use Foo;
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn use_char_assoc() {
// Careful here: in the representation, <char as Foo>::T gets
// normalized away, so at a certain point we had no edge to
// These dependencies SHOULD exist:
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn y() {
x::x();
}
// These are expected to yield errors, because changes to `x`
// affect the BODY of `y`, but not its signature.
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR no path
pub fn z() {
y::y();
}
use Foo;
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn with_char() {
char::method('a');
}
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn take_foo_with_char() {
take_foo::<char>('a');
}
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn with_u32() {
u32::method(22);
}
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR OK
pub fn take_foo_with_u32() {
take_foo::<u32>(22);
}
// These are expected to yield errors, because changes to `x`
// affect the BODY of `y`, but not its signature.
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path
- #[rustc_then_this_would_need(TransCrateItem)] //~ ERROR no path
pub fn z() {
y::with_char();
y::with_u32();
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Test that two unrelated functions have no trans dependency.
-
-// compile-flags: -Z query-dep-graph
-
-#![feature(rustc_attrs)]
-#![allow(dead_code)]
-
-#[rustc_if_this_changed]
-fn main() { }
-
-#[rustc_then_this_would_need(TransCrateItem)] //~ ERROR no path from `main`
-fn bar() { }
const A: UnsafeCell<usize> = UnsafeCell::new(1);
const B: &'static UnsafeCell<usize> = &A;
-//~^ ERROR: cannot borrow a constant which contains interior mutability
+//~^ ERROR: cannot borrow a constant which may contain interior mutability
struct C { a: UnsafeCell<usize> }
const D: C = C { a: UnsafeCell::new(1) };
const E: &'static UnsafeCell<usize> = &D.a;
-//~^ ERROR: cannot borrow a constant which contains interior mutability
+//~^ ERROR: cannot borrow a constant which may contain interior mutability
const F: &'static C = &D;
-//~^ ERROR: cannot borrow a constant which contains interior mutability
+//~^ ERROR: cannot borrow a constant which may contain interior mutability
fn main() {}
// except according to those terms.
fn main() {
- 1.create_a_type_error[ //~ no field `create_a_type_error` on type `{integer}`
+ 1.create_a_type_error[ //~ `{integer}` is a primitive type and therefore doesn't have fields
()+() //~ ERROR binary operation `+` cannot be applied
// ^ ensure that we typeck the inner expression ^
];
fn parse_type(iter: Box<Iterator<Item=&str>+'static>) -> &str { iter.next() }
//~^ ERROR missing lifetime specifier [E0106]
-//~^^ HELP 2 elided lifetimes
+//~^^ HELP 2 lifetimes
fn parse_type_2(iter: fn(&u8)->&u8) -> &str { iter() }
//~^ ERROR missing lifetime specifier [E0106]
fn f(a: &S, b: i32) -> &i32 {
//~^ ERROR missing lifetime specifier [E0106]
-//~^^ HELP does not say which one of `a`'s 2 elided lifetimes it is borrowed from
+//~^^ HELP does not say which one of `a`'s 2 lifetimes it is borrowed from
panic!();
}
fn g(a: &S, b: bool, c: &i32) -> &i32 {
//~^ ERROR missing lifetime specifier [E0106]
-//~^^ HELP does not say whether it is borrowed from one of `a`'s 2 elided lifetimes or `c`
+//~^^ HELP does not say whether it is borrowed from one of `a`'s 2 lifetimes or `c`
panic!();
}
fn h(a: &bool, b: bool, c: &S, d: &i32) -> &i32 {
//~^ ERROR missing lifetime specifier [E0106]
-//~^^ HELP does not say whether it is borrowed from `a`, one of `c`'s 2 elided lifetimes, or `d`
+//~^^ HELP does not say whether it is borrowed from `a`, one of `c`'s 2 lifetimes, or `d`
panic!();
}
// Lifetime annotation needed because we have two lifetimes: one as a parameter
// and one on the reference.
fn h(_x: &Foo) -> &isize { //~ ERROR missing lifetime specifier
-//~^ HELP the signature does not say which one of `_x`'s 2 elided lifetimes it is borrowed from
+//~^ HELP the signature does not say which one of `_x`'s 2 lifetimes it is borrowed from
panic!()
}
let y = 42;
let x = y.; //~ ERROR unexpected token
let x = y.(); //~ ERROR unexpected token
- let x = y.foo; //~ ERROR no field
+ let x = y.foo; //~ ERROR `{integer}` is a primitive type and therefore doesn't have fields [E061
}
mod a {
mod b {
- use self as A; //~ ERROR `self` imports are only allowed within a { } list
- //~^ ERROR unresolved import `self` [E0432]
- //~| no `self` in the root
+ use self as A;
+ //~^ ERROR `self` imports are only allowed within a { } list
use super as B;
//~^ ERROR unresolved import `super` [E0432]
//~| no `super` in the root
use foo::self; //~ ERROR unresolved import `foo::self`
//~^ ERROR `self` imports are only allowed within a { } list
+use std::mem::self;
+//~^ ERROR `self` imports are only allowed within a { } list
+
fn main() {}
// except according to those terms.
// ignore-arm stdcall isn't suppported
+// ignore-aarch64 stdcall isn't supported
extern "stdcall" {
fn printf(_: *const u8, ...); //~ ERROR: variadic function must have C or cdecl calling
// gdbg-check:$6 = None
// gdbr-check:$6 = core::option::Option::None
+// gdb-command: print os_string
+// gdb-check:$7 = "IAMA OS string 😃"
+
+// gdb-command: print some_string
+// gdb-check:$8 = Some = {"IAMA optional string!"}
+
// === LLDB TESTS ==================================================================================
#![allow(unused_variables)]
+use std::ffi::OsString;
+
fn main() {
// String
let string = "IAMA string!".to_string();
+ // OsString
+ let os_string = OsString::from("IAMA OS string \u{1F603}");
+
// Option
let some = Some(8i16);
let none: Option<i64> = None;
+ let some_string = Some("IAMA optional string!".to_owned());
+
zzz(); // #break
}
use x;
#[rustc_clean(label="TypeckTables", cfg="cfail2")]
- #[rustc_clean(label="TransCrateItem", cfg="cfail2")]
pub fn y() {
- //[cfail2]~^ ERROR `TypeckTables("y::y")` not found in dep graph, but should be clean
- //[cfail2]~| ERROR `TransCrateItem("y::y")` not found in dep graph, but should be clean
+ //[cfail2]~^ ERROR `TypeckTables(y::y)` not found in dep graph, but should be clean
x::x();
}
}
mod z {
#[rustc_dirty(label="TypeckTables", cfg="cfail2")]
- #[rustc_dirty(label="TransCrateItem", cfg="cfail2")]
pub fn z() {
- //[cfail2]~^ ERROR `TypeckTables("z::z")` found in dep graph, but should be dirty
- //[cfail2]~| ERROR `TransCrateItem("z::z")` found in dep graph, but should be dirty
+ //[cfail2]~^ ERROR `TypeckTables(z::z)` found in dep graph, but should be dirty
}
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #42602. It used to be that we had
+// a dep-graph like
+//
+//     typeck(foo) -> FnOnce -> typeck(bar)
+//
+// This was fixed by improving the resolution of the `FnOnce` trait
+// selection node.
+
+// revisions:cfail1
+// compile-flags:-Zquery-dep-graph
+
+#![feature(rustc_attrs)]
+
+fn main() {
+    a::foo();
+    b::bar();
+}
+
+mod a {
+    // `foo`'s HIR body is the source node of the dep-graph query below.
+    #[rustc_if_this_changed(HirBody)]
+    pub fn foo() {
+        let x = vec![1, 2, 3];
+        let v = || ::std::mem::drop(x);
+        v();
+    }
+}
+
+mod b {
+    // `bar` uses the same `FnOnce` machinery as `foo`, but the attribute
+    // asserts there is *no* dep-graph path from `foo`'s body to its typeck.
+    #[rustc_then_this_would_need(TypeckTables)] //[cfail1]~ ERROR no path
+    pub fn bar() {
+        let x = vec![1, 2, 3];
+        let v = || ::std::mem::drop(x);
+        v();
+    }
+}
// except according to those terms.
// Regr. test that using HIR inlined from another krate does *not* add
-// a dependency from the local Krate node.
+// a dependency from the local Krate node. We can't easily test that
+// directly anymore, so now we test that we get reuse.
-// revisions: cfail1
+// revisions: rpass1 rpass2
// compile-flags: -Z query-dep-graph
#![allow(warnings)]
#![feature(rustc_attrs)]
+#![rustc_partition_reused(module="krate_inlined-x", cfg="rpass2")]
-#![rustc_if_this_changed(Krate)]
-
-fn main() { }
+fn main() {
+ #[cfg(rpass2)]
+ ()
+}
mod x {
- #[rustc_then_this_would_need(TransCrateItem)] //[cfail1]~ ERROR no path
fn method() {
// use some methods that require inlining HIR from another crate:
let mut v = vec![];
extern crate extern_crate;
-#[rustc_clean(label="TransCrateItem", cfg="rpass2")]
-#[rustc_clean(label="TransCrateItem", cfg="rpass3")]
fn main() {
some_mod::some_fn();
}
mod some_mod {
use extern_crate;
- #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
- #[rustc_dirty(label="TransCrateItem", cfg="rpass3")]
pub fn some_fn() {
extern_crate::inline_fn();
}
#[cfg(rpass2)]
#[rustc_dirty(label="TypeckTables", cfg="rpass2")]
- #[rustc_dirty(label="TransCrateItem", cfg="rpass2")]
pub fn x() {
println!("{}", "2");
}
use x;
#[rustc_clean(label="TypeckTables", cfg="rpass2")]
- #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
pub fn y() {
x::x();
}
use y;
#[rustc_clean(label="TypeckTables", cfg="rpass2")]
- #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
pub fn z() {
y::y();
}
--- /dev/null
+-include ../tools.mk
+
+# Run rustdoc with an output directory whose parent directories do not exist
+# yet. NOTE(review): presumably checks that rustdoc creates the nested path
+# instead of erroring — confirm against the PR this test accompanies.
+all:
+	$(HOST_RPATH_ENV) '$(RUSTDOC)' -o "$(TMPDIR)/foo/bar/doc" foo.rs
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Minimal input crate for the accompanying rustdoc Makefile test.
+pub struct Foo;
let mac_expr = match (&*matched_nt, &*map[&Ident::from_str("pat")]) {
(&NtExpr(ref matched_expr), &MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt| {
- match **pat_nt {
+ match *pat_nt {
MatchedNonterminal(ref nt) => match **nt {
NtPat(ref pat) => pat.clone(),
_ => unreachable!(),
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-cross-compile
+
+// Exercises the `From<File>`, `From<ChildStdout>`, and `From<ChildStdin>`
+// conversions into `Stdio` by chaining three copies of this program into a
+// pipeline (see the `// tests ...::into()` annotations below).
+
+#![feature(rustc_private)]
+
+extern crate rustc_back;
+
+use std::env;
+use std::fs::File;
+use std::io;
+use std::io::{Read, Write};
+use std::process::{Command, Stdio};
+
+use rustc_back::tempdir::TempDir;
+
+// With any extra argument we act as a pipeline stage (`child`); with no
+// arguments we act as the driver (`parent`).
+fn main() {
+    if env::args().len() > 1 {
+        child().unwrap()
+    } else {
+        parent().unwrap()
+    }
+}
+
+fn parent() -> io::Result<()> {
+    let td = TempDir::new("foo").unwrap();
+    let input = td.path().join("input");
+    let output = td.path().join("output");
+
+    File::create(&input)?.write_all(b"foo\n")?;
+
+    // Set up this chain:
+    //     $ me <file | me | me >file
+    // ... to duplicate each line 8 times total.
+
+    let mut child1 = Command::new(env::current_exe()?)
+        .arg("first")
+        .stdin(File::open(&input)?) // tests File::into()
+        .stdout(Stdio::piped())
+        .spawn()?;
+
+    let mut child3 = Command::new(env::current_exe()?)
+        .arg("third")
+        .stdin(Stdio::piped())
+        .stdout(File::create(&output)?) // tests File::into()
+        .spawn()?;
+
+    // Started out of order so we can test both `ChildStdin` and `ChildStdout`.
+    let mut child2 = Command::new(env::current_exe()?)
+        .arg("second")
+        .stdin(child1.stdout.take().unwrap()) // tests ChildStdout::into()
+        .stdout(child3.stdin.take().unwrap()) // tests ChildStdin::into()
+        .spawn()?;
+
+    assert!(child1.wait()?.success());
+    assert!(child2.wait()?.success());
+    assert!(child3.wait()?.success());
+
+    // Each of the three stages doubles every line, so "foo\n" becomes 8 lines.
+    let mut data = String::new();
+    File::open(&output)?.read_to_string(&mut data)?;
+    for line in data.lines() {
+        assert_eq!(line, "foo");
+    }
+    assert_eq!(data.lines().count(), 8);
+    Ok(())
+}
+
+fn child() -> io::Result<()> {
+    // double everything
+    let mut input = vec![];
+    io::stdin().read_to_end(&mut input)?;
+    io::stdout().write_all(&input)?;
+    io::stdout().write_all(&input)?;
+    Ok(())
+}
extern "rust-intrinsic" {
pub fn ctpop<T>(x: T) -> T;
pub fn ctlz<T>(x: T) -> T;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
pub fn cttz<T>(x: T) -> T;
+ pub fn cttz_nonzero<T>(x: T) -> T;
pub fn bswap<T>(x: T) -> T;
}
}
assert_eq!(ctlz(100u32), 25); assert_eq!(ctlz(100i32), 25);
assert_eq!(ctlz(100u64), 57); assert_eq!(ctlz(100i64), 57);
+ assert_eq!(ctlz_nonzero(1u8), 7); assert_eq!(ctlz_nonzero(1i8), 7);
+ assert_eq!(ctlz_nonzero(1u16), 15); assert_eq!(ctlz_nonzero(1i16), 15);
+ assert_eq!(ctlz_nonzero(1u32), 31); assert_eq!(ctlz_nonzero(1i32), 31);
+ assert_eq!(ctlz_nonzero(1u64), 63); assert_eq!(ctlz_nonzero(1i64), 63);
+
+ assert_eq!(ctlz_nonzero(10u8), 4); assert_eq!(ctlz_nonzero(10i8), 4);
+ assert_eq!(ctlz_nonzero(10u16), 12); assert_eq!(ctlz_nonzero(10i16), 12);
+ assert_eq!(ctlz_nonzero(10u32), 28); assert_eq!(ctlz_nonzero(10i32), 28);
+ assert_eq!(ctlz_nonzero(10u64), 60); assert_eq!(ctlz_nonzero(10i64), 60);
+
+ assert_eq!(ctlz_nonzero(100u8), 1); assert_eq!(ctlz_nonzero(100i8), 1);
+ assert_eq!(ctlz_nonzero(100u16), 9); assert_eq!(ctlz_nonzero(100i16), 9);
+ assert_eq!(ctlz_nonzero(100u32), 25); assert_eq!(ctlz_nonzero(100i32), 25);
+ assert_eq!(ctlz_nonzero(100u64), 57); assert_eq!(ctlz_nonzero(100i64), 57);
+
assert_eq!(cttz(-1i8 as u8), 0); assert_eq!(cttz(-1i8), 0);
assert_eq!(cttz(-1i16 as u16), 0); assert_eq!(cttz(-1i16), 0);
assert_eq!(cttz(-1i32 as u32), 0); assert_eq!(cttz(-1i32), 0);
assert_eq!(cttz(100u32), 2); assert_eq!(cttz(100i32), 2);
assert_eq!(cttz(100u64), 2); assert_eq!(cttz(100i64), 2);
+ assert_eq!(cttz_nonzero(-1i8 as u8), 0); assert_eq!(cttz_nonzero(-1i8), 0);
+ assert_eq!(cttz_nonzero(-1i16 as u16), 0); assert_eq!(cttz_nonzero(-1i16), 0);
+ assert_eq!(cttz_nonzero(-1i32 as u32), 0); assert_eq!(cttz_nonzero(-1i32), 0);
+ assert_eq!(cttz_nonzero(-1i64 as u64), 0); assert_eq!(cttz_nonzero(-1i64), 0);
+
+ assert_eq!(cttz_nonzero(1u8), 0); assert_eq!(cttz_nonzero(1i8), 0);
+ assert_eq!(cttz_nonzero(1u16), 0); assert_eq!(cttz_nonzero(1i16), 0);
+ assert_eq!(cttz_nonzero(1u32), 0); assert_eq!(cttz_nonzero(1i32), 0);
+ assert_eq!(cttz_nonzero(1u64), 0); assert_eq!(cttz_nonzero(1i64), 0);
+
+ assert_eq!(cttz_nonzero(10u8), 1); assert_eq!(cttz_nonzero(10i8), 1);
+ assert_eq!(cttz_nonzero(10u16), 1); assert_eq!(cttz_nonzero(10i16), 1);
+ assert_eq!(cttz_nonzero(10u32), 1); assert_eq!(cttz_nonzero(10i32), 1);
+ assert_eq!(cttz_nonzero(10u64), 1); assert_eq!(cttz_nonzero(10i64), 1);
+
+ assert_eq!(cttz_nonzero(100u8), 2); assert_eq!(cttz_nonzero(100i8), 2);
+ assert_eq!(cttz_nonzero(100u16), 2); assert_eq!(cttz_nonzero(100i16), 2);
+ assert_eq!(cttz_nonzero(100u32), 2); assert_eq!(cttz_nonzero(100i32), 2);
+ assert_eq!(cttz_nonzero(100u64), 2); assert_eq!(cttz_nonzero(100i64), 2);
+
assert_eq!(bswap(0x0Au8), 0x0A); // no-op
assert_eq!(bswap(0x0Ai8), 0x0A); // no-op
assert_eq!(bswap(0x0A0Bu16), 0x0B0A);
Command::new(name)
.arg("--child")
.stdin(Stdio::inherit())
- .stdout(unsafe { FromRawFd::from_raw_fd(libc::STDERR_FILENO) })
- .stderr(unsafe { FromRawFd::from_raw_fd(libc::STDOUT_FILENO) })
+ .stdout(unsafe { Stdio::from_raw_fd(libc::STDERR_FILENO) })
+ .stderr(unsafe { Stdio::from_raw_fd(libc::STDOUT_FILENO) })
.spawn()
};
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Calling an inherent method of the wrapped type (`Vec::push`) through a
+// user-defined `Deref`/`DerefMut` wrapper must auto-deref to the inner value.
+
+use std::ops::{Deref, DerefMut};
+
+// Wrapper that re-validates `check(&value)` on every deref.
+struct CheckedDeref<T, F> {
+    value: T,
+    check: F
+}
+
+impl<F: Fn(&T) -> bool, T> Deref for CheckedDeref<T, F> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        assert!((self.check)(&self.value));
+        &self.value
+    }
+}
+
+impl<F: Fn(&T) -> bool, T> DerefMut for CheckedDeref<T, F> {
+    fn deref_mut(&mut self) -> &mut T {
+        assert!((self.check)(&self.value));
+        &mut self.value
+    }
+}
+
+
+fn main() {
+    let mut v = CheckedDeref {
+        value: vec![0],
+        check: |v: &Vec<_>| !v.is_empty()
+    };
+    // `push` goes through `deref_mut`; `*v` below goes through `deref`.
+    v.push(1);
+    assert_eq!(*v, vec![0, 1]);
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Compile-only test: an `IntoIterator` impl whose `IntoIter` type only
+// implements `Iterator` under a lifetime bound (`T: 'a`) must type-check.
+// `main` is intentionally empty — nothing is executed.
+
+struct Foo<T>(T);
+
+struct IntoIter<T>(T);
+
+impl<'a, T: 'a> Iterator for IntoIter<T> {
+    type Item = ();
+
+    fn next(&mut self) -> Option<()> {
+        None
+    }
+}
+
+impl<T> IntoIterator for Foo<T> {
+    type Item = ();
+    type IntoIter = IntoIter<T>;
+
+    fn into_iter(self) -> IntoIter<T> {
+        IntoIter(self.0)
+    }
+}
+
+fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-tidy-linelength
+
#![crate_type="lib"]
// @has assoc_types/trait.Index.html
// @has - '//*[@id="index.v"]//code' 'fn index'
// @has - '//*[@id="tymethod.index"]//code' \
// "fn index<'a>(&'a self, index: I) -> &'a Self::Output"
+ // @has - '//*[@id="tymethod.index"]//code//a[@href="../assoc_types/trait.Index.html#associatedtype.Output"]' \
+ // "Output"
fn index<'a>(&'a self, index: I) -> &'a Self::Output;
}
// @has assoc_types/fn.use_output.html
// @has - '//*[@class="rust fn"]' '-> &T::Output'
+// @has - '//*[@class="rust fn"]//a[@href="../assoc_types/trait.Index.html#associatedtype.Output"]' 'Output'
pub fn use_output<T: Index<usize>>(obj: &T, index: usize) -> &T::Output {
obj.index(index)
}
// @has assoc_types/fn.use_input.html
// @has - '//*[@class="rust fn"]' 'T::Input'
+// @has - '//*[@class="rust fn"]//a[@href="../assoc_types/trait.Feed.html#associatedtype.Input"]' 'Input'
pub fn use_input<T: Feed>(_feed: &T, _element: T::Input) { }
// @has assoc_types/fn.cmp_input.html
// @has - '//*[@class="rust fn"]' 'where T::Input: PartialEq<U::Input>'
+// @has - '//*[@class="rust fn"]//a[@href="../assoc_types/trait.Feed.html#associatedtype.Input"]' 'Input'
pub fn cmp_input<T: Feed, U: Feed>(a: &T::Input, b: &U::Input) -> bool
where T::Input: PartialEq<U::Input>
{
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+// compile-flags: --no-defaults --passes collapse-docs --passes unindent-comments --passes strip-priv-imports
+
+// @has 'empty_mod_private/index.html' '//a[@href="foo/index.html"]' 'foo'
+// @has 'empty_mod_private/sidebar-items.js' 'foo'
+// @matches 'empty_mod_private/foo/index.html' '//h1' 'Module empty_mod_private::foo'
+mod foo {}
+
+// @has 'empty_mod_private/index.html' '//a[@href="bar/index.html"]' 'bar'
+// @has 'empty_mod_private/sidebar-items.js' 'bar'
+// @matches 'empty_mod_private/bar/index.html' '//h1' 'Module empty_mod_private::bar'
+mod bar {
+ // @has 'empty_mod_private/bar/index.html' '//a[@href="baz/index.html"]' 'baz'
+ // @has 'empty_mod_private/bar/sidebar-items.js' 'baz'
+ // @matches 'empty_mod_private/bar/baz/index.html' '//h1' 'Module empty_mod_private::bar::baz'
+ mod baz {}
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// @has 'empty_mod_public/index.html' '//a[@href="foo/index.html"]' 'foo'
+// @has 'empty_mod_public/sidebar-items.js' 'foo'
+// @matches 'empty_mod_public/foo/index.html' '//h1' 'Module empty_mod_public::foo'
+pub mod foo {}
+
+// @has 'empty_mod_public/index.html' '//a[@href="bar/index.html"]' 'bar'
+// @has 'empty_mod_public/sidebar-items.js' 'bar'
+// @matches 'empty_mod_public/bar/index.html' '//h1' 'Module empty_mod_public::bar'
+pub mod bar {
+ // @has 'empty_mod_public/bar/index.html' '//a[@href="baz/index.html"]' 'baz'
+ // @has 'empty_mod_public/bar/sidebar-items.js' 'baz'
+ // @matches 'empty_mod_public/bar/baz/index.html' '//h1' 'Module empty_mod_public::bar::baz'
+ pub mod baz {}
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+ pub enum Foo {
+ Bar,
+ }
+ pub use self::Foo::*;
+}
+
+// @has 'issue_35488/index.html' '//code' 'pub use self::Foo::*;'
+// @has 'issue_35488/enum.Foo.html'
+pub use self::foo::*;
+
+// @has 'issue_35488/index.html' '//code' 'pub use std::option::Option::None;'
+pub use std::option::Option::None;
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait MyTrait {
+ fn method_on_mytrait() {}
+}
+
+pub struct MyStruct;
+
+impl MyStruct {
+ pub fn method_on_mystruct() {}
+}
+
+// @has typedef/type.MyAlias.html
+// @has - '//*[@class="impl"]//code' 'impl MyAlias'
+// @has - '//*[@class="impl"]//code' 'impl MyTrait for MyAlias'
+// @has - 'Alias docstring'
+// @has - '//*[@class="sidebar"]//p[@class="location"]' 'Type Definition MyAlias'
+// @has - '//*[@class="sidebar"]//a[@href="#methods"]' 'Methods'
+// @has - '//*[@class="sidebar"]//a[@href="#implementations"]' 'Trait Implementations'
+/// Alias docstring
+pub type MyAlias = MyStruct;
+
+impl MyAlias {
+ pub fn method_on_myalias() {}
+}
+
+impl MyTrait for MyAlias {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo() -> Box<Fn()> {
+ let num = 5;
+
+ let closure = || {
+ num += 1;
+ };
+
+ Box::new(closure)
+}
+
+fn main() {}
--- /dev/null
+error[E0525]: expected a closure that implements the `Fn` trait, but this closure only implements `FnMut`
+ --> $DIR/issue-26046-fn-mut.rs:14:19
+ |
+14 | let closure = || {
+ | ___________________^
+15 | | num += 1;
+16 | | };
+ | |_____^
+17 |
+18 | Box::new(closure)
+ | ----------------- the requirement to implement `Fn` derives from here
+ |
+note: closure is `FnMut` because it mutates the variable `num` here
+ --> $DIR/issue-26046-fn-mut.rs:15:9
+ |
+15 | num += 1;
+ | ^^^
+
+error: aborting due to previous error(s)
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn get_closure() -> Box<Fn() -> Vec<u8>> {
+ let vec = vec![1u8, 2u8];
+
+ let closure = move || {
+ vec
+ };
+
+ Box::new(closure)
+}
+
+fn main() {}
--- /dev/null
+error[E0525]: expected a closure that implements the `Fn` trait, but this closure only implements `FnOnce`
+ --> $DIR/issue-26046-fn-once.rs:14:19
+ |
+14 | let closure = move || {
+ | ___________________^
+15 | | vec
+16 | | };
+ | |_____^
+17 |
+18 | Box::new(closure)
+ | ----------------- the requirement to implement `Fn` derives from here
+ |
+note: closure is `FnOnce` because it moves the variable `vec` out of its environment
+ --> $DIR/issue-26046-fn-once.rs:15:9
+ |
+15 | vec
+ | ^^^
+
+error: aborting due to previous error(s)
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::collections::HashMap;
+
+fn main() {
+ let dict: HashMap<i32, i32> = HashMap::new();
+ let debug_dump_dict = || {
+ for (key, value) in dict {
+ println!("{:?} - {:?}", key, value);
+ }
+ };
+ debug_dump_dict();
+ debug_dump_dict();
+ //~^ ERROR use of moved value: `debug_dump_dict`
+ //~| NOTE closure cannot be invoked more than once because it moves the
+ //~| variable `dict` out of its environment
+}
--- /dev/null
+error[E0382]: use of moved value: `debug_dump_dict`
+ --> $DIR/issue-42065.rs:21:5
+ |
+20 | debug_dump_dict();
+ | --------------- value moved here
+21 | debug_dump_dict();
+ | ^^^^^^^^^^^^^^^ value used here after move
+ |
+note: closure cannot be invoked more than once because it moves the variable `dict` out of its environment
+ --> $DIR/issue-42065.rs:16:29
+ |
+16 | for (key, value) in dict {
+ | ^^^^
+
+error: aborting due to previous error(s)
+
-error: no field `baz` on type `Foo`
+error[E0609]: no field `baz` on type `Foo`
--> $DIR/issue-36798.rs:17:7
|
17 | f.baz;
-error: no field `zz` on type `Foo`
+error[E0609]: no field `zz` on type `Foo`
--> $DIR/issue-36798_unknown_field.rs:17:7
|
17 | f.zz;
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::collections::HashMap;
-
-fn main() {
- let dict: HashMap<i32, i32> = HashMap::new();
- let debug_dump_dict = || {
- for (key, value) in dict {
- println!("{:?} - {:?}", key, value);
- }
- };
- debug_dump_dict();
- debug_dump_dict();
- //~^ ERROR use of moved value: `debug_dump_dict`
- //~| NOTE closure cannot be invoked more than once because it moves the
- //~| variable `dict` out of its environment
-}
+++ /dev/null
-error[E0382]: use of moved value: `debug_dump_dict`
- --> $DIR/fn_once-moved.rs:21:5
- |
-20 | debug_dump_dict();
- | --------------- value moved here
-21 | debug_dump_dict();
- | ^^^^^^^^^^^^^^^ value used here after move
- |
-note: closure cannot be invoked more than once because it moves the variable `dict` out of its environment
- --> $DIR/fn_once-moved.rs:16:29
- |
-16 | for (key, value) in dict {
- | ^^^^
-
-error: aborting due to previous error(s)
-
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell::Cell;
+use std::panic::catch_unwind;
+fn main() {
+ let mut x = Cell::new(22);
+ catch_unwind(|| { x.set(23); });
+}
--- /dev/null
+error[E0277]: the trait bound `std::cell::UnsafeCell<i32>: std::panic::RefUnwindSafe` is not satisfied in `std::cell::Cell<i32>`
+ --> $DIR/interior-mutability.rs:15:5
+ |
+15 | catch_unwind(|| { x.set(23); });
+ | ^^^^^^^^^^^^ the type std::cell::UnsafeCell<i32> may contain interior mutability and a reference may not be safely transferrable across a catch_unwind boundary
+ |
+ = help: within `std::cell::Cell<i32>`, the trait `std::panic::RefUnwindSafe` is not implemented for `std::cell::UnsafeCell<i32>`
+ = note: required because it appears within the type `std::cell::Cell<i32>`
+ = note: required because of the requirements on the impl of `std::panic::UnwindSafe` for `&std::cell::Cell<i32>`
+ = note: required because it appears within the type `[closure@$DIR/interior-mutability.rs:15:18: 15:35 x:&std::cell::Cell<i32>]`
+ = note: required by `std::panic::catch_unwind`
+
+error: aborting due to previous error(s)
+
50 | fake_method_stmt!();
| -------------------- in this macro invocation
-error: no field `fake` on type `{integer}`
+error[E0610]: `{integer}` is a primitive type and therefore doesn't have fields
--> $DIR/macro-backtrace-invalid-internals.rs:21:13
|
21 | 1.fake
54 | let _ = fake_method_expr!();
| ------------------- in this macro invocation
-error: no field `fake` on type `{integer}`
+error[E0610]: `{integer}` is a primitive type and therefore doesn't have fields
--> $DIR/macro-backtrace-invalid-internals.rs:39:13
|
39 | 1.fake
|
= note: vtable kinds may not match
-error: no field `f` on type `fn() {main}`
+error[E0609]: no field `f` on type `fn() {main}`
--> $DIR/cast-rfc0401.rs:75:18
|
75 | let _ = main.f as *const u32;
-Subproject commit 82733b01471a2c62bb1cec966d888c52ff118914
+Subproject commit bbfe9b3a9d64aa8698b18cbb3803b3fa00ee3f44
-Subproject commit 38ca9b702b73c03959e447f5dae56eff7497c986
+Subproject commit 0d0f3baad02d65d96befbb90e77bf8a326dd14f5
lines.windows(LICENSE.lines().count()).any(|window| {
let offset = if window.iter().all(|w| w.starts_with("//")) {
2
- } else if window.iter().all(|w| w.starts_with("#")) {
+ } else if window.iter().all(|w| w.starts_with('#')) {
1
+ } else if window.iter().all(|w| w.starts_with(" *")) {
+ 2
} else {
return false
};