[submodule "src/doc/rust-by-example"]
path = src/doc/rust-by-example
url = https://github.com/rust-lang/rust-by-example
+[submodule "src/llvm-emscripten"]
+ path = src/llvm-emscripten
+ url = https://github.com/rust-lang/llvm
# OSX 10.7 and `xcode7` is the latest Xcode able to compile LLVM for 10.7.
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-extended --enable-profiler"
+ RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-extended --enable-profiler --enable-emscripten"
SRC=.
DEPLOY=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-extended --enable-sanitizers --enable-profiler"
+ RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-extended --enable-sanitizers --enable-profiler --enable-emscripten"
SRC=.
DEPLOY=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
--build=x86_64-pc-windows-msvc
--enable-extended
--enable-profiler
+ --enable-emscripten
SCRIPT: python x.py dist
DEPLOY: 1
- RUST_CONFIGURE_ARGS: >
--target=i586-pc-windows-msvc
--enable-extended
--enable-profiler
+ --enable-emscripten
SCRIPT: python x.py dist
DEPLOY: 1
- MSYS_BITS: 32
- RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-extended
+ RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-extended --enable-emscripten
SCRIPT: python x.py dist
MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z
DEPLOY: 1
- MSYS_BITS: 64
SCRIPT: python x.py dist
- RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-extended
+ RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-extended --enable-emscripten
MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror
MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z
MINGW_DIR: mingw64
# compiler.
#codegen-units = 1
+# Whether to enable ThinLTO (and increase the codegen units to either a default
+# or the configured value). On by default. If we want the fastest possible
+# compiler, we should disable this.
+#thinlto = true
+
# Whether or not debug assertions are enabled for the compiler and standard
# library. Also enables compilation of debug! and trace! logging macros.
#debug-assertions = false
# result (broken, compiling, testing) into this JSON file.
#save-toolstates = "/path/to/toolstates.json"
+# This is an array of the codegen backends that will be compiled for the rustc
+# that's being compiled. The default is to only build the LLVM codegen backend,
+# but you can also optionally enable the "emscripten" backend for asm.js or
+# make this an empty array (but that probably won't get too far in the
+# bootstrap)
+#codegen-backends = ["llvm"]
+
+# Flag indicating whether `libstd` calls an imported function to handle basic IO
+# when targeting WebAssembly. Enable this to debug tests for the `wasm32-unknown-unknown`
+# target, as without this option the test output will not be captured.
+#wasm-syscall = false
+
# =============================================================================
# Options for specific targets
#
[[package]]
name = "clippy"
-version = "0.0.174"
+version = "0.0.186"
dependencies = [
"cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy-mini-macro-test 0.1.0",
- "clippy_lints 0.0.174",
- "compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy-mini-macro-test 0.2.0",
+ "clippy_lints 0.0.186",
+ "compiletest_rs 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"duct 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "clippy-mini-macro-test"
-version = "0.1.0"
+version = "0.2.0"
[[package]]
name = "clippy_lints"
-version = "0.0.174"
+version = "0.0.186"
dependencies = [
"if_chain 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"pulldown-cmark 0.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "compiletest_rs"
-version = "0.3.3"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"xz2 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "is-match"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "itertools"
version = "0.6.5"
"either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "itertools"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "itoa"
version = "0.3.4"
[[package]]
name = "mdbook"
-version = "0.0.28"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
+ "chrono 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"handlebars 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
dependencies = [
"byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiletest_rs 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rls"
-version = "0.124.0"
+version = "0.125.0"
dependencies = [
"cargo 0.26.0",
"env_logger 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-rustc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustfmt-nightly 0.3.6",
+ "rustfmt-nightly 0.3.8",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.1.0"
dependencies = [
"clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "mdbook 0.0.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mdbook 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
[[package]]
name = "rustc-ap-rustc_cratesio_shim"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "rustc-ap-rustc_data_structures"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-serialize 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-serialize 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-ap-rustc_errors"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "rustc-ap-rustc_data_structures 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-serialize 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-syntax_pos 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_data_structures 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-serialize 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-syntax_pos 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-ap-serialize"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc-ap-syntax"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-rustc_cratesio_shim 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-rustc_data_structures 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-rustc_errors 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-serialize 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-syntax_pos 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_cratesio_shim 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_data_structures 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_errors 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-serialize 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-syntax_pos 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rustc-ap-syntax_pos"
-version = "12.0.0"
+version = "29.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "rustc-ap-rustc_data_structures 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-serialize 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_data_structures 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-serialize 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
"rustc_privacy 0.0.0",
"rustc_resolve 0.0.0",
"rustc_save_analysis 0.0.0",
- "rustc_trans 0.0.0",
"rustc_trans_utils 0.0.0",
"rustc_typeck 0.0.0",
"serialize 0.0.0",
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"build_helper 0.1.0",
"cc 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_cratesio_shim 0.0.0",
]
"cc 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
[[package]]
name = "rustfmt-nightly"
-version = "0.3.6"
+version = "0.3.8"
dependencies = [
"cargo_metadata 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"derive-new 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-rustc_errors 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-ap-syntax 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-rustc_errors 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-ap-syntax 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "shlex"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "siphasher"
version = "0.2.2"
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "toml-query"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "unicode-bidi"
version = "0.3.4"
"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007"
"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2"
-"checksum compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "562bafeec9aef1e3e08f1c5b0c542220bb80ff2894e5373a1f9d17c346412c66"
+"checksum compiletest_rs 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "6c5aafb5d4a77c6b5fa384fe93c7a9a3561bd88c4b8b8e45187cf5e779b1badc"
"checksum core-foundation 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "8047f547cd6856d45b1cdd75ef8d2f21f3d0e4bf1dab0a0041b0ae9a5dda9c0e"
"checksum core-foundation-sys 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "152195421a2e6497a8179195672e9d4ee8e45ed8c465b626f1606d27a08ebcd5"
"checksum crossbeam 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "bd66663db5a988098a89599d4857919b3acf7f61402e61365acfd3919857b9be"
"checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d"
"checksum if_chain 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "61bb90bdd39e3af69b0172dfc6130f6cd6332bf040fbb9bdd4401d37adbd48b8"
"checksum ignore 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bb2f0238094bd1b41800fb6eb9b16fdd5e9832ed6053ed91409f0cd5bf28dcfd"
+"checksum is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7e5b386aef33a1c677be65237cb9d32c3f3ef56bd035949710c4bb13083eb053"
"checksum itertools 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d3f2be4da1690a039e9ae5fd575f706a63ad5a2120f161b1d653c9da3930dd21"
+"checksum itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b07332223953b5051bceb67e8c4700aa65291535568e1f12408c43c4a42c0394"
"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c"
"checksum jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "565f6106bd87b394398f813bea4e5ecad6d6b0f6aa077592d088f882a506481d"
"checksum json 0.11.12 (registry+https://github.com/rust-lang/crates.io-index)" = "39ebf0fac977ee3a4a3242b6446004ff64514889e3e2730bbd4f764a67a2e483"
"checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
"checksum markup5ever 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "047150a0e03b57e638fc45af33a0b63a0362305d5b9f92ecef81df472a4cceb0"
"checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
-"checksum mdbook 0.0.28 (registry+https://github.com/rust-lang/crates.io-index)" = "1ee8ba20c002000546681dc78d7f7e91fd35832058b1e2fdd492ca842bb6e9be"
+"checksum mdbook 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fef236caad7ba3b5b3944df946f19ab3e190bca53c111dd00fe05fa8d879f2fd"
"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
"checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4"
"checksum rls-rustc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "85cfb9dde19e313da3e47738008f8a472e470cc42d910b71595a9238494701f2"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
"checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff"
-"checksum rustc-ap-rustc_cratesio_shim 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f1a51c10af5abd5d698b7e3487e869e6d15f6feb04cbedb5c792e2824f9d845e"
-"checksum rustc-ap-rustc_data_structures 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1aa227490501072780d57f74b1164d361833ff8e172f817da0da2cdf2e4280cc"
-"checksum rustc-ap-rustc_errors 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "21ff6c6e13ac4fc04b7d4d398828b024c4b6577045cb3175b33d35fea35ff6d0"
-"checksum rustc-ap-serialize 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6b4e7f51e298675c2bf830f7265621a8936fb09e63b825b58144cbaac969e604"
-"checksum rustc-ap-syntax 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8bf5639869ba2f7fa581939cd217cb71a85506b82ad0ea520614fb0dceb2386c"
-"checksum rustc-ap-syntax_pos 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1c020cdb7379e1c733ae0a311ae47c748337ba584d2dd7b7f53baaae78de6f8b"
+"checksum rustc-ap-rustc_cratesio_shim 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ad5e562044ea78a6764dd75ae8afe4b21fde49f4548024b5fdf6345c21fb524"
+"checksum rustc-ap-rustc_data_structures 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c0d65325492aba7db72899e3edbab34d39af98c42ab7c7e450c9a288ffe4ad"
+"checksum rustc-ap-rustc_errors 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "87d4ab2e06a671b5b5c5b0359dac346f164c99d059dce6a22feb08f2f56bd182"
+"checksum rustc-ap-serialize 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e0745fa445ff41c4b6699936cf35ce3ca49502377dd7b3929c829594772c3a7b"
+"checksum rustc-ap-syntax 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "82efedabe30f393161e11214a9130edfa01ad476372d1c6f3fec1f8d30488c9d"
+"checksum rustc-ap-syntax_pos 29.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db9de2e927e280c75b8efab9c5f591ad31082d5d2c4c562c68fdba2ee77286b0"
"checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7"
"checksum serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c9db7266c7d63a4c4b7fe8719656ccdd51acf1bed6124b174f933b009fb10bcb"
"checksum shared_child 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "099b38928dbe4a0a01fcd8c233183072f14a7d126a34bed05880869be66e14cc"
"checksum shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8"
+"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
"checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537"
"checksum smallvec 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4f8266519bc1d17d0b5b16f6c21295625d562841c708f6376f49028a43e9c11e"
"checksum smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44db0ecb22921ef790d17ae13a3f6d15784183ff5f2a01aa32098c7498d2b4b9"
"checksum time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "a15375f1df02096fb3317256ce2cee6a1f42fc84ea5ad5fc8c421cfe40c73098"
"checksum toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "736b60249cb25337bc196faa43ee12c705e426f3d55c214d73a4e7be06f92cb4"
"checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e"
+"checksum toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6854664bfc6df0360c695480836ee90e2d0c965f06db291d10be9344792d43e8"
"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5"
"checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f"
"checksum unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8083c594e02b8ae1654ae26f0ade5158b119bd88ad0e8227a5d8fcd72407946"
"rustc",
"libstd",
"libtest",
+ "librustc_trans",
"tools/cargotest",
"tools/clippy",
"tools/compiletest",
os.path.join(self.rust_root, ".gitmodules"),
"--get-regexp", "path"]
).decode(default_encoding).splitlines()]
- submodules = [module for module in submodules
- if not ((module.endswith("llvm") and
- self.get_toml('llvm-config')) or
- (module.endswith("jemalloc") and
- (self.get_toml('use-jemalloc') == "false" or
- self.get_toml('jemalloc'))))]
+ filtered_submodules = []
+ for module in submodules:
+ if module.endswith("llvm"):
+ if self.get_toml('llvm-config'):
+ continue
+ if module.endswith("llvm-emscripten"):
+ backends = self.get_toml('codegen-backends')
+ if backends is None or not 'emscripten' in backends:
+ continue
+ if module.endswith("jemalloc"):
+ if self.get_toml('use-jemalloc') == 'false':
+ continue
+ if self.get_toml('jemalloc'):
+ continue
+ filtered_submodules.append(module)
run(["git", "submodule", "update",
- "--init", "--recursive"] + submodules,
+ "--init", "--recursive"] + filtered_submodules,
cwd=self.rust_root, verbose=self.verbose)
run(["git", "submodule", "-q", "foreach", "git",
"reset", "-q", "--hard"],
self.ensure(Libdir { compiler, target })
}
+ pub fn sysroot_codegen_backends(&self, compiler: Compiler) -> PathBuf {
+ self.sysroot_libdir(compiler, compiler.host)
+ .with_file_name("codegen-backends")
+ }
+
/// Returns the compiler's libdir where it stores the dynamic libraries that
/// it itself links against.
///
stage = compiler.stage;
}
+ let mut extra_args = env::var(&format!("RUSTFLAGS_STAGE_{}", stage)).unwrap_or_default();
+ if stage != 0 {
+ let s = env::var("RUSTFLAGS_STAGE_NOT_0").unwrap_or_default();
+ extra_args.push_str(" ");
+ extra_args.push_str(&s);
+ }
+
+ if !extra_args.is_empty() {
+ cargo.env("RUSTFLAGS",
+ format!("{} {}", env::var("RUSTFLAGS").unwrap_or_default(), extra_args));
+ }
+
// Customize the compiler we're running. Specify the compiler to cargo
// as our shim and then pass it some various options used to configure
// how the actual compiler itself is called.
})
.env("TEST_MIRI", self.config.test_miri.to_string())
.env("RUSTC_ERROR_METADATA_DST", self.extended_error_dir());
- if let Some(n) = self.config.rust_codegen_units {
- cargo.env("RUSTC_CODEGEN_UNITS", n.to_string());
- }
-
if let Some(host_linker) = self.build.linker(compiler.host) {
cargo.env("RUSTC_HOST_LINKER", host_linker);
if self.is_very_verbose() {
cargo.arg("-v");
}
+
+ // This must be kept before the thinlto check, as we set codegen units
+ // to 1 forcibly there.
+ if let Some(n) = self.config.rust_codegen_units {
+ cargo.env("RUSTC_CODEGEN_UNITS", n.to_string());
+ }
+
if self.config.rust_optimize {
// FIXME: cargo bench does not accept `--release`
if cmd != "bench" {
}
if self.config.rust_codegen_units.is_none() &&
- self.build.is_rust_llvm(compiler.host)
- {
+ self.build.is_rust_llvm(compiler.host) &&
+ self.config.rust_thinlto {
cargo.env("RUSTC_THINLTO", "1");
+ } else if self.config.rust_codegen_units.is_none() {
+ // Generally, if ThinLTO has been disabled for some reason, we
+ // want to set the codegen units to 1. However, we shouldn't do
+ // this if the option was specifically set by the user.
+ cargo.env("RUSTC_CODEGEN_UNITS", "1");
}
}
+
if self.config.locked_deps {
cargo.arg("--locked");
}
build.clear_if_dirty(&stage_out, &libtest_stamp(build, compiler, target));
let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "check");
- rustc_cargo(build, target, &mut cargo);
+ rustc_cargo(build, &mut cargo);
run_cargo(build,
&mut cargo,
&librustc_stamp(build, compiler, target),
// Even if we're not building std this stage, the new sysroot must
// still contain the musl startup objects.
- if target.contains("musl") && !target.contains("mips") {
+ if target.contains("musl") {
let libdir = builder.sysroot_libdir(compiler, target);
copy_musl_third_party_objects(build, target, &libdir);
}
println!("Building stage{} std artifacts ({} -> {})", compiler.stage,
&compiler.host, target);
- if target.contains("musl") && !target.contains("mips") {
+ if target.contains("musl") {
let libdir = builder.sysroot_libdir(compiler, target);
copy_musl_third_party_objects(build, target, &libdir);
}
}
for obj in ["crt2.o", "dllcrt2.o"].iter() {
- copy(&compiler_file(build.cc(target), obj), &sysroot_dir.join(obj));
+ let src = compiler_file(build,
+ build.cc(target),
+ target,
+ obj);
+ copy(&src, &sysroot_dir.join(obj));
}
}
}
builder.ensure(Test { compiler, target });
- // Build LLVM for our target. This will implicitly build the host LLVM
- // if necessary.
- builder.ensure(native::Llvm { target });
-
if build.force_use_stage1(compiler, target) {
builder.ensure(Rustc {
compiler: builder.compiler(1, build.build),
build.clear_if_dirty(&stage_out, &libtest_stamp(build, compiler, target));
let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "build");
- rustc_cargo(build, target, &mut cargo);
+ rustc_cargo(build, &mut cargo);
run_cargo(build,
&mut cargo,
&librustc_stamp(build, compiler, target),
}
}
-/// Same as `std_cargo`, but for libtest
-pub fn rustc_cargo(build: &Build,
- target: Interned<String>,
- cargo: &mut Command) {
+pub fn rustc_cargo(build: &Build, cargo: &mut Command) {
cargo.arg("--features").arg(build.rustc_features())
.arg("--manifest-path")
.arg(build.src.join("src/rustc/Cargo.toml"));
+ rustc_cargo_env(build, cargo);
+}
+// Environment variables shared between the main rustc build (`rustc_cargo`)
+// and the codegen-backend build; the LLVM-specific variables removed below
+// now live with the backend-specific build logic instead.
+fn rustc_cargo_env(build: &Build, cargo: &mut Command) {
// Set some configuration variables picked up by build scripts and
// the compiler alike
cargo.env("CFG_RELEASE", build.rust_release())
if !build.unstable_features() {
cargo.env("CFG_DISABLE_UNSTABLE_FEATURES", "1");
}
- // Flag that rust llvm is in use
- if build.is_rust_llvm(target) {
- cargo.env("LLVM_RUSTLLVM", "1");
- }
- cargo.env("LLVM_CONFIG", build.llvm_config(target));
- let target_config = build.config.target_config.get(&target);
- if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) {
- cargo.env("CFG_LLVM_ROOT", s);
- }
- // Building with a static libstdc++ is only supported on linux right now,
- // not for MSVC or macOS
- if build.config.llvm_static_stdcpp &&
- !target.contains("freebsd") &&
- !target.contains("windows") &&
- !target.contains("apple") {
- cargo.env("LLVM_STATIC_STDCPP",
- compiler_file(build.cxx(target).unwrap(), "libstdc++.a"));
- }
- if build.config.llvm_link_shared {
- cargo.env("LLVM_LINK_SHARED", "1");
- }
if let Some(ref s) = build.config.rustc_default_linker {
cargo.env("CFG_DEFAULT_LINKER", s);
}
}
}
+// Step that builds one codegen backend crate (src/librustc_trans) for a
+// (compiler, target) pair. `backend` selects which backend to build;
+// "llvm" and "emscripten" are the values handled by `run` below.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CodegenBackend {
+ pub compiler: Compiler,
+ pub target: Interned<String>,
+ pub backend: Interned<String>,
+}
+
+impl Step for CodegenBackend {
+ type Output = ();
+ const ONLY_HOSTS: bool = true;
+ const DEFAULT: bool = true;
+
+ fn should_run(run: ShouldRun) -> ShouldRun {
+ run.path("src/librustc_trans")
+ }
+
+ fn make_run(run: RunConfig) {
+ // When invoked by path (no explicit backend), build the first
+ // configured backend, falling back to "llvm" when none is set.
+ let backend = run.builder.config.rust_codegen_backends.get(0);
+ let backend = backend.cloned().unwrap_or_else(|| {
+ INTERNER.intern_str("llvm")
+ });
+ run.builder.ensure(CodegenBackend {
+ compiler: run.builder.compiler(run.builder.top_stage, run.host),
+ target: run.target,
+ backend
+ });
+ }
+
+ fn run(self, builder: &Builder) {
+ let build = builder.build;
+ let compiler = self.compiler;
+ let target = self.target;
+
+ // The backend crate builds on top of the compiler crates.
+ builder.ensure(Rustc { compiler, target });
+
+ // Redirect to the stage-1 compiler's build of this backend when the
+ // build system decides stage1 artifacts should be reused.
+ if build.force_use_stage1(compiler, target) {
+ builder.ensure(CodegenBackend {
+ compiler: builder.compiler(1, build.build),
+ target,
+ backend: self.backend,
+ });
+ return;
+ }
+
+ let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "build");
+ let mut features = build.rustc_features().to_string();
+ cargo.arg("--manifest-path")
+ .arg(build.src.join("src/librustc_trans/Cargo.toml"));
+ rustc_cargo_env(build, &mut cargo);
+
+ match &*self.backend {
+ "llvm" | "emscripten" => {
+ // Build LLVM for our target. This will implicitly build the
+ // host LLVM if necessary.
+ let llvm_config = builder.ensure(native::Llvm {
+ target,
+ emscripten: self.backend == "emscripten",
+ });
+
+ if self.backend == "emscripten" {
+ features.push_str(" emscripten");
+ }
+
+ let _folder = build.fold_output(|| format!("stage{}-rustc_trans", compiler.stage));
+ println!("Building stage{} codegen artifacts ({} -> {}, {})",
+ compiler.stage, &compiler.host, target, self.backend);
+
+ // Pass down configuration from the LLVM build into the build of
+ // librustc_llvm and librustc_trans.
+ if build.is_rust_llvm(target) {
+ cargo.env("LLVM_RUSTLLVM", "1");
+ }
+ cargo.env("LLVM_CONFIG", &llvm_config);
+ // A user-provided llvm-config only applies to the regular
+ // LLVM backend, not the emscripten one.
+ if self.backend != "emscripten" {
+ let target_config = build.config.target_config.get(&target);
+ if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) {
+ cargo.env("CFG_LLVM_ROOT", s);
+ }
+ }
+ // Building with a static libstdc++ is only supported on linux right now,
+ // not for MSVC or macOS
+ if build.config.llvm_static_stdcpp &&
+ !target.contains("freebsd") &&
+ !target.contains("windows") &&
+ !target.contains("apple") {
+ let file = compiler_file(build,
+ build.cxx(target).unwrap(),
+ target,
+ "libstdc++.a");
+ cargo.env("LLVM_STATIC_STDCPP", file);
+ }
+ if build.config.llvm_link_shared {
+ cargo.env("LLVM_LINK_SHARED", "1");
+ }
+ }
+ _ => panic!("unknown backend: {}", self.backend),
+ }
+
+ // run_cargo reports every artifact written; exactly one of them must
+ // be the rustc_trans dylib, whose path we record in this backend's
+ // stamp file for later sysroot assembly.
+ let tmp_stamp = build.cargo_out(compiler, Mode::Librustc, target)
+ .join(".tmp.stamp");
+ let files = run_cargo(build,
+ cargo.arg("--features").arg(features),
+ &tmp_stamp,
+ false);
+ let mut files = files.into_iter()
+ .filter(|f| {
+ let filename = f.file_name().unwrap().to_str().unwrap();
+ is_dylib(filename) && filename.contains("rustc_trans-")
+ });
+ let codegen_backend = match files.next() {
+ Some(f) => f,
+ None => panic!("no dylibs built for codegen backend?"),
+ };
+ if let Some(f) = files.next() {
+ panic!("codegen backend built two dylibs:\n{}\n{}",
+ codegen_backend.display(),
+ f.display());
+ }
+ let stamp = codegen_backend_stamp(build, compiler, target, self.backend);
+ let codegen_backend = codegen_backend.to_str().unwrap();
+ t!(t!(File::create(&stamp)).write_all(codegen_backend.as_bytes()));
+ }
+}
+
+/// Creates the `codegen-backends` folder for a compiler that's about to be
+/// assembled as a complete compiler.
+///
+/// This will take the codegen artifacts produced by `compiler` and link them
+/// into an appropriate location for `target_compiler` to be a functional
+/// compiler.
+fn copy_codegen_backends_to_sysroot(builder: &Builder,
+ compiler: Compiler,
+ target_compiler: Compiler) {
+ let build = builder.build;
+ let target = target_compiler.host;
+
+ // Note that this step is different than all the other `*Link` steps in
+ // that it's not assembling a bunch of libraries but rather is primarily
+ // moving the codegen backend into place. The codegen backend of rustc is
+ // not linked into the main compiler by default but is rather dynamically
+ // selected at runtime for inclusion.
+ //
+ // Here we're looking for the output dylib of the `CodegenBackend` step and
+ // we're copying that into the `codegen-backends` folder.
+ let dst = builder.sysroot_codegen_backends(target_compiler);
+ t!(fs::create_dir_all(&dst));
+
+ // Each configured backend has a stamp file (see `codegen_backend_stamp`)
+ // whose contents are the path of the dylib that backend's build produced.
+ for backend in builder.config.rust_codegen_backends.iter() {
+ let stamp = codegen_backend_stamp(build, compiler, target, *backend);
+ let mut dylib = String::new();
+ t!(t!(File::open(&stamp)).read_to_string(&mut dylib));
+ let file = Path::new(&dylib);
+ let filename = file.file_name().unwrap().to_str().unwrap();
+ // change `librustc_trans-xxxxxx.so` to `librustc_trans-llvm.so`
+ let target_filename = {
+ let dash = filename.find("-").unwrap();
+ let dot = filename.find(".").unwrap();
+ format!("{}-{}{}",
+ &filename[..dash],
+ backend,
+ &filename[dot..])
+ };
+ copy(&file, &dst.join(target_filename));
+ }
+}
+
/// Cargo's output path for the standard library in a given stage, compiled
/// by a particular compiler for the specified target.
pub fn libstd_stamp(build: &Build, compiler: Compiler, target: Interned<String>) -> PathBuf {
build.cargo_out(compiler, Mode::Librustc, target).join(".librustc.stamp")
}
-fn compiler_file(compiler: &Path, file: &str) -> PathBuf {
- let out = output(Command::new(compiler)
- .arg(format!("-print-file-name={}", file)));
+/// Path of the stamp file recording where the `rustc_trans` dylib for
+/// `backend` was written (populated by the `CodegenBackend` step).
+fn codegen_backend_stamp(build: &Build,
+ compiler: Compiler,
+ target: Interned<String>,
+ backend: Interned<String>) -> PathBuf {
+ build.cargo_out(compiler, Mode::Librustc, target)
+ .join(format!(".librustc_trans-{}.stamp", backend))
+}
+
+/// Asks the C/C++ `compiler` where it keeps `file` via `-print-file-name`,
+/// now also passing the cflags configured for `target` so the lookup honors
+/// target-specific compilation flags.
+// NOTE(review): presumably the cflags matter for multilib-style setups where
+// the file location depends on flags like -m32 — confirm against callers.
+fn compiler_file(build: &Build,
+ compiler: &Path,
+ target: Interned<String>,
+ file: &str) -> PathBuf {
+ let mut cmd = Command::new(compiler);
+ cmd.args(build.cflags(target));
+ cmd.arg(format!("-print-file-name={}", file));
+ let out = output(&mut cmd);
PathBuf::from(out.trim())
}
}
// Get the compiler that we'll use to bootstrap ourselves.
- let build_compiler = if target_compiler.host != build.build {
- // Build a compiler for the host platform. We cannot use the stage0
- // compiler for the host platform for this because it doesn't have
- // the libraries we need. FIXME: Perhaps we should download those
- // libraries? It would make builds faster...
- // FIXME: It may be faster if we build just a stage 1
- // compiler and then use that to bootstrap this compiler
- // forward.
- builder.compiler(target_compiler.stage - 1, build.build)
- } else {
- // Build the compiler we'll use to build the stage requested. This
- // may build more than one compiler (going down to stage 0).
- builder.compiler(target_compiler.stage - 1, target_compiler.host)
- };
+ //
+ // Note that this is where the recursive nature of the bootstrap
+ // happens, as this will request the previous stage's compiler on
+ // downwards to stage 0.
+ //
+ // Also note that we're building a compiler for the host platform. We
+ // only assume that we can run `build` artifacts, which means that to
+ // produce some other architecture compiler we need to start from
+ // `build` to get there.
+ //
+ // FIXME: Perhaps we should download those libraries?
+ // It would make builds faster...
+ //
+ // FIXME: It may be faster if we build just a stage 1 compiler and then
+ // use that to bootstrap this compiler forward.
+ let build_compiler =
+ builder.compiler(target_compiler.stage - 1, build.build);
// Build the libraries for this compiler to link to (i.e., the libraries
// it uses at runtime). NOTE: Crates the target compiler compiles don't
builder.ensure(RustcLink { compiler, target_compiler, target });
}
} else {
- builder.ensure(Rustc { compiler: build_compiler, target: target_compiler.host });
+ builder.ensure(Rustc {
+ compiler: build_compiler,
+ target: target_compiler.host,
+ });
+ for &backend in build.config.rust_codegen_backends.iter() {
+ builder.ensure(CodegenBackend {
+ compiler: build_compiler,
+ target: target_compiler.host,
+ backend,
+ });
+ }
}
let stage = target_compiler.stage;
}
}
- let out_dir = build.cargo_out(build_compiler, Mode::Librustc, host);
+ copy_codegen_backends_to_sysroot(builder,
+ build_compiler,
+ target_compiler);
// Link the compiler binary itself into place
+ let out_dir = build.cargo_out(build_compiler, Mode::Librustc, host);
let rustc = out_dir.join(exe("rustc", &*host));
let bindir = sysroot.join("bin");
t!(fs::create_dir_all(&bindir));
}
}
-pub fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path, is_check: bool) {
+pub fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path, is_check: bool)
+ -> Vec<PathBuf>
+{
// Instruct Cargo to give us json messages on stdout, critically leaving
// stderr as piped so we can get those pretty colors.
cargo.arg("--message-format").arg("json")
let mut new_contents = Vec::new();
let mut max = None;
let mut max_path = None;
- for dep in deps {
- let mtime = mtime(&dep);
+ for dep in deps.iter() {
+ let mtime = mtime(dep);
if Some(mtime) > max {
max = Some(mtime);
max_path = Some(dep.clone());
if stamp_contents == new_contents && max <= stamp_mtime {
build.verbose(&format!("not updating {:?}; contents equal and {} <= {}",
stamp, max, stamp_mtime));
- return
+ return deps
}
if max > stamp_mtime {
build.verbose(&format!("updating {:?} as {:?} changed", stamp, max_path));
build.verbose(&format!("updating {:?} as deps changed", stamp));
}
t!(t!(File::create(stamp)).write_all(&new_contents));
+ deps
}
// rust codegen options
pub rust_optimize: bool,
pub rust_codegen_units: Option<u32>,
+ pub rust_thinlto: bool,
pub rust_debug_assertions: bool,
pub rust_debuginfo: bool,
pub rust_debuginfo_lines: bool,
pub rust_optimize_tests: bool,
pub rust_debuginfo_tests: bool,
pub rust_dist_src: bool,
+ pub rust_codegen_backends: Vec<Interned<String>>,
pub build: Interned<String>,
pub hosts: Vec<Interned<String>>,
pub debug_jemalloc: bool,
pub use_jemalloc: bool,
pub backtrace: bool, // support for RUST_BACKTRACE
+ pub wasm_syscall: bool,
// misc
pub low_priority: bool,
struct Rust {
optimize: Option<bool>,
codegen_units: Option<u32>,
+ thinlto: Option<bool>,
debug_assertions: Option<bool>,
debuginfo: Option<bool>,
debuginfo_lines: Option<bool>,
quiet_tests: Option<bool>,
test_miri: Option<bool>,
save_toolstates: Option<String>,
+ codegen_backends: Option<Vec<String>>,
+ wasm_syscall: Option<bool>,
}
/// TOML representation of how each build target is configured.
config.ignore_git = false;
config.rust_dist_src = true;
config.test_miri = false;
+ config.rust_codegen_backends = vec![INTERNER.intern_str("llvm")];
config.on_fail = flags.on_fail;
config.stage = flags.stage;
// Store off these values as options because if they're not provided
// we'll infer default values for them later
+ let mut thinlto = None;
let mut llvm_assertions = None;
let mut debuginfo_lines = None;
let mut debuginfo_only_std = None;
optimize = rust.optimize;
ignore_git = rust.ignore_git;
debug_jemalloc = rust.debug_jemalloc;
+ thinlto = rust.thinlto;
set(&mut config.rust_optimize_tests, rust.optimize_tests);
set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests);
set(&mut config.codegen_tests, rust.codegen_tests);
set(&mut config.rust_dist_src, rust.dist_src);
set(&mut config.quiet_tests, rust.quiet_tests);
set(&mut config.test_miri, rust.test_miri);
+ set(&mut config.wasm_syscall, rust.wasm_syscall);
config.rustc_parallel_queries = rust.experimental_parallel_queries.unwrap_or(false);
config.rustc_default_linker = rust.default_linker.clone();
config.musl_root = rust.musl_root.clone().map(PathBuf::from);
config.save_toolstates = rust.save_toolstates.clone().map(PathBuf::from);
+ if let Some(ref backends) = rust.codegen_backends {
+ config.rust_codegen_backends = backends.iter()
+ .map(|s| INTERNER.intern_str(s))
+ .collect();
+ }
+
match rust.codegen_units {
Some(0) => config.rust_codegen_units = Some(num_cpus::get() as u32),
Some(n) => config.rust_codegen_units = Some(n),
"stable" | "beta" | "nightly" => true,
_ => false,
};
+ config.rust_thinlto = thinlto.unwrap_or(true);
config.rust_debuginfo_lines = debuginfo_lines.unwrap_or(default);
config.rust_debuginfo_only_std = debuginfo_only_std.unwrap_or(default);
o("dist-src", "rust.dist-src", "when building tarballs enables building a source tarball")
o("cargo-openssl-static", "build.openssl-static", "static openssl in cargo")
o("profiler", "build.profiler", "build the profiler runtime")
+o("emscripten", None, "compile the emscripten backend as well as LLVM")
# Optimization and debugging options. These may be overridden by the release
# channel, etc.
o("optimize", "rust.optimize", "build optimized rust code")
+o("thinlto", "rust.thinlto", "build Rust with ThinLTO enabled")
o("optimize-llvm", "llvm.optimize", "build optimized LLVM")
o("llvm-assertions", "llvm.assertions", "build LLVM with assertions")
o("debug-assertions", "rust.debug-assertions", "build with debugging assertions")
"armv7-unknown-linux-musleabihf install directory")
v("musl-root-aarch64", "target.aarch64-unknown-linux-musl.musl-root",
"aarch64-unknown-linux-musl install directory")
+v("musl-root-mips", "target.mips-unknown-linux-musl.musl-root",
+ "mips-unknown-linux-musl install directory")
+v("musl-root-mipsel", "target.mipsel-unknown-linux-musl.musl-root",
+ "mipsel-unknown-linux-musl install directory")
v("qemu-armhf-rootfs", "target.arm-unknown-linux-gnueabihf.qemu-rootfs",
"rootfs in qemu testing, you probably don't want to use this")
v("qemu-aarch64-rootfs", "target.aarch64-unknown-linux-gnu.qemu-rootfs",
set('build.host', value.split(','))
elif option.name == 'target':
set('build.target', value.split(','))
+ elif option.name == 'emscripten':
+ set('rust.codegen-backends', ['llvm', 'emscripten'])
elif option.name == 'option-checking':
# this was handled above
pass
}
}
+ // Copy over the codegen backends
+ let backends_src = builder.sysroot_codegen_backends(compiler);
+ let backends_rel = backends_src.strip_prefix(&src).unwrap();
+ let backends_dst = image.join(&backends_rel);
+ t!(fs::create_dir_all(&backends_dst));
+ cp_r(&backends_src, &backends_dst);
+
// Man pages
t!(fs::create_dir_all(image.join("share/man/man1")));
let man_src = build.src.join("src/doc/man");
t!(fs::create_dir_all(&dst));
let mut src = builder.sysroot_libdir(compiler, target).to_path_buf();
src.pop(); // Remove the trailing /lib folder from the sysroot_libdir
- cp_r(&src, &dst);
+ cp_filtered(&src, &dst, &|path| {
+ path.file_name().and_then(|s| s.to_str()) != Some("codegen-backends")
+ });
let mut cmd = rust_installer(builder);
cmd.arg("generate")
t!(symlink_dir_force(&my_out, &out_dir));
let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "doc");
- compile::rustc_cargo(build, target, &mut cargo);
+ compile::rustc_cargo(build, &mut cargo);
if build.config.compiler_docs {
// src/rustc/Cargo.toml contains a bin crate called rustc which
if self.config.profiler {
features.push_str(" profiler");
}
+ if self.config.wasm_syscall {
+ features.push_str(" wasm_syscall");
+ }
features
}
if self.config.use_jemalloc {
features.push_str(" jemalloc");
}
- if self.config.llvm_enabled {
- features.push_str(" llvm");
- }
features
}
self.out.join(&*target).join("llvm")
}
+ fn emscripten_llvm_out(&self, target: Interned<String>) -> PathBuf {
+ self.out.join(&*target).join("llvm-emscripten")
+ }
+
/// Output directory for all documentation for a target
fn doc_out(&self, target: Interned<String>) -> PathBuf {
self.out.join(&*target).join("doc")
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::{Read, Write};
-use std::path::Path;
+use std::path::{Path, PathBuf};
use std::process::Command;
use build_helper::output;
use cc;
use Build;
-use util;
+use util::{self, exe};
use build_helper::up_to_date;
use builder::{Builder, RunConfig, ShouldRun, Step};
use cache::Interned;
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct Llvm {
pub target: Interned<String>,
+ // When set, build the separate Emscripten LLVM tree (src/llvm-emscripten,
+ // JSBackend only) instead of src/llvm.
+ pub emscripten: bool,
}
impl Step for Llvm {
- type Output = ();
+ type Output = PathBuf; // path to llvm-config
+
const ONLY_HOSTS: bool = true;
fn should_run(run: ShouldRun) -> ShouldRun {
- run.path("src/llvm")
+ run.path("src/llvm").path("src/llvm-emscripten")
}
fn make_run(run: RunConfig) {
- run.builder.ensure(Llvm { target: run.target })
+ let emscripten = run.path.map(|p| {
+ p.ends_with("llvm-emscripten")
+ }).unwrap_or(false);
+ run.builder.ensure(Llvm {
+ target: run.target,
+ emscripten,
+ });
}
/// Compile LLVM for `target`.
- fn run(self, builder: &Builder) {
+ fn run(self, builder: &Builder) -> PathBuf {
let build = builder.build;
let target = self.target;
-
- // If we're not compiling for LLVM bail out here.
- if !build.config.llvm_enabled {
- return;
- }
+ let emscripten = self.emscripten;
// If we're using a custom LLVM bail out here, but we can only use a
// custom LLVM for the build triple.
- if let Some(config) = build.config.target_config.get(&target) {
- if let Some(ref s) = config.llvm_config {
- return check_llvm_version(build, s);
+ if !self.emscripten {
+ if let Some(config) = build.config.target_config.get(&target) {
+ if let Some(ref s) = config.llvm_config {
+ check_llvm_version(build, s);
+ return s.to_path_buf()
+ }
}
}
let mut rebuild_trigger_contents = String::new();
t!(t!(File::open(&rebuild_trigger)).read_to_string(&mut rebuild_trigger_contents));
- let out_dir = build.llvm_out(target);
+ let (out_dir, llvm_config_ret_dir) = if emscripten {
+ let dir = build.emscripten_llvm_out(target);
+ let config_dir = dir.join("bin");
+ (dir, config_dir)
+ } else {
+ (build.llvm_out(target),
+ build.llvm_out(build.config.build).join("bin"))
+ };
let done_stamp = out_dir.join("llvm-finished-building");
+ let build_llvm_config = llvm_config_ret_dir
+ .join(exe("llvm-config", &*build.config.build));
if done_stamp.exists() {
let mut done_contents = String::new();
t!(t!(File::open(&done_stamp)).read_to_string(&mut done_contents));
// If LLVM was already built previously and contents of the rebuild-trigger file
// didn't change from the previous build, then no action is required.
if done_contents == rebuild_trigger_contents {
- return
+ return build_llvm_config
}
}
let _folder = build.fold_output(|| "llvm");
- println!("Building LLVM for {}", target);
+ let descriptor = if emscripten { "Emscripten " } else { "" };
+ println!("Building {}LLVM for {}", descriptor, target);
let _time = util::timeit();
t!(fs::create_dir_all(&out_dir));
// http://llvm.org/docs/CMake.html
- let mut cfg = cmake::Config::new(build.src.join("src/llvm"));
+ let root = if self.emscripten { "src/llvm-emscripten" } else { "src/llvm" };
+ let mut cfg = cmake::Config::new(build.src.join(root));
if build.config.ninja {
cfg.generator("Ninja");
}
(true, true) => "RelWithDebInfo",
};
- // NOTE: remember to also update `config.toml.example` when changing the defaults!
- let llvm_targets = match build.config.llvm_targets {
- Some(ref s) => s,
- None => "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon",
+ // NOTE: remember to also update `config.toml.example` when changing the
+ // defaults!
+ let llvm_targets = if self.emscripten {
+ "JSBackend"
+ } else {
+ match build.config.llvm_targets {
+ Some(ref s) => s,
+ None => "X86;ARM;AArch64;Mips;PowerPC;SystemZ;MSP430;Sparc;NVPTX;Hexagon",
+ }
};
- let llvm_exp_targets = &build.config.llvm_experimental_targets;
+ let llvm_exp_targets = if self.emscripten {
+ ""
+ } else {
+ &build.config.llvm_experimental_targets[..]
+ };
let assertions = if build.config.llvm_assertions {"ON"} else {"OFF"};
// http://llvm.org/docs/HowToCrossCompileLLVM.html
if target != build.build {
- builder.ensure(Llvm { target: build.build });
+ builder.ensure(Llvm {
+ target: build.build,
+ emscripten: false,
+ });
// FIXME: if the llvm root for the build triple is overridden then we
// should use llvm-tblgen from there, also should verify that it
// actually exists most of the time in normal installs of LLVM.
cfg.build();
t!(t!(File::create(&done_stamp)).write_all(rebuild_trigger_contents.as_bytes()));
+
+ build_llvm_config
}
}
}
// Make sure musl-root is valid
- if target.contains("musl") && !target.contains("mips") {
+ if target.contains("musl") {
// If this is a native target (host is also musl) and no musl-root is given,
// fall back to the system toolchain in /usr before giving up
if build.musl_root(*target).is_none() && build.config.build == *target {
mode: "incremental",
suite: "incremental-fulldeps",
},
- Test { path: "src/test/run-make", mode: "run-make", suite: "run-make" },
Test { path: "src/test/rustdoc", mode: "rustdoc", suite: "rustdoc" },
Test { path: "src/test/pretty", mode: "pretty", suite: "pretty" },
Test { path: "src/test/run-pass-valgrind/pretty", mode: "pretty", suite: "run-pass-valgrind" },
Test { path: "src/test/run-pass-fulldeps/pretty", mode: "pretty", suite: "run-pass-fulldeps" },
Test { path: "src/test/run-fail-fulldeps/pretty", mode: "pretty", suite: "run-fail-fulldeps" },
+ Test { path: "src/test/run-make", mode: "run-make", suite: "run-make" },
];
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
cmd.env("PROFILER_SUPPORT", "1");
}
+ cmd.env("RUST_TEST_TMPDIR", build.out.join("tmp"));
+
cmd.arg("--adb-path").arg("adb");
cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
if target.contains("android") {
}
Mode::Librustc => {
builder.ensure(compile::Rustc { compiler, target });
- compile::rustc_cargo(build, target, &mut cargo);
+ compile::rustc_cargo(build, &mut cargo);
("librustc", "rustc-main")
}
_ => panic!("can only test libraries"),
cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
build.config.nodejs.as_ref().expect("nodejs not configured"));
} else if target.starts_with("wasm32") {
+ // Warn about running tests without the `wasm_syscall` feature enabled.
+ // The javascript shim implements the syscall interface so that test
+ // output can be correctly reported.
+ if !build.config.wasm_syscall {
+ println!("Libstd was built without `wasm_syscall` feature enabled: \
+ test output may not be visible.");
+ }
+
// On the wasm32-unknown-unknown target we're using LTO which is
// incompatible with `-C prefer-dynamic`, so disable that here
cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
ENV TARGETS=asmjs-unknown-emscripten
-ENV RUST_CONFIGURE_ARGS --target=$TARGETS
+ENV RUST_CONFIGURE_ARGS --target=$TARGETS --enable-emscripten
ENV SCRIPT python2.7 ../x.py test --target $TARGETS
WORKDIR /build/
COPY scripts/musl.sh /build/
-RUN CC=gcc CFLAGS="-m32 -fPIC -Wa,-mrelax-relocations=no" \
+RUN CC=gcc CFLAGS="-m32 -Wa,-mrelax-relocations=no" \
CXX=g++ CXXFLAGS="-m32 -Wa,-mrelax-relocations=no" \
bash musl.sh i686 --target=i686 && \
- CC=gcc CFLAGS="-march=pentium -m32 -fPIC -Wa,-mrelax-relocations=no" \
+ CC=gcc CFLAGS="-march=pentium -m32 -Wa,-mrelax-relocations=no" \
CXX=g++ CXXFLAGS="-march=pentium -m32 -Wa,-mrelax-relocations=no" \
bash musl.sh i586 --target=i586 && \
rm -rf /build
--host=$HOSTS \
--enable-extended \
--enable-sanitizers \
- --enable-profiler
+ --enable-profiler \
+ --enable-emscripten
ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS
# This is the only builder which will create source tarballs
COPY dist-various-1/install-x86_64-redox.sh /build
RUN ./install-x86_64-redox.sh
+COPY dist-various-1/install-mips-musl.sh /build
+RUN ./install-mips-musl.sh
+
+COPY dist-various-1/install-mipsel-musl.sh /build
+RUN ./install-mipsel-musl.sh
+
+# Suppress some warnings in the openwrt toolchains we downloaded
+ENV STAGING_DIR=/tmp
+
COPY scripts/musl.sh /build
RUN env \
CC=arm-linux-gnueabi-gcc CFLAGS="-march=armv6 -marm" \
CC=aarch64-linux-gnu-gcc \
CXX=aarch64-linux-gnu-g++ \
bash musl.sh aarch64 && \
+ env \
+ CC=mips-openwrt-linux-gcc \
+ CXX=mips-openwrt-linux-g++ \
+ bash musl.sh mips && \
+ env \
+ CC=mipsel-openwrt-linux-gcc \
+ CXX=mipsel-openwrt-linux-g++ \
+ bash musl.sh mipsel && \
rm -rf /build/*
-COPY dist-various-1/install-mips-musl.sh /build
-RUN ./install-mips-musl.sh
-
-COPY dist-various-1/install-mipsel-musl.sh /build
-RUN ./install-mipsel-musl.sh
-
ENV TARGETS=asmjs-unknown-emscripten
ENV TARGETS=$TARGETS,wasm32-unknown-emscripten
ENV TARGETS=$TARGETS,x86_64-rumprun-netbsd
CC_armv5te_unknown_linux_gnueabi=arm-linux-gnueabi-gcc \
CFLAGS_armv5te_unknown_linux_gnueabi="-march=armv5te -marm -mfloat-abi=soft"
-# Suppress some warnings in the openwrt toolchains we downloaded
-ENV STAGING_DIR=/tmp
-
ENV RUST_CONFIGURE_ARGS \
- --enable-extended \
--target=$TARGETS \
--musl-root-arm=/musl-arm \
--musl-root-armhf=/musl-armhf \
--musl-root-armv7=/musl-armv7 \
- --musl-root-aarch64=/musl-aarch64
+ --musl-root-aarch64=/musl-aarch64 \
+ --musl-root-mips=/musl-mips \
+ --musl-root-mipsel=/musl-mipsel \
+ --enable-emscripten
+
ENV SCRIPT python2.7 ../x.py dist --target $TARGETS
# sccache
--host=$HOSTS \
--enable-extended \
--enable-sanitizers \
- --enable-profiler
+ --enable-profiler \
+ --enable-emscripten
ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS
# This is the only builder which will create source tarballs
COPY scripts/musl.sh /build/
# We need to mitigate rust-lang/rust#34978 when compiling musl itself as well
RUN CC=gcc \
- CFLAGS="-fPIC -Wa,-mrelax-relocations=no" \
+ CFLAGS="-Wa,-mrelax-relocations=no" \
CXX=g++ \
CXXFLAGS="-Wa,-mrelax-relocations=no" \
bash musl.sh x86_64 && rm -rf /build
TAG=$1
shift
+export CFLAGS="-fPIC $CFLAGS"
+
MUSL=musl-1.1.18
# may have been downloaded in a previous run
cd ..
-LLVM=39
+LLVM=60
+
# may have been downloaded in a previous run
if [ ! -d libunwind-release_$LLVM ]; then
curl -L https://github.com/llvm-mirror/llvm/archive/release_$LLVM.tar.gz | tar xzf -
curl -L https://github.com/llvm-mirror/libunwind/archive/release_$LLVM.tar.gz | tar xzf -
- # Whoa what's this mysterious patch we're applying to libunwind! Why are we
- # swapping the values of ESP/EBP in libunwind?!
- #
- # Discovered in #35599 it turns out that the vanilla build of libunwind is not
- # suitable for unwinding i686 musl. After some investigation it ended up
- # looking like the register values for ESP/EBP were indeed incorrect (swapped)
- # in the source. Similar commits in libunwind (r280099 and r282589) have noticed
- # this for other platforms, and we just need to realize it for musl linux as
- # well.
- #
- # More technical info can be found at #35599
- cd libunwind-release_$LLVM
- patch -Np1 << EOF
-diff --git a/include/libunwind.h b/include/libunwind.h
-index c5b9633..1360eb2 100644
---- a/include/libunwind.h
-+++ b/include/libunwind.h
-@@ -151,8 +151,8 @@ enum {
- UNW_X86_ECX = 1,
- UNW_X86_EDX = 2,
- UNW_X86_EBX = 3,
-- UNW_X86_EBP = 4,
-- UNW_X86_ESP = 5,
-+ UNW_X86_ESP = 4,
-+ UNW_X86_EBP = 5,
- UNW_X86_ESI = 6,
- UNW_X86_EDI = 7
- };
-fi
-EOF
- cd ..
fi
mkdir libunwind-build
# If this PR is intended to update one of these tools, do not let the build pass
# when they do not test-pass.
-for TOOL in rls rustfmt miri clippy; do
+for TOOL in rls rustfmt clippy; do
echo "Verifying status of $TOOL..."
if echo "$CHANGED_FILES" | grep -q "^M[[:blank:]]src/tools/$TOOL$"; then
echo "This PR updated 'src/tools/$TOOL', verifying if status is 'test-pass'..."
# Update the cache (a pristine copy of the rust source master)
retry sh -c "rm -rf $cache_src_dir && mkdir -p $cache_src_dir && \
git clone --depth 1 https://github.com/rust-lang/rust.git $cache_src_dir"
-(cd $cache_src_dir && git rm src/llvm)
+if [ -d $cache_src_dir/src/llvm ]; then
+ (cd $cache_src_dir && git rm src/llvm)
+fi
+if [ -d $cache_src_dir/src/llvm-emscripten ]; then
+ (cd $cache_src_dir && git rm src/llvm-emscripten)
+fi
retry sh -c "cd $cache_src_dir && \
git submodule deinit -f . && git submodule sync && git submodule update --init"
# http://stackoverflow.com/questions/12641469/list-submodules-in-a-git-repository
modules="$(git config --file .gitmodules --get-regexp '\.path$' | cut -d' ' -f2)"
for module in $modules; do
- if [ "$module" = src/llvm ]; then
- commit="$(git ls-tree HEAD src/llvm | awk '{print $3}')"
- git rm src/llvm
+ if [ "$module" = src/llvm ] || [ "$module" = src/llvm-emscripten ]; then
+ commit="$(git ls-tree HEAD $module | awk '{print $3}')"
+ git rm $module
retry sh -c "rm -f $commit.tar.gz && \
curl -sSL -O https://github.com/rust-lang/llvm/archive/$commit.tar.gz"
tar -C src/ -xf "$commit.tar.gz"
rm "$commit.tar.gz"
- mv "src/llvm-$commit" src/llvm
+ mv "src/llvm-$commit" $module
continue
fi
if [ ! -e "$cache_src_dir/$module/.git" ]; then
if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=$RUST_RELEASE_CHANNEL"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-static-stdcpp"
+ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-thinlto"
if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions"
-Subproject commit 194eb8d5f1753fb5f4501011cebdc1b585712474
+Subproject commit a645960fe48946153936dd5628df4a90bd837981
-Subproject commit 1d791b55b23ec5389fbd5b3cee80db3f8bbdd162
+Subproject commit e6a5d5d10aa2fde0baed7b29bf672bd9f3af8962
types and such.
* Traits like `Send` and `Sync` are automatically implemented for a `Generator`
- depending on the captured variables of the environment. Unlike closures though
+ depending on the captured variables of the environment. Unlike closures,
generators also depend on variables live across suspension points. This means
that although the ambient environment may be `Send` or `Sync`, the generator
itself may not be due to internal variables live across `yield` points being
- not-`Send` or not-`Sync`. Note, though, that generators, like closures, do
+ not-`Send` or not-`Sync`. Note that generators, like closures, do
not implement traits like `Copy` or `Clone` automatically.
* Whenever a generator is dropped it will drop all captured environment
### Generators as state machines
-In the compiler generators are currently compiled as state machines. Each
+In the compiler, generators are currently compiled as state machines. Each
`yield` expression will correspond to a different state that stores all live
variables over that suspension point. Resumption of a generator will dispatch on
the current state and then execute internally until a `yield` is reached, at
p
}
-#[lang = "exchange_free"]
-unsafe fn deallocate(ptr: *mut u8, _size: usize, _align: usize) {
- libc::free(ptr as *mut libc::c_void)
-}
-
#[lang = "box_free"]
unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
- deallocate(ptr as *mut u8, ::core::mem::size_of_val(&*ptr), ::core::mem::align_of_val(&*ptr));
+ libc::free(ptr as *mut libc::c_void)
}
#[start]
-fn main(argc: isize, argv: *const *const u8) -> isize {
- let x = box 1;
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let _x = box 1;
0
}
#[lang = "eh_personality"] extern fn rust_eh_personality() {}
#[lang = "panic_fmt"] extern fn rust_begin_panic() -> ! { unsafe { intrinsics::abort() } }
-# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
-# #[no_mangle] pub extern fn rust_eh_register_frames () {}
-# #[no_mangle] pub extern fn rust_eh_unregister_frames () {}
+#[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {}
+#[no_mangle] pub extern fn rust_eh_register_frames () {}
+#[no_mangle] pub extern fn rust_eh_unregister_frames () {}
```
Note the use of `abort`: the `exchange_malloc` lang item is assumed to
Lang items are loaded lazily by the compiler; e.g. if one never uses
`Box` then there is no need to define functions for `exchange_malloc`
-and `exchange_free`. `rustc` will emit an error when an item is needed
+and `box_free`. `rustc` will emit an error when an item is needed
but not found in the current crate or any that it depends on.
Most lang items are defined by `libcore`, but if you're trying to build
- `phantom_data`: `libcore/marker.rs`
- `freeze`: `libcore/marker.rs`
- `debug_trait`: `libcore/fmt/mod.rs`
- - `non_zero`: `libcore/nonzero.rs`
\ No newline at end of file
+ - `non_zero`: `libcore/nonzero.rs`
+++ /dev/null
-# `match_beginning_vert`
-
-The tracking issue for this feature is [#44101].
-
-With this feature enabled, you are allowed to add a '|' to the beginning of a
-match arm:
-
-```rust
-#![feature(match_beginning_vert)]
-
-enum Foo { A, B, C }
-
-fn main() {
- let x = Foo::A;
- match x {
- | Foo::A
- | Foo::B => println!("AB"),
- | Foo::C => println!("C"),
- }
-}
-```
-
-[#44101]: https://github.com/rust-lang/rust/issues/44101
\ No newline at end of file
+++ /dev/null
-# `use_nested_groups`
-
-The tracking issue for this feature is: [#44494]
-
-[#44494]: https://github.com/rust-lang/rust/issues/44494
-
-------------------------
-
-The `use_nested_groups` feature allows you to import multiple items from a
-complex module tree easily, by nesting different imports in the same
-declaration. For example:
-
-```rust
-#![feature(use_nested_groups)]
-# #![allow(unused_imports, dead_code)]
-#
-# mod foo {
-# pub mod bar {
-# pub type Foo = ();
-# }
-# pub mod baz {
-# pub mod quux {
-# pub type Bar = ();
-# }
-# }
-# }
-
-use foo::{
- bar::{self, Foo},
- baz::{*, quux::Bar},
-};
-#
-# fn main() {}
-```
-
-## Snippet for the book's new features appendix
-
-When stabilizing, add this to
-`src/doc/book/second-edition/src/appendix-07-newest-features.md`:
-
-### Nested groups in `use` declarations
-
-If you have a complex module tree with many different submodules and you need
-to import a few items from each one, it might be useful to group all the
-imports in the same declaration to keep your code clean and avoid repeating the
-base modules' name.
-
-The `use` declaration supports nesting to help you in those cases, both with
-simple imports and glob ones. For example this snippets imports `bar`, `Foo`,
-all the items in `baz` and `Bar`:
-
-```rust
-# #![feature(use_nested_groups)]
-# #![allow(unused_imports, dead_code)]
-#
-# mod foo {
-# pub mod bar {
-# pub type Foo = ();
-# }
-# pub mod baz {
-# pub mod quux {
-# pub type Bar = ();
-# }
-# }
-# }
-#
-use foo::{
- bar::{self, Foo},
- baz::{*, quux::Bar},
-};
-#
-# fn main() {}
-```
-
-## Updated reference
-
-When stabilizing, replace the shortcut list in
-`src/doc/reference/src/items/use-declarations.md` with this updated one:
-
-* Simultaneously binding a list of paths with a common prefix, using the
- glob-like brace syntax `use a::b::{c, d, e::f, g::h::i};`
-* Simultaneously binding a list of paths with a common prefix and their common
- parent module, using the `self` keyword, such as `use a::b::{self, c, d::e};`
-* Rebinding the target name as a new local name, using the syntax `use p::q::r
- as x;`. This can also be used with the last two features:
- `use a::b::{self as ab, c as abc}`.
-* Binding all paths matching a given prefix, using the asterisk wildcard syntax
- `use a::b::*;`.
-* Nesting groups of the previous features multiple times, such as
- `use a::b::{self as ab, c d::{*, e::f}};`
let memory = null;
+function viewstruct(data, fields) {
+ return new Uint32Array(memory.buffer).subarray(data/4, data/4 + fields);
+}
+
function copystr(a, b) {
- if (memory === null) {
- return null
- }
- let view = new Uint8Array(memory.buffer).slice(a, a + b);
+ let view = new Uint8Array(memory.buffer).subarray(a, a + b);
return String.fromCharCode.apply(null, view);
}
+function syscall_write([fd, ptr, len]) {
+ let s = copystr(ptr, len);
+ switch (fd) {
+ case 1: process.stdout.write(s); break;
+ case 2: process.stderr.write(s); break;
+ }
+}
+
+function syscall_exit([code]) {
+ process.exit(code);
+}
+
+function syscall_args(params) {
+ let [ptr, len] = params;
+
+ // Calculate total required buffer size
+ let totalLen = -1;
+ for (let i = 2; i < process.argv.length; ++i) {
+ totalLen += Buffer.byteLength(process.argv[i]) + 1;
+ }
+ if (totalLen < 0) { totalLen = 0; }
+ params[2] = totalLen;
+
+ // If buffer is large enough, copy data
+ if (len >= totalLen) {
+ let view = new Uint8Array(memory.buffer);
+ for (let i = 2; i < process.argv.length; ++i) {
+ let value = process.argv[i];
+ Buffer.from(value).copy(view, ptr);
+ ptr += Buffer.byteLength(process.argv[i]) + 1;
+ }
+ }
+}
+
+function syscall_getenv(params) {
+ let [keyPtr, keyLen, valuePtr, valueLen] = params;
+
+ let key = copystr(keyPtr, keyLen);
+ let value = process.env[key];
+
+ if (value == null) {
+ params[4] = 0xFFFFFFFF;
+ } else {
+ let view = new Uint8Array(memory.buffer);
+ let totalLen = Buffer.byteLength(value);
+ params[4] = totalLen;
+ if (valueLen >= totalLen) {
+ Buffer.from(value).copy(view, valuePtr);
+ }
+ }
+}
+
+function syscall_time(params) {
+ let t = Date.now();
+ let secs = Math.floor(t / 1000);
+ let millis = t % 1000;
+ params[1] = Math.floor(secs / 0x100000000);
+ params[2] = secs % 0x100000000;
+ params[3] = Math.floor(millis * 1000000);
+}
+
let imports = {};
imports.env = {
// These are generated by LLVM itself for various intrinsic calls. Hopefully
log10: Math.log10,
log10f: Math.log10,
- // These are called in src/libstd/sys/wasm/stdio.rs and are used when
- // debugging is enabled.
- rust_wasm_write_stdout: function(a, b) {
- let s = copystr(a, b);
- if (s !== null) {
- process.stdout.write(s);
- }
- },
- rust_wasm_write_stderr: function(a, b) {
- let s = copystr(a, b);
- if (s !== null) {
- process.stderr.write(s);
- }
- },
-
- // These are called in src/libstd/sys/wasm/args.rs and are used when
- // debugging is enabled.
- rust_wasm_args_count: function() {
- if (memory === null)
- return 0;
- return process.argv.length - 2;
- },
- rust_wasm_args_arg_size: function(i) {
- return Buffer.byteLength(process.argv[i + 2]);
- },
- rust_wasm_args_arg_fill: function(idx, ptr) {
- let arg = process.argv[idx + 2];
- let view = new Uint8Array(memory.buffer);
- Buffer.from(arg).copy(view, ptr);
- },
-
- // These are called in src/libstd/sys/wasm/os.rs and are used when
- // debugging is enabled.
- rust_wasm_getenv_len: function(a, b) {
- let key = copystr(a, b);
- if (key === null) {
- return -1;
+ rust_wasm_syscall: function(index, data) {
+ switch (index) {
+ case 1: syscall_write(viewstruct(data, 3)); return true;
+ case 2: syscall_exit(viewstruct(data, 1)); return true;
+ case 3: syscall_args(viewstruct(data, 3)); return true;
+ case 4: syscall_getenv(viewstruct(data, 5)); return true;
+ case 6: syscall_time(viewstruct(data, 4)); return true;
+ default:
+ console.log("Unsupported syscall: " + index);
+ return false;
}
- if (!(key in process.env)) {
- return -1;
- }
- return Buffer.byteLength(process.env[key]);
- },
- rust_wasm_getenv_data: function(a, b, ptr) {
- let key = copystr(a, b);
- let value = process.env[key];
- let view = new Uint8Array(memory.buffer);
- Buffer.from(value).copy(view, ptr);
- },
-};
-
-let module_imports = WebAssembly.Module.imports(m);
-
-for (var i = 0; i < module_imports.length; i++) {
- let imp = module_imports[i];
- if (imp.module != 'env') {
- continue
}
- if (imp.name == 'memory' && imp.kind == 'memory') {
- memory = new WebAssembly.Memory({initial: 20});
- imports.env.memory = memory;
- }
-}
+};
let instance = new WebAssembly.Instance(m, imports);
+memory = instance.exports.memory;
+try {
+ instance.exports.main();
+} catch (e) {
+ console.error(e);
+ process.exit(101);
+}
{
type Output = V;
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `BTreeMap`.
#[inline]
fn index(&self, key: &Q) -> &V {
self.get(key).expect("no entry found for key")
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(stage0)]
pub fn of<T: ?Sized + 'static>() -> TypeId {
TypeId {
t: unsafe { intrinsics::type_id::<T>() },
}
}
+
+ /// Returns the `TypeId` of the type this generic function has been
+ /// instantiated with.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::{Any, TypeId};
+ ///
+ /// fn is_string<T: ?Sized + Any>(_s: &T) -> bool {
+ /// TypeId::of::<String>() == TypeId::of::<T>()
+ /// }
+ ///
+ /// fn main() {
+ /// assert_eq!(is_string(&0), false);
+ /// assert_eq!(is_string(&"cookie monster".to_string()), true);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature="const_type_id")]
+ #[cfg(not(stage0))]
+ pub const fn of<T: ?Sized + 'static>() -> TypeId {
+ TypeId {
+ t: unsafe { intrinsics::type_id::<T>() },
+ }
+ }
}
/// ```
#[stable(feature = "move_cell", since = "1.17.0")]
pub fn into_inner(self) -> T {
- unsafe { self.value.into_inner() }
+ self.value.into_inner()
}
}
// compiler statically verifies that it is not currently borrowed.
// Therefore the following assertion is just a `debug_assert!`.
debug_assert!(self.borrow.get() == UNUSED);
- unsafe { self.value.into_inner() }
+ self.value.into_inner()
}
/// Replaces the wrapped value with a new one, returning the old value,
/// Unwraps the value.
///
- /// # Safety
- ///
- /// This function is unsafe because this thread or another thread may currently be
- /// inspecting the inner value.
- ///
/// # Examples
///
/// ```
///
/// let uc = UnsafeCell::new(5);
///
- /// let five = unsafe { uc.into_inner() };
+ /// let five = uc.into_inner();
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub unsafe fn into_inner(self) -> T {
+ pub fn into_inner(self) -> T {
self.value
}
}
///
/// # Generic Implementations
///
-/// - `AsMut` auto-dereferences if the inner type is a reference or a mutable
-/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type
-/// `&mut Foo` or `&&mut Foo`)
+/// - `AsMut` auto-dereferences if the inner type is a mutable reference
+/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo`
+/// or `&mut &mut Foo`)
///
/// # Examples
///
#[stable(feature = "rust1", since = "1.0.0")]
impl Debug for bool {
+ #[inline]
fn fmt(&self, f: &mut Formatter) -> Result {
Display::fmt(self, f)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Debug for () {
+ #[inline]
fn fmt(&self, f: &mut Formatter) -> Result {
f.pad("()")
}
($T:ident) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for $T {
+ #[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, f)
}
/// ptr::copy_nonoverlapping(y, x, 1);
/// ptr::copy_nonoverlapping(&t, y, 1);
///
- /// // y and t now point to the same thing, but we need to completely forget `tmp`
+ /// // y and t now point to the same thing, but we need to completely forget `t`
/// // because it's no longer relevant.
/// mem::forget(t);
/// }
use iter_private::TrustedRandomAccess;
use ops::Try;
use usize;
+use intrinsics;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::iterator::Iterator;
(f(inner_hint.0), inner_hint.1.map(f))
}
}
+
+ #[inline]
+ fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
+ if self.first_take {
+ self.first_take = false;
+ let first = self.iter.next();
+ if n == 0 {
+ return first;
+ }
+ n -= 1;
+ }
+ // n and self.step are indices, we need to add 1 to get the number of elements
+ // When calling `.nth`, we need to subtract 1 again to convert back to an index
+ // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1`
+ let mut step = self.step + 1;
+ // n + 1 could overflow
+ // thus, if n is usize::MAX, instead of adding one, we call .nth(step - 1) (consuming one full step)
+ if n == usize::MAX {
+ self.iter.nth(step - 1);
+ } else {
+ n += 1;
+ }
+
+ // overflow handling
+ loop {
+ let mul = n.checked_mul(step);
+ if unsafe { intrinsics::likely(mul.is_some()) } {
+ return self.iter.nth(mul.unwrap() - 1);
+ }
+ let div_n = usize::MAX / n;
+ let div_step = usize::MAX / step;
+ let nth_n = div_n * n;
+ let nth_step = div_step * step;
+ let nth = if nth_n > nth_step {
+ step -= div_n;
+ nth_n
+ } else {
+ n -= div_step;
+ nth_step
+ };
+ self.iter.nth(nth - 1);
+ }
+ }
}
// StepBy can only make the iterator shorter, so the len will still fit.
#![feature(untagged_unions)]
#![feature(unwind_attributes)]
#![feature(doc_spotlight)]
+#![feature(rustc_const_unstable)]
#[prelude_import]
#[allow(unused)]
pub mod str;
pub mod hash;
pub mod fmt;
+pub mod time;
// note: does not need to be public
mod char_private;
/// Type | size_of::\<Type>()
/// ---- | ---------------
/// () | 0
+/// bool | 1
/// u8 | 1
/// u16 | 2
/// u32 | 4
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 {
- self * (180.0f32 / consts::PI)
+ // Use a constant for better precision.
+ const PIS_IN_180: f32 = 57.2957795130823208767981548141051703_f32;
+ self * PIS_IN_180
}
/// Converts to radians, assuming the number is in degrees.
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f64 {
+ // The division here is correctly rounded with respect to the true
+ // value of 180/π. (This differs from f32, where a constant must be
+ // used to ensure a correctly rounded result.)
self * (180.0f64 / consts::PI)
}
P: FnMut(Self::Item) -> bool,
{
// The addition might panic on overflow
- let n = self.len();
+ // Use the len of the slice to hint the optimizer to remove the result index bounds check.
+ let n = make_slice!(self.ptr, self.end).len();
self.try_fold(0, move |i, x| {
if predicate(x) { Err(i) }
else { Ok(i + 1) }
{
// No need for an overflow check here, because `ExactSizeIterator`
// implies that the number of elements fits into a `usize`.
- let n = self.len();
+ // Use the len of the slice to hint the optimizer to remove the result index bounds check.
+ let n = make_slice!(self.ptr, self.end).len();
self.try_rfold(n, move |i, x| {
let i = i - 1;
if predicate(x) { Err(i) }
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn into_inner(self) -> bool {
- unsafe { self.v.into_inner() != 0 }
+ self.v.into_inner() != 0
}
/// Loads a value from the bool.
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
pub fn into_inner(self) -> *mut T {
- unsafe { self.p.into_inner() }
+ self.p.into_inner()
}
/// Loads a value from the pointer.
#[inline]
#[$stable_access]
pub fn into_inner(self) -> $int_type {
- unsafe { self.v.into_inner() }
+ self.v.into_inner()
}
/// Loads a value from the atomic integer.
assert_eq!(it.next(), None);
}
+#[test]
+fn test_iterator_step_by_nth() {
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.nth(0), Some(0));
+ assert_eq!(it.nth(0), Some(5));
+ assert_eq!(it.nth(0), Some(10));
+ assert_eq!(it.nth(0), Some(15));
+ assert_eq!(it.nth(0), None);
+
+ let it = (0..18).step_by(5);
+ assert_eq!(it.clone().nth(0), Some(0));
+ assert_eq!(it.clone().nth(1), Some(5));
+ assert_eq!(it.clone().nth(2), Some(10));
+ assert_eq!(it.clone().nth(3), Some(15));
+ assert_eq!(it.clone().nth(4), None);
+ assert_eq!(it.clone().nth(42), None);
+}
+
+#[test]
+fn test_iterator_step_by_nth_overflow() {
+ #[cfg(target_pointer_width = "8")]
+ type Bigger = u16;
+ #[cfg(target_pointer_width = "16")]
+ type Bigger = u32;
+ #[cfg(target_pointer_width = "32")]
+ type Bigger = u64;
+ #[cfg(target_pointer_width = "64")]
+ type Bigger = u128;
+
+ #[derive(Clone)]
+ struct Test(Bigger);
+ impl<'a> Iterator for &'a mut Test {
+ type Item = i32;
+ fn next(&mut self) -> Option<Self::Item> { Some(21) }
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0 += n as Bigger + 1;
+ Some(42)
+ }
+ }
+
+ let mut it = Test(0);
+ let root = usize::MAX >> (::std::mem::size_of::<usize>() * 8 / 2);
+ let n = root + 20;
+ (&mut it).step_by(n).nth(n);
+ assert_eq!(it.0, n as Bigger * n as Bigger);
+
+ // large step
+ let mut it = Test(0);
+ (&mut it).step_by(usize::MAX).nth(5);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 5);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(2).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 2);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(1).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 1);
+}
+
#[test]
#[should_panic]
fn test_iterator_step_by_zero() {
--- /dev/null
+// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![stable(feature = "duration_core", since = "1.24.0")]
+
+//! Temporal quantification.
+//!
+//! Example:
+//!
+//! ```
+//! use std::time::Duration;
+//!
+//! let five_seconds = Duration::new(5, 0);
+//! // both declarations are equivalent
+//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5));
+//! ```
+
+use iter::Sum;
+use ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, DivAssign};
+
+const NANOS_PER_SEC: u32 = 1_000_000_000;
+const NANOS_PER_MILLI: u32 = 1_000_000;
+const NANOS_PER_MICRO: u32 = 1_000;
+const MILLIS_PER_SEC: u64 = 1_000;
+const MICROS_PER_SEC: u64 = 1_000_000;
+
+/// A `Duration` type to represent a span of time, typically used for system
+/// timeouts.
+///
+/// Each `Duration` is composed of a whole number of seconds and a fractional part
+/// represented in nanoseconds. If the underlying system does not support
+/// nanosecond-level precision, APIs binding a system timeout will typically round up
+/// the number of nanoseconds.
+///
+/// `Duration`s implement many common traits, including [`Add`], [`Sub`], and other
+/// [`ops`] traits.
+///
+/// [`Add`]: ../../std/ops/trait.Add.html
+/// [`Sub`]: ../../std/ops/trait.Sub.html
+/// [`ops`]: ../../std/ops/index.html
+///
+/// # Examples
+///
+/// ```
+/// use std::time::Duration;
+///
+/// let five_seconds = Duration::new(5, 0);
+/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5);
+///
+/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5);
+/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5);
+///
+/// let ten_millis = Duration::from_millis(10);
+/// ```
+#[stable(feature = "duration_core", since = "1.24.0")]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Default)]
+pub struct Duration {
+ secs: u64,
+ nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC
+}
+
+impl Duration {
+ /// Creates a new `Duration` from the specified number of whole seconds and
+ /// additional nanoseconds.
+ ///
+ /// If the number of nanoseconds is greater than or equal to 1 billion (the number
+ /// of nanoseconds in a second), then it will carry over into the seconds provided.
+ ///
+ /// # Panics
+ ///
+ /// This constructor will panic if the carry from the nanoseconds overflows
+ /// the seconds counter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let five_seconds = Duration::new(5, 0);
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ pub fn new(secs: u64, nanos: u32) -> Duration {
+ let secs = secs.checked_add((nanos / NANOS_PER_SEC) as u64)
+ .expect("overflow in Duration::new");
+ let nanos = nanos % NANOS_PER_SEC;
+ Duration { secs: secs, nanos: nanos }
+ }
+
+ /// Creates a new `Duration` from the specified number of whole seconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_secs(5);
+ ///
+ /// assert_eq!(5, duration.as_secs());
+ /// assert_eq!(0, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ pub const fn from_secs(secs: u64) -> Duration {
+ Duration { secs: secs, nanos: 0 }
+ }
+
+ /// Creates a new `Duration` from the specified number of milliseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(2569);
+ ///
+ /// assert_eq!(2, duration.as_secs());
+ /// assert_eq!(569_000_000, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ pub const fn from_millis(millis: u64) -> Duration {
+ Duration {
+ secs: millis / MILLIS_PER_SEC,
+ nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of microseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_from_micros)]
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_micros(1_000_002);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(2000, duration.subsec_nanos());
+ /// ```
+ #[unstable(feature = "duration_from_micros", issue = "44400")]
+ #[inline]
+ pub const fn from_micros(micros: u64) -> Duration {
+ Duration {
+ secs: micros / MICROS_PER_SEC,
+ nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of nanoseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_extras)]
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_nanos(1_000_000_123);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(123, duration.subsec_nanos());
+ /// ```
+ #[unstable(feature = "duration_extras", issue = "46507")]
+ #[inline]
+ pub const fn from_nanos(nanos: u64) -> Duration {
+ Duration {
+ secs: nanos / (NANOS_PER_SEC as u64),
+ nanos: (nanos % (NANOS_PER_SEC as u64)) as u32,
+ }
+ }
+
+ /// Returns the number of _whole_ seconds contained by this `Duration`.
+ ///
+ /// The returned value does not include the fractional (nanosecond) part of the
+ /// duration, which can be obtained using [`subsec_nanos`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// ```
+ ///
+ /// To determine the total number of seconds represented by the `Duration`,
+ /// use `as_secs` in combination with [`subsec_nanos`]:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::new(5, 730023852);
+ ///
+ /// assert_eq!(5.730023852,
+ /// duration.as_secs() as f64
+ /// + duration.subsec_nanos() as f64 * 1e-9);
+ /// ```
+ ///
+ /// [`subsec_nanos`]: #method.subsec_nanos
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ pub fn as_secs(&self) -> u64 { self.secs }
+
+ /// Returns the fractional part of this `Duration`, in milliseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by milliseconds. The returned number always represents a
+ /// fractional portion of a second (i.e. it is less than one thousand).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_extras)]
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(5432);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// assert_eq!(duration.subsec_millis(), 432);
+ /// ```
+ #[unstable(feature = "duration_extras", issue = "46507")]
+ #[inline]
+ pub fn subsec_millis(&self) -> u32 { self.nanos / NANOS_PER_MILLI }
+
+ /// Returns the fractional part of this `Duration`, in microseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by microseconds. The returned number always represents a
+ /// fractional portion of a second (i.e. it is less than one million).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_extras, duration_from_micros)]
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_micros(1_234_567);
+ /// assert_eq!(duration.as_secs(), 1);
+ /// assert_eq!(duration.subsec_micros(), 234_567);
+ /// ```
+ #[unstable(feature = "duration_extras", issue = "46507")]
+ #[inline]
+ pub fn subsec_micros(&self) -> u32 { self.nanos / NANOS_PER_MICRO }
+
+ /// Returns the fractional part of this `Duration`, in nanoseconds.
+ ///
+ /// This method does **not** return the length of the duration when
+ /// represented by nanoseconds. The returned number always represents a
+ /// fractional portion of a second (i.e. it is less than one billion).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(5010);
+ /// assert_eq!(duration.as_secs(), 5);
+ /// assert_eq!(duration.subsec_nanos(), 10_000_000);
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ pub fn subsec_nanos(&self) -> u32 { self.nanos }
+
+ /// Checked `Duration` addition. Computes `self + other`, returning [`None`]
+ /// if overflow occurred.
+ ///
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1)));
+ /// assert_eq!(Duration::new(1, 0).checked_add(Duration::new(std::u64::MAX, 0)), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[inline]
+ pub fn checked_add(self, rhs: Duration) -> Option<Duration> {
+ if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
+ let mut nanos = self.nanos + rhs.nanos;
+ if nanos >= NANOS_PER_SEC {
+ nanos -= NANOS_PER_SEC;
+ if let Some(new_secs) = secs.checked_add(1) {
+ secs = new_secs;
+ } else {
+ return None;
+ }
+ }
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration {
+ secs,
+ nanos,
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Checked `Duration` subtraction. Computes `self - other`, returning [`None`]
+ /// if the result would be negative or if overflow occurred.
+ ///
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 1).checked_sub(Duration::new(0, 0)), Some(Duration::new(0, 1)));
+ /// assert_eq!(Duration::new(0, 0).checked_sub(Duration::new(0, 1)), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[inline]
+ pub fn checked_sub(self, rhs: Duration) -> Option<Duration> {
+ if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
+ let nanos = if self.nanos >= rhs.nanos {
+ self.nanos - rhs.nanos
+ } else {
+ if let Some(sub_secs) = secs.checked_sub(1) {
+ secs = sub_secs;
+ self.nanos + NANOS_PER_SEC - rhs.nanos
+ } else {
+ return None;
+ }
+ };
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs: secs, nanos: nanos })
+ } else {
+ None
+ }
+ }
+
+ /// Checked `Duration` multiplication. Computes `self * other`, returning
+ /// [`None`] if overflow occurred.
+ ///
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 500_000_001).checked_mul(2), Some(Duration::new(1, 2)));
+ /// assert_eq!(Duration::new(std::u64::MAX - 1, 0).checked_mul(2), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[inline]
+ pub fn checked_mul(self, rhs: u32) -> Option<Duration> {
+ // Multiply nanoseconds as u64, because it cannot overflow that way.
+ let total_nanos = self.nanos as u64 * rhs as u64;
+ let extra_secs = total_nanos / (NANOS_PER_SEC as u64);
+ let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32;
+ if let Some(secs) = self.secs
+ .checked_mul(rhs as u64)
+ .and_then(|s| s.checked_add(extra_secs)) {
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration {
+ secs,
+ nanos,
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Checked `Duration` division. Computes `self / other`, returning [`None`]
+ /// if `other == 0`.
+ ///
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
+ /// assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
+ /// assert_eq!(Duration::new(2, 0).checked_div(0), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[inline]
+ pub fn checked_div(self, rhs: u32) -> Option<Duration> {
+ if rhs != 0 {
+ let secs = self.secs / (rhs as u64);
+ let carry = self.secs - secs * (rhs as u64);
+ let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
+ let nanos = self.nanos / rhs + (extra_nanos as u32);
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs: secs, nanos: nanos })
+ } else {
+ None
+ }
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Add for Duration {
+ type Output = Duration;
+
+ fn add(self, rhs: Duration) -> Duration {
+ self.checked_add(rhs).expect("overflow when adding durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign for Duration {
+ fn add_assign(&mut self, rhs: Duration) {
+ *self = *self + rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Sub for Duration {
+ type Output = Duration;
+
+ fn sub(self, rhs: Duration) -> Duration {
+ self.checked_sub(rhs).expect("overflow when subtracting durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign for Duration {
+ fn sub_assign(&mut self, rhs: Duration) {
+ *self = *self - rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Mul<u32> for Duration {
+ type Output = Duration;
+
+ fn mul(self, rhs: u32) -> Duration {
+ self.checked_mul(rhs).expect("overflow when multiplying duration by scalar")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl MulAssign<u32> for Duration {
+ fn mul_assign(&mut self, rhs: u32) {
+ *self = *self * rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Div<u32> for Duration {
+ type Output = Duration;
+
+ fn div(self, rhs: u32) -> Duration {
+ self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl DivAssign<u32> for Duration {
+ fn div_assign(&mut self, rhs: u32) {
+ *self = *self / rhs;
+ }
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl Sum for Duration {
+ fn sum<I: Iterator<Item=Duration>>(iter: I) -> Duration {
+ iter.fold(Duration::new(0, 0), |a, b| a + b)
+ }
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl<'a> Sum<&'a Duration> for Duration {
+ fn sum<I: Iterator<Item=&'a Duration>>(iter: I) -> Duration {
+ iter.fold(Duration::new(0, 0), |a, b| a + *b)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Duration;
+
+ #[test]
+ fn creation() {
+ assert!(Duration::from_secs(1) != Duration::from_secs(0));
+ assert_eq!(Duration::from_secs(1) + Duration::from_secs(2),
+ Duration::from_secs(3));
+ assert_eq!(Duration::from_millis(10) + Duration::from_secs(4),
+ Duration::new(4, 10 * 1_000_000));
+ assert_eq!(Duration::from_millis(4000), Duration::new(4, 0));
+ }
+
+ #[test]
+ fn secs() {
+ assert_eq!(Duration::new(0, 0).as_secs(), 0);
+ assert_eq!(Duration::from_secs(1).as_secs(), 1);
+ assert_eq!(Duration::from_millis(999).as_secs(), 0);
+ assert_eq!(Duration::from_millis(1001).as_secs(), 1);
+ }
+
+ #[test]
+ fn nanos() {
+ assert_eq!(Duration::new(0, 0).subsec_nanos(), 0);
+ assert_eq!(Duration::new(0, 5).subsec_nanos(), 5);
+ assert_eq!(Duration::new(0, 1_000_000_001).subsec_nanos(), 1);
+ assert_eq!(Duration::from_secs(1).subsec_nanos(), 0);
+ assert_eq!(Duration::from_millis(999).subsec_nanos(), 999 * 1_000_000);
+ assert_eq!(Duration::from_millis(1001).subsec_nanos(), 1 * 1_000_000);
+ }
+
+ #[test]
+ fn add() {
+ assert_eq!(Duration::new(0, 0) + Duration::new(0, 1),
+ Duration::new(0, 1));
+ assert_eq!(Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001),
+ Duration::new(1, 1));
+ }
+
+ #[test]
+ fn checked_add() {
+ assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)),
+ Some(Duration::new(0, 1)));
+ assert_eq!(Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001)),
+ Some(Duration::new(1, 1)));
+ assert_eq!(Duration::new(1, 0).checked_add(Duration::new(::u64::MAX, 0)), None);
+ }
+
+ #[test]
+ fn sub() {
+ assert_eq!(Duration::new(0, 1) - Duration::new(0, 0),
+ Duration::new(0, 1));
+ assert_eq!(Duration::new(0, 500_000_001) - Duration::new(0, 500_000_000),
+ Duration::new(0, 1));
+ assert_eq!(Duration::new(1, 0) - Duration::new(0, 1),
+ Duration::new(0, 999_999_999));
+ }
+
+ #[test]
+ fn checked_sub() {
+ let zero = Duration::new(0, 0);
+ let one_nano = Duration::new(0, 1);
+ let one_sec = Duration::new(1, 0);
+ assert_eq!(one_nano.checked_sub(zero), Some(Duration::new(0, 1)));
+ assert_eq!(one_sec.checked_sub(one_nano),
+ Some(Duration::new(0, 999_999_999)));
+ assert_eq!(zero.checked_sub(one_nano), None);
+ assert_eq!(zero.checked_sub(one_sec), None);
+ }
+
+ #[test] #[should_panic]
+ fn sub_bad1() {
+ Duration::new(0, 0) - Duration::new(0, 1);
+ }
+
+ #[test] #[should_panic]
+ fn sub_bad2() {
+ Duration::new(0, 0) - Duration::new(1, 0);
+ }
+
+ #[test]
+ fn mul() {
+ assert_eq!(Duration::new(0, 1) * 2, Duration::new(0, 2));
+ assert_eq!(Duration::new(1, 1) * 3, Duration::new(3, 3));
+ assert_eq!(Duration::new(0, 500_000_001) * 4, Duration::new(2, 4));
+ assert_eq!(Duration::new(0, 500_000_001) * 4000,
+ Duration::new(2000, 4000));
+ }
+
+ #[test]
+ fn checked_mul() {
+ assert_eq!(Duration::new(0, 1).checked_mul(2), Some(Duration::new(0, 2)));
+ assert_eq!(Duration::new(1, 1).checked_mul(3), Some(Duration::new(3, 3)));
+ assert_eq!(Duration::new(0, 500_000_001).checked_mul(4), Some(Duration::new(2, 4)));
+ assert_eq!(Duration::new(0, 500_000_001).checked_mul(4000),
+ Some(Duration::new(2000, 4000)));
+ assert_eq!(Duration::new(::u64::MAX - 1, 0).checked_mul(2), None);
+ }
+
+ #[test]
+ fn div() {
+ assert_eq!(Duration::new(0, 1) / 2, Duration::new(0, 0));
+ assert_eq!(Duration::new(1, 1) / 3, Duration::new(0, 333_333_333));
+ assert_eq!(Duration::new(99, 999_999_000) / 100,
+ Duration::new(0, 999_999_990));
+ }
+
+ #[test]
+ fn checked_div() {
+ assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
+ assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
+ assert_eq!(Duration::new(2, 0).checked_div(0), None);
+ }
+}
-Subproject commit 2b4cd1016bdba92becb4f982a4dcb18fe6653bc4
+Subproject commit 56444a4545bd71430d64b86b8a71714cfdbe9f5d
#![feature(libc)]
#![feature(panic_runtime)]
#![feature(staged_api)]
+#![feature(rustc_attrs)]
// Rust's "try" function, but if we're aborting on panics we just call the
// function as there's nothing else we need to do here.
#[no_mangle]
+#[rustc_std_internal_symbol]
pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8),
data: *mut u8,
_data_ptr: *mut usize,
// will kill us with an illegal instruction, which will do a good enough job for
// now hopefully.
#[no_mangle]
+#[rustc_std_internal_symbol]
pub unsafe extern fn __rust_start_panic(_data: usize, _vtable: usize) -> u32 {
abort();
fmt_macros = { path = "../libfmt_macros" }
graphviz = { path = "../libgraphviz" }
jobserver = "0.1"
-log = "0.4"
+log = { version = "0.4", features = ["release_max_level_info", "std"] }
rustc_apfloat = { path = "../librustc_apfloat" }
rustc_back = { path = "../librustc_back" }
rustc_const_math = { path = "../librustc_const_math" }
backtrace = "0.3.3"
byteorder = { version = "1.1", features = ["i128"]}
-
# Note that these dependencies are a lie, they're just here to get linkage to
# work.
#
- `'gcx` -- the lifetime of the global arena (see `librustc/ty`).
- generics -- the set of generic type parameters defined on a type or item
- ICE -- internal compiler error. When the compiler crashes.
+- ICH -- incremental compilation hash.
- infcx -- the inference context (see `librustc/infer`)
- MIR -- the **Mid-level IR** that is created after type-checking for use by borrowck and trans.
Defined in the `src/librustc/mir/` module, but much of the code that manipulates it is
[] TargetFeaturesEnabled(DefId),
[] InstanceDefSizeEstimate { instance_def: InstanceDef<'tcx> },
+
+ [] GetSymbolExportLevel(DefId),
+
);
trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug {
ty::ReEarlyBound(_) |
ty::ReFree(_) => {
let scope = region.free_region_binding_scope(self);
- let prefix = match *region {
- ty::ReEarlyBound(ref br) => {
- format!("the lifetime {} as defined on", br.name)
- }
- ty::ReFree(ref fr) => {
- match fr.bound_region {
- ty::BrAnon(idx) => {
- format!("the anonymous lifetime #{} defined on", idx + 1)
- }
- ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(),
- _ => {
- format!("the lifetime {} as defined on",
- fr.bound_region)
- }
- }
- }
- _ => bug!()
- };
-
let node = self.hir.as_local_node_id(scope)
.unwrap_or(DUMMY_NODE_ID);
let unknown;
&unknown
}
};
- let (msg, opt_span) = explain_span(self, tag, self.hir.span(node));
+ let (prefix, span) = match *region {
+ ty::ReEarlyBound(ref br) => {
+ (format!("the lifetime {} as defined on", br.name),
+ self.sess.codemap().def_span(self.hir.span(node)))
+ }
+ ty::ReFree(ref fr) => {
+ match fr.bound_region {
+ ty::BrAnon(idx) => {
+ (format!("the anonymous lifetime #{} defined on", idx + 1),
+ self.hir.span(node))
+ }
+ ty::BrFresh(_) => ("an anonymous lifetime defined on".to_owned(),
+ self.hir.span(node)),
+ _ => (format!("the lifetime {} as defined on", fr.bound_region),
+ self.sess.codemap().def_span(self.hir.span(node))),
+ }
+ }
+ _ => bug!()
+ };
+ let (msg, opt_span) = explain_span(self, tag, span);
(format!("{} {}", prefix, msg), opt_span)
}
}
};
- let span = cause.span;
+ let span = cause.span(&self.tcx);
diag.span_label(span, terr.to_string());
if let Some((sp, msg)) = secondary_span {
"did you mean `{}(/* fields */)`?",
self.tcx.item_path_str(def_id)
);
- diag.span_label(cause.span, message);
+ diag.span_label(span, message);
}
}
}
trace,
terr);
- let span = trace.cause.span;
+ let span = trace.cause.span(&self.tcx);
let failure_code = trace.cause.as_failure_code(terr);
let mut diag = match failure_code {
FailureCode::Error0317(failure_str) => {
// `sp` only covers `T`, change it so that it covers
// `T:` when appropriate
let sp = if has_lifetimes {
- sp.to(sp.next_point().next_point())
+ sp.to(self.tcx.sess.codemap().next_point(
+ self.tcx.sess.codemap().next_point(sp)))
} else {
sp
};
sup_region,
"...");
+ match (&sup_origin, &sub_origin) {
+ (&infer::Subtype(ref sup_trace), &infer::Subtype(ref sub_trace)) => {
+ if let (Some((sup_expected, sup_found)),
+ Some((sub_expected, sub_found))) = (self.values_str(&sup_trace.values),
+ self.values_str(&sub_trace.values)) {
+ if sub_expected == sup_expected && sub_found == sup_found {
+ self.tcx.note_and_explain_region(
+ region_scope_tree,
+ &mut err,
+ "...but the lifetime must also be valid for ",
+ sub_region,
+ "...",
+ );
+ err.note(&format!("...so that the {}:\nexpected {}\n found {}",
+ sup_trace.cause.as_requirement_str(),
+ sup_expected.content(),
+ sup_found.content()));
+ err.emit();
+ return;
+ }
+ }
+ }
+ _ => {}
+ }
+
self.note_region_origin(&mut err, &sup_origin);
self.tcx.note_and_explain_region(region_scope_tree, &mut err,
if let Some((expected, found)) = self.values_str(&trace.values) {
let expected = expected.content();
let found = found.content();
- // FIXME: do we want a "the" here?
- err.span_note(trace.cause.span,
- &format!("...so that {} (expected {}, found {})",
- trace.cause.as_requirement_str(),
- expected,
- found));
+ err.note(&format!("...so that the {}:\nexpected {}\n found {}",
+ trace.cause.as_requirement_str(),
+ expected,
+ found));
} else {
// FIXME: this really should be handled at some earlier stage. Our
// handling of region checking when type errors are present is
#![feature(specialization)]
#![feature(unboxed_closures)]
#![feature(underscore_lifetimes)]
+#![feature(universal_impl_trait)]
#![feature(trace_macros)]
#![feature(catch_expr)]
#![feature(test)]
"raw pointer to an inference variable"
}
+declare_lint! {
+ pub ELIDED_LIFETIME_IN_PATH,
+ Allow,
+ "hidden lifetime parameters are deprecated, try `Foo<'_>`"
+}
+
/// Does nothing as a lint pass, but registers some `Lint`s
/// which are used by other parts of the compiler.
#[derive(Copy, Clone)]
UNUSED_MUT,
COERCE_NEVER,
SINGLE_USE_LIFETIME,
- TYVAR_BEHIND_RAW_POINTER
+ TYVAR_BEHIND_RAW_POINTER,
+ ELIDED_LIFETIME_IN_PATH
+
)
}
}
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- (self.tcx, self.param_env.reveal_all()).layout_of(ty)
+ self.tcx.layout_of(self.param_env.and(ty))
}
}
// Put the lint store levels and passes back in the session.
cx.lint_sess.restore(&sess.lint_store);
- // Emit all buffered lints from early on in the session now that we've
- // calculated the lint levels for all AST nodes.
- for (_id, lints) in cx.buffered.map {
- for early_lint in lints {
- span_bug!(early_lint.span, "failed to process buffered lint here");
+ // All of the buffered lints should have been emitted at this point.
+ // If not, that means that we somehow buffered a lint for a node id
+ // that was not lint-checked (perhaps it doesn't exist?). This is a bug.
+ //
+ // Rustdoc runs everybody-loops before the early lints and removes
+ // function bodies, so it's totally possible for linted
+ // node ids to not exist (e.g. macros defined within functions for the
+ // unused_macro lint) anymore. So we only run this check
+ // when we're not in rustdoc mode. (see issue #47639)
+ if !sess.opts.actually_rustdoc {
+ for (_id, lints) in cx.buffered.map {
+ for early_lint in lints {
+ span_bug!(early_lint.span, "failed to process buffered lint here");
+ }
}
}
}
}
hir::ExprAssign(ref l, ref r) => {
- // see comment on lvalues in
- // propagate_through_lvalue_components()
- let succ = self.write_lvalue(&l, succ, ACC_WRITE);
- let succ = self.propagate_through_lvalue_components(&l, succ);
+ // see comment on places in
+ // propagate_through_place_components()
+ let succ = self.write_place(&l, succ, ACC_WRITE);
+ let succ = self.propagate_through_place_components(&l, succ);
self.propagate_through_expr(&r, succ)
}
let succ = self.propagate_through_expr(&l, succ);
self.propagate_through_expr(&r, succ)
} else {
- // see comment on lvalues in
- // propagate_through_lvalue_components()
- let succ = self.write_lvalue(&l, succ, ACC_WRITE|ACC_READ);
+ // see comment on places in
+ // propagate_through_place_components()
+ let succ = self.write_place(&l, succ, ACC_WRITE|ACC_READ);
let succ = self.propagate_through_expr(&r, succ);
- self.propagate_through_lvalue_components(&l, succ)
+ self.propagate_through_place_components(&l, succ)
}
}
hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| {
- // see comment on lvalues
- // in propagate_through_lvalue_components()
+ // see comment on places
+ // in propagate_through_place_components()
if o.is_indirect {
self.propagate_through_expr(output, succ)
} else {
let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE };
- let succ = self.write_lvalue(output, succ, acc);
- self.propagate_through_lvalue_components(output, succ)
+ let succ = self.write_place(output, succ, acc);
+ self.propagate_through_place_components(output, succ)
}
});
}
}
- fn propagate_through_lvalue_components(&mut self,
+ fn propagate_through_place_components(&mut self,
expr: &Expr,
succ: LiveNode)
-> LiveNode {
- // # Lvalues
+ // # Places
//
// In general, the full flow graph structure for an
// assignment/move/etc can be handled in one of two ways,
//
// The two kinds of graphs are:
//
- // Tracked lvalue Untracked lvalue
+ // Tracked place Untracked place
// ----------------------++-----------------------
// ||
// | || |
// (rvalue) || (rvalue)
// | || |
// v || v
- // (write of lvalue) || (lvalue components)
+ // (write of place) || (place components)
// | || |
// v || v
// (succ) || (succ)
//
// I will cover the two cases in turn:
//
- // # Tracked lvalues
+ // # Tracked places
//
- // A tracked lvalue is a local variable/argument `x`. In
+ // A tracked place is a local variable/argument `x`. In
// these cases, the link_node where the write occurs is linked
- // to node id of `x`. The `write_lvalue()` routine generates
+ // to node id of `x`. The `write_place()` routine generates
// the contents of this node. There are no subcomponents to
// consider.
//
- // # Non-tracked lvalues
+ // # Non-tracked places
//
- // These are lvalues like `x[5]` or `x.f`. In that case, we
+ // These are places like `x[5]` or `x.f`. In that case, we
// basically ignore the value which is written to but generate
// reads for the components---`x` in these two examples. The
// components reads are generated by
- // `propagate_through_lvalue_components()` (this fn).
+ // `propagate_through_place_components()` (this fn).
//
- // # Illegal lvalues
+ // # Illegal places
//
- // It is still possible to observe assignments to non-lvalues;
+ // It is still possible to observe assignments to non-places;
// these errors are detected in the later pass borrowck. We
// just ignore such cases and treat them as reads.
}
}
- // see comment on propagate_through_lvalue()
- fn write_lvalue(&mut self, expr: &Expr, succ: LiveNode, acc: u32)
+ // see comment on propagate_through_place()
+ fn write_place(&mut self, expr: &Expr, succ: LiveNode, acc: u32)
-> LiveNode {
match expr.node {
hir::ExprPath(hir::QPath::Resolved(_, ref path)) => {
self.access_path(expr.id, path, succ, acc)
}
- // We do not track other lvalues, so just propagate through
+ // We do not track other places, so just propagate through
// to their subcomponents. Also, it may happen that
- // non-lvalues occur here, because those are detected in the
+ // non-places occur here, because those are detected in the
// later pass borrowck.
_ => succ
}
fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) {
match expr.node {
hir::ExprAssign(ref l, _) => {
- this.check_lvalue(&l);
+ this.check_place(&l);
intravisit::walk_expr(this, expr);
}
hir::ExprAssignOp(_, ref l, _) => {
if !this.tables.is_method_call(expr) {
- this.check_lvalue(&l);
+ this.check_place(&l);
}
intravisit::walk_expr(this, expr);
this.visit_expr(input);
}
- // Output operands must be lvalues
+ // Output operands must be places
for (o, output) in ia.outputs.iter().zip(outputs) {
if !o.is_indirect {
- this.check_lvalue(output);
+ this.check_place(output);
}
this.visit_expr(output);
}
}
impl<'a, 'tcx> Liveness<'a, 'tcx> {
- fn check_lvalue(&mut self, expr: &'tcx Expr) {
+ fn check_place(&mut self, expr: &'tcx Expr) {
match expr.node {
hir::ExprPath(hir::QPath::Resolved(_, ref path)) => {
if let Def::Local(nid) = path.def {
}
}
_ => {
- // For other kinds of lvalues, no checks are required,
+ // For other kinds of places, no checks are required,
// and any embedded expressions are actually rvalues
intravisit::walk_expr(self, expr);
}
//! | E.comp // access to an interior component
//!
//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
-//! address where the result is to be found. If Expr is an lvalue, then this
-//! is the address of the lvalue. If Expr is an rvalue, this is the address of
+//! address where the result is to be found. If Expr is a place, then this
+//! is the address of the place. If Expr is an rvalue, this is the address of
//! some temporary spot in memory where the result is stored.
//!
//! Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr)
pub id: ast::NodeId, // id of expr/pat producing this value
pub span: Span, // span of same expr/pat
pub cat: Categorization<'tcx>, // categorization of expr
- pub mutbl: MutabilityCategory, // mutability of expr as lvalue
+ pub mutbl: MutabilityCategory, // mutability of expr as place
pub ty: Ty<'tcx>, // type of the expr (*see WARNING above*)
pub note: Note, // Note about the provenance of this cmt
}
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
- match base_ty.builtin_deref(false, ty::NoPreference) {
+ match base_ty.builtin_deref(false) {
Some(t) => t.ty,
None => {
debug!("By-ref binding of non-derefable type {:?}", base_ty);
match expr.node {
hir::ExprUnary(hir::UnDeref, ref e_base) => {
if self.tables.is_method_call(expr) {
- self.cat_overloaded_lvalue(expr, e_base, false)
+ self.cat_overloaded_place(expr, e_base, false)
} else {
let base_cmt = self.cat_expr(&e_base)?;
self.cat_deref(expr, base_cmt, false)
// The call to index() returns a `&T` value, which
// is an rvalue. That is what we will be
// dereferencing.
- self.cat_overloaded_lvalue(expr, base, true)
+ self.cat_overloaded_place(expr, base, true)
} else {
let base_cmt = self.cat_expr(&base)?;
self.cat_index(expr, base_cmt, expr_ty, InteriorOffsetKind::Index)
ret
}
- fn cat_overloaded_lvalue(&self,
+ fn cat_overloaded_place(&self,
expr: &hir::Expr,
base: &hir::Expr,
implicit: bool)
-> McResult<cmt<'tcx>> {
- debug!("cat_overloaded_lvalue: implicit={}", implicit);
+ debug!("cat_overloaded_place: implicit={}", implicit);
// Reconstruct the output assuming it's a reference with the
// same region and mutability as the receiver. This holds for
// `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`.
- let lvalue_ty = self.expr_ty(expr)?;
+ let place_ty = self.expr_ty(expr)?;
let base_ty = self.expr_ty_adjusted(base)?;
let (region, mutbl) = match base_ty.sty {
ty::TyRef(region, mt) => (region, mt.mutbl),
_ => {
- span_bug!(expr.span, "cat_overloaded_lvalue: base is not a reference")
+ span_bug!(expr.span, "cat_overloaded_place: base is not a reference")
}
};
let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut {
- ty: lvalue_ty,
+ ty: place_ty,
mutbl,
});
debug!("cat_deref: base_cmt={:?}", base_cmt);
let base_cmt_ty = base_cmt.ty;
- let deref_ty = match base_cmt_ty.builtin_deref(true, ty::NoPreference) {
+ let deref_ty = match base_cmt_ty.builtin_deref(true) {
Some(mt) => mt.ty,
None => {
debug!("Explicit deref of non-derefable type: {:?}",
}
}
- /// Returns `FreelyAliasable(_)` if this lvalue represents a freely aliasable pointer type.
+ /// Returns `FreelyAliasable(_)` if this place represents a freely aliasable pointer type.
pub fn freely_aliasable(&self) -> Aliasability {
// Maybe non-obvious: copied upvars can only be considered
// non-aliasable in once closures, since any other kind can be
"static item".to_string()
}
Categorization::Rvalue(..) => {
- "non-lvalue".to_string()
+ "non-place".to_string()
}
Categorization::Local(vid) => {
if tcx.hir.is_argument(vid) {
}
fn visit_pat(&mut self, pat: &'tcx Pat) {
+ intravisit::walk_pat(self, pat);
+
self.expr_and_pat_count += 1;
- intravisit::walk_pat(self, pat);
+ if pat.id == self.id {
+ self.result = Some(self.expr_and_pat_count);
+ }
}
fn visit_expr(&mut self, expr: &'tcx Expr) {
/// Checks whether the given scope contains a `yield`. If so,
/// returns `Some((span, expr_count))` with the span of a yield we found and
- /// the number of expressions appearing before the `yield` in the body.
+ /// the number of expressions and patterns appearing before the `yield` in the body + 1.
+ /// If there are multiple yields in a scope, the one with the highest number is returned.
pub fn yield_in_scope(&self, scope: Scope) -> Option<(Span, usize)> {
self.yield_in_scope.get(&scope).cloned()
}
// I mean that creating a binding into a ref-counted or managed value
// would still count.)
//
- // 3. `ET`, which matches both rvalues like `foo()` as well as lvalues
+ // 3. `ET`, which matches both rvalues like `foo()` as well as places
// based on rvalues like `foo().x[2].y`.
//
// A subexpression `<rvalue>` that appears in a let initializer
/// | (ET)
/// | <rvalue>
///
- /// Note: ET is intended to match "rvalues or lvalues based on rvalues".
+ /// Note: ET is intended to match "rvalues or places based on rvalues".
fn record_rvalue_scope<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>,
expr: &hir::Expr,
blk_scope: Option<Scope>) {
fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
if lifetime_ref.is_elided() {
- self.resolve_elided_lifetimes(slice::from_ref(lifetime_ref));
+ self.resolve_elided_lifetimes(slice::from_ref(lifetime_ref), false);
return;
}
if lifetime_ref.is_static() {
}
if params.lifetimes.iter().all(|l| l.is_elided()) {
- self.resolve_elided_lifetimes(¶ms.lifetimes);
+ self.resolve_elided_lifetimes(¶ms.lifetimes, true);
} else {
for l in ¶ms.lifetimes {
self.visit_lifetime(l);
}
}
- fn resolve_elided_lifetimes(&mut self, lifetime_refs: &'tcx [hir::Lifetime]) {
+ fn resolve_elided_lifetimes(&mut self, lifetime_refs: &'tcx [hir::Lifetime], deprecated: bool) {
if lifetime_refs.is_empty() {
return;
}
let span = lifetime_refs[0].span;
+ let id = lifetime_refs[0].id;
let mut late_depth = 0;
let mut scope = self.scope;
+ if deprecated {
+ self.tcx
+ .struct_span_lint_node(
+ lint::builtin::ELIDED_LIFETIME_IN_PATH,
+ id,
+ span,
+ &format!("hidden lifetime parameters are deprecated, try `Foo<'_>`"))
+ .emit();
+ }
let error = loop {
match *scope {
// Do not assign any resolution, it will be inferred.
},
/// Drop the Place and assign the new value over it. This ensures
- /// that the assignment to LV occurs *even if* the destructor for
+ /// that the assignment to `P` occurs *even if* the destructor for
/// place unwinds. Its semantics are best explained by by the
/// elaboration:
///
/// ```
/// BB0 {
- /// DropAndReplace(LV <- RV, goto BB1, unwind BB2)
+ /// DropAndReplace(P <- V, goto BB1, unwind BB2)
/// }
/// ```
///
///
/// ```
/// BB0 {
- /// Drop(LV, goto BB1, unwind BB2)
+ /// Drop(P, goto BB1, unwind BB2)
/// }
/// BB1 {
- /// // LV is now unitialized
- /// LV <- RV
+ /// // P is now uninitialized
+ /// P <- V
/// }
/// BB2 {
- /// // LV is now unitialized -- its dtor panicked
- /// LV <- RV
+ /// // P is now uninitialized -- its dtor panicked
+ /// P <- V
/// }
/// ```
DropAndReplace {
Array(Ty<'tcx>),
Tuple,
- /// The second field is variant number (discriminant), it's equal
- /// to 0 for struct and union expressions. The fourth field is
+ /// The second field is the variant index. It's equal to 0 for struct
+ /// and union expressions. The fourth field is
/// active field number and is present only for union expressions
/// -- e.g. for a union expression `SomeUnion { c: .. }`, the
/// active field index would identity the field `c`
/// the location is within this block
pub block: BasicBlock,
- /// the location is the start of the this statement; or, if `statement_index`
+ /// the location is the start of the statement; or, if `statement_index`
/// == num-statements, then the start of the terminator.
pub statement_index: usize,
}
match *elem {
ProjectionElem::Deref => {
let ty = self.to_ty(tcx)
- .builtin_deref(true, ty::LvaluePreference::NoPreference)
+ .builtin_deref(true)
.unwrap_or_else(|| {
bug!("deref projection of non-dereferencable ty {:?}", self)
})
dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED],
"in dep-info output, omit targets for tracking dependencies of the dep-info files \
themselves"),
+ approximate_suggestions: bool = (false, parse_bool, [UNTRACKED],
+ "include machine-applicability of suggestions in JSON output"),
unpretty: Option<String> = (None, parse_unpretty, [UNTRACKED],
"Present the input source, unstable (and less-pretty) variants;
valid types are any of the types for `--pretty`, as well as:
let emitter: Box<Emitter> = match (sopts.error_format, emitter_dest) {
(config::ErrorOutputType::HumanReadable(color_config), None) => {
- Box::new(EmitterWriter::stderr(color_config, Some(codemap.clone()), false))
+ Box::new(EmitterWriter::stderr(color_config,
+ Some(codemap.clone()),
+ false,
+ sopts.debugging_opts.teach))
}
(config::ErrorOutputType::HumanReadable(_), Some(dst)) => {
- Box::new(EmitterWriter::new(dst, Some(codemap.clone()), false))
+ Box::new(EmitterWriter::new(dst, Some(codemap.clone()), false, false))
}
(config::ErrorOutputType::Json(pretty), None) => {
- Box::new(JsonEmitter::stderr(Some(registry), codemap.clone(), pretty))
+ Box::new(JsonEmitter::stderr(Some(registry), codemap.clone(),
+ pretty, sopts.debugging_opts.approximate_suggestions))
}
(config::ErrorOutputType::Json(pretty), Some(dst)) => {
- Box::new(JsonEmitter::new(dst, Some(registry), codemap.clone(), pretty))
+ Box::new(JsonEmitter::new(dst, Some(registry), codemap.clone(),
+ pretty, sopts.debugging_opts.approximate_suggestions))
}
(config::ErrorOutputType::Short(color_config), None) => {
- Box::new(EmitterWriter::stderr(color_config, Some(codemap.clone()), true))
+ Box::new(EmitterWriter::stderr(color_config, Some(codemap.clone()), true, false))
}
(config::ErrorOutputType::Short(_), Some(dst)) => {
- Box::new(EmitterWriter::new(dst, Some(codemap.clone()), true))
+ Box::new(EmitterWriter::new(dst, Some(codemap.clone()), true, false))
}
};
pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! {
let emitter: Box<Emitter> = match output {
config::ErrorOutputType::HumanReadable(color_config) => {
- Box::new(EmitterWriter::stderr(color_config, None, false))
+ Box::new(EmitterWriter::stderr(color_config, None, false, false))
}
config::ErrorOutputType::Json(pretty) => Box::new(JsonEmitter::basic(pretty)),
config::ErrorOutputType::Short(color_config) => {
- Box::new(EmitterWriter::stderr(color_config, None, true))
+ Box::new(EmitterWriter::stderr(color_config, None, true, false))
}
};
let handler = errors::Handler::with_emitter(true, false, emitter);
pub fn early_warn(output: config::ErrorOutputType, msg: &str) {
let emitter: Box<Emitter> = match output {
config::ErrorOutputType::HumanReadable(color_config) => {
- Box::new(EmitterWriter::stderr(color_config, None, false))
+ Box::new(EmitterWriter::stderr(color_config, None, false, false))
}
config::ErrorOutputType::Json(pretty) => Box::new(JsonEmitter::basic(pretty)),
config::ErrorOutputType::Short(color_config) => {
- Box::new(EmitterWriter::stderr(color_config, None, true))
+ Box::new(EmitterWriter::stderr(color_config, None, true, false))
}
};
let handler = errors::Handler::with_emitter(true, false, emitter);
use ty::fold::TypeFoldable;
use ty::subst::Subst;
-use infer::{InferCtxt, InferOk};
+use infer::{InferOk};
/// Whether we do the orphan check relative to this crate or
/// to some remote crate.
pub intercrate_ambiguity_causes: Vec<IntercrateAmbiguityCause>,
}
-/// If there are types that satisfy both impls, returns a suitably-freshened
-/// `ImplHeader` with those types substituted
-pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>,
- impl1_def_id: DefId,
- impl2_def_id: DefId,
- intercrate_mode: IntercrateMode)
- -> Option<OverlapResult<'tcx>>
+/// If there are types that satisfy both impls, invokes `on_overlap`
+/// with a suitably-freshened `ImplHeader` with those types
+/// substituted. Otherwise, invokes `no_overlap`.
+pub fn overlapping_impls<'gcx, F1, F2, R>(
+ tcx: TyCtxt<'_, 'gcx, 'gcx>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+ intercrate_mode: IntercrateMode,
+ on_overlap: F1,
+ no_overlap: F2,
+) -> R
+where
+ F1: FnOnce(OverlapResult<'_>) -> R,
+ F2: FnOnce() -> R,
{
debug!("impl_can_satisfy(\
impl1_def_id={:?}, \
impl2_def_id,
intercrate_mode);
- let selcx = &mut SelectionContext::intercrate(infcx, intercrate_mode);
- overlap(selcx, impl1_def_id, impl2_def_id)
+ let overlaps = tcx.infer_ctxt().enter(|infcx| {
+ let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode);
+ overlap(selcx, impl1_def_id, impl2_def_id).is_some()
+ });
+
+ if !overlaps {
+ return no_overlap();
+ }
+
+ // In the case where we detect an error, run the check again, but
+ // this time tracking intercrate ambiguity causes for better
+ // diagnostics. (These take time and can lead to false errors.)
+ tcx.infer_ctxt().enter(|infcx| {
+ let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode);
+ selcx.enable_tracking_intercrate_ambiguity_causes();
+ on_overlap(overlap(selcx, impl1_def_id, impl2_def_id).unwrap())
+ })
}
fn with_fresh_ty_vars<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>,
return None
}
- Some(OverlapResult {
- impl_header: selcx.infcx().resolve_type_vars_if_possible(&a_impl_header),
- intercrate_ambiguity_causes: selcx.intercrate_ambiguity_causes().to_vec(),
- })
+ let impl_header = selcx.infcx().resolve_type_vars_if_possible(&a_impl_header);
+ let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes();
+ debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes);
+ Some(OverlapResult { impl_header, intercrate_ambiguity_causes })
}
pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
span,
node: hir::ImplItemKind::Method(hir::MethodSig { ref decl, .. }, _),
..
+ }) |
+ hir::map::NodeTraitItem(&hir::TraitItem {
+ span,
+ node: hir::TraitItemKind::Method(hir::MethodSig { ref decl, .. }, _),
+ ..
}) => {
(self.tcx.sess.codemap().def_span(span), decl.inputs.iter()
.map(|arg| match arg.clone().into_inner().node {
err.note("the return type of a function must have a \
statically known size");
}
+ ObligationCauseCode::SizedYieldType => {
+ err.note("the yield type of a generator must have a \
+ statically known size");
+ }
ObligationCauseCode::AssignmentLhsSized => {
err.note("the left-hand-side of an assignment must have a statically known size");
}
pub code: ObligationCauseCode<'tcx>
}
+impl<'tcx> ObligationCause<'tcx> {
+ pub fn span<'a, 'gcx>(&self, tcx: &TyCtxt<'a, 'gcx, 'tcx>) -> Span {
+ match self.code {
+ ObligationCauseCode::CompareImplMethodObligation { .. } |
+ ObligationCauseCode::MainFunctionType |
+ ObligationCauseCode::StartFunctionType => {
+ tcx.sess.codemap().def_span(self.span)
+ }
+ _ => self.span,
+ }
+ }
+}
+
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from span.
VariableType(ast::NodeId),
/// Return type must be Sized
SizedReturnType,
+ /// Yield type must be Sized
+ SizedYieldType,
/// [T,..n] --> T must be Copy
RepeatVec,
inferred_obligations: SnapshotVec<InferredObligationsSnapshotVecDelegate<'tcx>>,
- intercrate_ambiguity_causes: Vec<IntercrateAmbiguityCause>,
+ intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
}
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
DownstreamCrate {
trait_desc: String,
freshener: infcx.freshener(),
intercrate: None,
inferred_obligations: SnapshotVec::new(),
- intercrate_ambiguity_causes: Vec::new(),
+ intercrate_ambiguity_causes: None,
}
}
freshener: infcx.freshener(),
intercrate: Some(mode),
inferred_obligations: SnapshotVec::new(),
- intercrate_ambiguity_causes: Vec::new(),
+ intercrate_ambiguity_causes: None,
}
}
+ /// Enables tracking of intercrate ambiguity causes. These are
+ /// used in coherence to give improved diagnostics. We don't do
+ /// this until we detect a coherence error because it can lead to
+ /// false overflow results (#47139) and because it costs
+ /// computation time.
+ pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
+ assert!(self.intercrate.is_some());
+ assert!(self.intercrate_ambiguity_causes.is_none());
+ self.intercrate_ambiguity_causes = Some(vec![]);
+ debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
+ }
+
+ /// Gets the intercrate ambiguity causes collected since tracking
+ /// was enabled and disables tracking at the same time. If
+ /// tracking is not enabled, just returns an empty vector.
+ pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
+ assert!(self.intercrate.is_some());
+ self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
+ }
+
pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
self.infcx
}
self.infcx
}
- pub fn intercrate_ambiguity_causes(&self) -> &[IntercrateAmbiguityCause] {
- &self.intercrate_ambiguity_causes
- }
-
/// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection
/// context's self.
fn in_snapshot<R, F>(&mut self, f: F) -> R
debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
stack.fresh_trait_ref);
// Heuristics: show the diagnostics when there are no candidates in crate.
- if let Ok(candidate_set) = self.assemble_candidates(stack) {
- if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
- let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
- let self_ty = trait_ref.self_ty();
- let cause = IntercrateAmbiguityCause::DownstreamCrate {
- trait_desc: trait_ref.to_string(),
- self_desc: if self_ty.has_concrete_skeleton() {
- Some(self_ty.to_string())
- } else {
- None
- },
- };
- self.intercrate_ambiguity_causes.push(cause);
+ if self.intercrate_ambiguity_causes.is_some() {
+ debug!("evaluate_stack: intercrate_ambiguity_causes is some");
+ if let Ok(candidate_set) = self.assemble_candidates(stack) {
+ if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
+ let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
+ let self_ty = trait_ref.self_ty();
+ let cause = IntercrateAmbiguityCause::DownstreamCrate {
+ trait_desc: trait_ref.to_string(),
+ self_desc: if self_ty.has_concrete_skeleton() {
+ Some(self_ty.to_string())
+ } else {
+ None
+ },
+ };
+ debug!("evaluate_stack: pushing cause = {:?}", cause);
+ self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
+ }
}
}
return EvaluatedToAmbig;
None => {}
Some(conflict) => {
debug!("coherence stage: not knowable");
- // Heuristics: show the diagnostics when there are no candidates in crate.
- let candidate_set = self.assemble_candidates(stack)?;
- if !candidate_set.ambiguous && candidate_set.vec.iter().all(|c| {
- !self.evaluate_candidate(stack, &c).may_apply()
- }) {
- let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
- let self_ty = trait_ref.self_ty();
- let trait_desc = trait_ref.to_string();
- let self_desc = if self_ty.has_concrete_skeleton() {
- Some(self_ty.to_string())
- } else {
- None
- };
- let cause = if let Conflict::Upstream = conflict {
- IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
- } else {
- IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
- };
- self.intercrate_ambiguity_causes.push(cause);
+ if self.intercrate_ambiguity_causes.is_some() {
+ debug!("evaluate_stack: intercrate_ambiguity_causes is some");
+ // Heuristics: show the diagnostics when there are no candidates in crate.
+ let candidate_set = self.assemble_candidates(stack)?;
+ if !candidate_set.ambiguous && candidate_set.vec.iter().all(|c| {
+ !self.evaluate_candidate(stack, &c).may_apply()
+ }) {
+ let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
+ let self_ty = trait_ref.self_ty();
+ let trait_desc = trait_ref.to_string();
+ let self_desc = if self_ty.has_concrete_skeleton() {
+ Some(self_ty.to_string())
+ } else {
+ None
+ };
+ let cause = if let Conflict::Upstream = conflict {
+ IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
+ } else {
+ IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
+ };
+ debug!("evaluate_stack: pushing cause = {:?}", cause);
+ self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
+ }
}
return Ok(None);
}
};
let tcx = tcx.global_tcx();
- let (le, ge) = tcx.infer_ctxt().enter(|infcx| {
- let overlap = traits::overlapping_impls(&infcx,
- possible_sibling,
- impl_def_id,
- traits::IntercrateMode::Issue43355);
- if let Some(overlap) = overlap {
+ let (le, ge) = traits::overlapping_impls(
+ tcx,
+ possible_sibling,
+ impl_def_id,
+ traits::IntercrateMode::Issue43355,
+ |overlap| {
if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
return Ok((false, false));
}
} else {
Ok((le, ge))
}
- } else {
- Ok((false, false))
- }
- })?;
+ },
+ || Ok((false, false)),
+ )?;
if le && !ge {
debug!("descending as child of TraitRef {:?}",
return Ok(Inserted::Replaced(possible_sibling));
} else {
if !tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
- tcx.infer_ctxt().enter(|infcx| {
- if let Some(overlap) = traits::overlapping_impls(
- &infcx,
- possible_sibling,
- impl_def_id,
- traits::IntercrateMode::Fixed)
- {
- last_lint = Some(overlap_error(overlap));
- }
- });
+ traits::overlapping_impls(
+ tcx,
+ possible_sibling,
+ impl_def_id,
+ traits::IntercrateMode::Fixed,
+ |overlap| last_lint = Some(overlap_error(overlap)),
+ || (),
+ );
}
// no overlap (error bailed already via ?)
super::VariableType(id) => Some(super::VariableType(id)),
super::ReturnType(id) => Some(super::ReturnType(id)),
super::SizedReturnType => Some(super::SizedReturnType),
+ super::SizedYieldType => Some(super::SizedYieldType),
super::RepeatVec => Some(super::RepeatVec),
super::FieldSized(item) => Some(super::FieldSized(item)),
super::ConstSized => Some(super::ConstSized),
super::VariableType(_) |
super::ReturnType(_) |
super::SizedReturnType |
+ super::SizedYieldType |
super::ReturnNoExpression |
super::RepeatVec |
super::FieldSized(_) |
super::VariableType(_) |
super::ReturnType(_) |
super::SizedReturnType |
+ super::SizedYieldType |
super::ReturnNoExpression |
super::RepeatVec |
super::FieldSized(_) |
/// Go from a mut raw pointer to a const raw pointer.
MutToConstPointer,
- /// Dereference once, producing an lvalue.
+ /// Dereference once, producing a place.
Deref(Option<OverloadedDeref<'tcx>>),
/// Take the address and produce either a `&` or `*` pointer.
}
tcx.layout_depth.set(depth+1);
- let layout = LayoutDetails::compute_uncached(tcx, param_env, ty);
+ let cx = LayoutCx { tcx, param_env };
+ let layout = cx.layout_raw_uncached(ty);
tcx.layout_depth.set(depth);
layout
};
}
-impl<'a, 'tcx> LayoutDetails {
- fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ty: Ty<'tcx>)
- -> Result<&'tcx Self, LayoutError<'tcx>> {
- let cx = (tcx, param_env);
- let dl = cx.data_layout();
+#[derive(Copy, Clone)]
+pub struct LayoutCx<'tcx, C> {
+ pub tcx: C,
+ pub param_env: ty::ParamEnv<'tcx>
+}
+
+impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
+ fn layout_raw_uncached(self, ty: Ty<'tcx>)
+ -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ let dl = self.data_layout();
let scalar_unit = |value: Primitive| {
let bits = value.size(dl).bits();
assert!(bits <= 128);
}
};
let scalar = |value: Primitive| {
- tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value)))
+ tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
};
let scalar_pair = |a: Scalar, b: Scalar| {
let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
Ok(match ty.sty {
// Basic scalars.
ty::TyBool => {
- tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
+ tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I8, false),
valid_range: 0..=1
}))
}
ty::TyChar => {
- tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
+ tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
value: Int(I32, false),
valid_range: 0..=0x10FFFF
}))
ty::TyFnPtr(_) => {
let mut ptr = scalar_unit(Pointer);
ptr.valid_range.start = 1;
- tcx.intern_layout(LayoutDetails::scalar(cx, ptr))
+ tcx.intern_layout(LayoutDetails::scalar(self, ptr))
}
// The never type.
let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
if pointee.is_sized(tcx, param_env, DUMMY_SP) {
- return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
+ return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
}
let unsized_part = tcx.struct_tail(pointee);
let metadata = match unsized_part.sty {
ty::TyForeign(..) => {
- return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
+ return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
}
ty::TySlice(_) | ty::TyStr => {
scalar_unit(Int(dl.ptr_sized_integer(), false))
}
}
- let element = cx.layout_of(element)?;
+ let element = self.layout_of(element)?;
let count = count.val.to_const_int().unwrap().to_u64().unwrap();
let size = element.size.checked_mul(count, dl)
.ok_or(LayoutError::SizeOverflow(ty))?;
})
}
ty::TySlice(element) => {
- let element = cx.layout_of(element)?;
+ let element = self.layout_of(element)?;
tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Array {
// Tuples, generators and closures.
ty::TyGenerator(def_id, ref substs, _) => {
let tys = substs.field_tys(def_id, tcx);
- univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
ty::TyClosure(def_id, ref substs) => {
let tys = substs.upvar_tys(def_id, tcx);
- univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(),
StructKind::AlwaysSized)?
}
StructKind::MaybeUnsized
};
- univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
&ReprOptions::default(), kind)?
}
// SIMD vector types.
ty::TyAdt(def, ..) if def.repr.simd() => {
- let element = cx.layout_of(ty.simd_type(tcx))?;
+ let element = self.layout_of(ty.simd_type(tcx))?;
let count = ty.simd_size(tcx) as u64;
assert!(count > 0);
let scalar = match element.abi {
// Cache the field layouts.
let variants = def.variants.iter().map(|v| {
v.fields.iter().map(|field| {
- cx.layout_of(field.ty(tcx, substs))
+ self.layout_of(field.ty(tcx, substs))
}).collect::<Result<Vec<_>, _>>()
}).collect::<Result<Vec<_>, _>>()?;
let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
st.variants = Variants::Single { index: v };
// Exclude 0 from the range of a newtype ABI NonZero<T>.
- if Some(def.did) == cx.tcx().lang_items().non_zero() {
+ if Some(def.did) == self.tcx.lang_items().non_zero() {
match st.abi {
Abi::Scalar(ref mut scalar) |
Abi::ScalarPair(ref mut scalar, _) => {
let count = (niche_variants.end - niche_variants.start + 1) as u128;
for (field_index, field) in variants[i].iter().enumerate() {
let (offset, niche, niche_start) =
- match field.find_niche(cx, count)? {
+ match field.find_niche(self, count)? {
Some(niche) => niche,
None => continue
};
/// This is invoked by the `layout_raw` query to record the final
/// layout of each type.
#[inline]
- fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ty: Ty<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- layout: TyLayout<'tcx>) {
+ fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
// If we are running with `-Zprint-type-sizes`, record layouts for
// dumping later. Ignore layouts that are done with non-empty
// environments or non-monomorphic layouts, as the user only wants
// to see the stuff resulting from the final trans session.
if
- !tcx.sess.opts.debugging_opts.print_type_sizes ||
- ty.has_param_types() ||
- ty.has_self_ty() ||
- !param_env.caller_bounds.is_empty()
+ !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
+ layout.ty.has_param_types() ||
+ layout.ty.has_self_ty() ||
+ !self.param_env.caller_bounds.is_empty()
{
return;
}
- Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
+ self.record_layout_for_printing_outlined(layout)
}
- fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ty: Ty<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- layout: TyLayout<'tcx>) {
- let cx = (tcx, param_env);
+ fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
// (delay format until we actually need it)
let record = |kind, opt_discr_size, variants| {
- let type_desc = format!("{:?}", ty);
- tcx.sess.code_stats.borrow_mut().record_type_size(kind,
- type_desc,
- layout.align,
- layout.size,
- opt_discr_size,
- variants);
+ let type_desc = format!("{:?}", layout.ty);
+ self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
+ type_desc,
+ layout.align,
+ layout.size,
+ opt_discr_size,
+ variants);
};
- let adt_def = match ty.sty {
+ let adt_def = match layout.ty.sty {
ty::TyAdt(ref adt_def, _) => {
- debug!("print-type-size t: `{:?}` process adt", ty);
+ debug!("print-type-size t: `{:?}` process adt", layout.ty);
adt_def
}
ty::TyClosure(..) => {
- debug!("print-type-size t: `{:?}` record closure", ty);
+ debug!("print-type-size t: `{:?}` record closure", layout.ty);
record(DataTypeKind::Closure, None, vec![]);
return;
}
_ => {
- debug!("print-type-size t: `{:?}` skip non-nominal", ty);
+ debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
return;
}
};
layout: TyLayout<'tcx>| {
let mut min_size = Size::from_bytes(0);
let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
- match layout.field(cx, i) {
+ match layout.field(self, i) {
Err(err) => {
bug!("no layout found for field {}: `{:?}`", name, err);
}
Variants::NicheFilling { .. } |
Variants::Tagged { .. } => {
debug!("print-type-size `{:#?}` adt general variants def {}",
- ty, adt_def.variants.len());
+ layout.ty, adt_def.variants.len());
let variant_infos: Vec<_> =
adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
let fields: Vec<_> =
variant_def.fields.iter().map(|f| f.name).collect();
build_variant_info(Some(variant_def.name),
&fields,
- layout.for_variant(cx, i))
+ layout.for_variant(self, i))
})
.collect();
record(adt_kind.into(), match layout.variants {
- Variants::Tagged { ref discr, .. } => Some(discr.value.size(tcx)),
+ Variants::Tagged { ref discr, .. } => Some(discr.value.size(self)),
_ => None
}, variant_infos);
}
assert!(!ty.has_infer_types());
// First try computing a static layout.
- let err = match (tcx, param_env).layout_of(ty) {
+ let err = match tcx.layout_of(param_env.and(ty)) {
Ok(layout) => {
return Ok(SizeSkeleton::Known(layout.size));
}
}
}
-impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) {
+impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
fn data_layout(&self) -> &TargetDataLayout {
- self.0.data_layout()
+ self.tcx.data_layout()
}
}
-impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) {
+impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
- self.0.tcx()
+ self.tcx.tcx()
}
}
fn layout_of(self, ty: T) -> Self::TyLayout;
}
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) {
+impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
- #[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- let (tcx, param_env) = self;
-
- let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
- let details = tcx.layout_raw(param_env.reveal_all().and(ty))?;
+ let param_env = self.param_env.reveal_all();
+ let ty = self.tcx.normalize_associated_type_in_env(&ty, param_env);
+ let details = self.tcx.layout_raw(param_env.and(ty))?;
let layout = TyLayout {
ty,
details
// completed, to avoid problems around recursive structures
// and the like. (Admitedly, I wasn't able to reproduce a problem
// here, but it seems like the right thing to do. -nmatsakis)
- LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout);
+ self.record_layout_for_printing(layout);
Ok(layout)
}
}
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
- ty::ParamEnv<'tcx>) {
+impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for LayoutCx<'tcx, ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>> {
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
- #[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- let (tcx_at, param_env) = self;
-
- let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
- let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?;
+ let param_env = self.param_env.reveal_all();
+ let ty = self.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
+ let details = self.tcx.layout_raw(param_env.reveal_all().and(ty))?;
let layout = TyLayout {
ty,
details
// completed, to avoid problems around recursive structures
// and the like. (Admitedly, I wasn't able to reproduce a problem
// here, but it seems like the right thing to do. -nmatsakis)
- LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout);
+ let cx = LayoutCx {
+ tcx: *self.tcx,
+ param_env: self.param_env
+ };
+ cx.record_layout_for_printing(layout);
Ok(layout)
}
}
+// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode.
+ #[inline]
+ pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
+ -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
+ let cx = LayoutCx {
+ tcx: self,
+ param_env: param_env_and_ty.param_env
+ };
+ cx.layout_of(param_env_and_ty.value)
+ }
+}
+
+impl<'a, 'tcx> ty::maps::TyCtxtAt<'a, 'tcx, 'tcx> {
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode.
+ #[inline]
+ pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
+ -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
+ let cx = LayoutCx {
+ tcx: self,
+ param_env: param_env_and_ty.param_env
+ };
+ cx.layout_of(param_env_and_ty.value)
+ }
+}
+
impl<'a, 'tcx> TyLayout<'tcx> {
pub fn for_variant<C>(&self, cx: C, variant_index: usize) -> Self
where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
-> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>),
[] fn export_name: ExportName(DefId) -> Option<Symbol>,
[] fn contains_extern_indicator: ContainsExternIndicator(DefId) -> bool,
+ [] fn symbol_export_level: GetSymbolExportLevel(DefId) -> SymbolExportLevel,
[] fn is_translated_function: IsTranslatedFunction(DefId) -> bool,
[] fn codegen_unit: CodegenUnit(InternedString) -> Arc<CodegenUnit<'tcx>>,
[] fn compile_codegen_unit: CompileCodegenUnit(InternedString) -> Stats,
DepKind::TargetFeaturesWhitelist => { force!(target_features_whitelist, LOCAL_CRATE); }
DepKind::TargetFeaturesEnabled => { force!(target_features_enabled, def_id!()); }
+
+ DepKind::GetSymbolExportLevel => { force!(symbol_export_level, def_id!()); }
}
true
pub use self::AssociatedItemContainer::*;
pub use self::BorrowKind::*;
pub use self::IntVarValue::*;
-pub use self::LvaluePreference::*;
pub use self::fold::TypeFoldable;
use hir::{map as hir_map, FreevarMap, TraitMap};
}
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum LvaluePreference {
- PreferMutLvalue,
- NoPreference
-}
-
-impl LvaluePreference {
- pub fn from_mutbl(m: hir::Mutability) -> Self {
- match m {
- hir::MutMutable => PreferMutLvalue,
- hir::MutImmutable => NoPreference,
- }
- }
-}
-
impl BorrowKind {
pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
match m {
}
}
- pub fn expr_is_lval(self, expr: &hir::Expr) -> bool {
- match expr.node {
- hir::ExprPath(hir::QPath::Resolved(_, ref path)) => {
- match path.def {
- Def::Local(..) | Def::Upvar(..) | Def::Static(..) | Def::Err => true,
- _ => false,
- }
- }
-
- hir::ExprType(ref e, _) => {
- self.expr_is_lval(e)
- }
-
- hir::ExprUnary(hir::UnDeref, _) |
- hir::ExprField(..) |
- hir::ExprTupField(..) |
- hir::ExprIndex(..) => {
- true
- }
-
- // Partially qualified paths in expressions can only legally
- // refer to associated items which are always rvalues.
- hir::ExprPath(hir::QPath::TypeRelative(..)) |
-
- hir::ExprCall(..) |
- hir::ExprMethodCall(..) |
- hir::ExprStruct(..) |
- hir::ExprTup(..) |
- hir::ExprIf(..) |
- hir::ExprMatch(..) |
- hir::ExprClosure(..) |
- hir::ExprBlock(..) |
- hir::ExprRepeat(..) |
- hir::ExprArray(..) |
- hir::ExprBreak(..) |
- hir::ExprAgain(..) |
- hir::ExprRet(..) |
- hir::ExprWhile(..) |
- hir::ExprLoop(..) |
- hir::ExprAssign(..) |
- hir::ExprInlineAsm(..) |
- hir::ExprAssignOp(..) |
- hir::ExprLit(_) |
- hir::ExprUnary(..) |
- hir::ExprBox(..) |
- hir::ExprAddrOf(..) |
- hir::ExprBinary(..) |
- hir::ExprYield(..) |
- hir::ExprCast(..) => {
- false
- }
- }
- }
-
pub fn provided_trait_methods(self, id: DefId) -> Vec<AssociatedItem> {
self.associated_items(id)
.filter(|item| item.kind == AssociatedKind::Method && item.defaultness.has_value())
state.map(move |d| d.ty.subst(tcx, self.substs))
}
+    /// These are the types of the fields of a generator which
+    /// are available before the generator transformation.
+ /// It includes the upvars and the state discriminant which is u32.
+ pub fn pre_transforms_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) ->
+ impl Iterator<Item=Ty<'tcx>> + 'a
+ {
+ self.upvar_tys(def_id, tcx).chain(iter::once(tcx.types.u32))
+ }
+
/// This is the types of all the fields stored in a generator.
/// It includes the upvars, state types and the state discriminant which is u32.
pub fn field_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) ->
impl Iterator<Item=Ty<'tcx>> + 'a
{
- let upvars = self.upvar_tys(def_id, tcx);
- let state = self.state_tys(def_id, tcx);
- upvars.chain(iter::once(tcx.types.u32)).chain(state)
+ self.pre_transforms_tys(def_id, tcx).chain(self.state_tys(def_id, tcx))
}
}
///
/// The parameter `explicit` indicates if this is an *explicit* dereference.
/// Some types---notably unsafe ptrs---can only be dereferenced explicitly.
- pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference)
- -> Option<TypeAndMut<'tcx>>
- {
+ pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
match self.sty {
TyAdt(def, _) if def.is_box() => {
Some(TypeAndMut {
ty: self.boxed_ty(),
- mutbl: if pref == ty::PreferMutLvalue {
- hir::MutMutable
- } else {
- hir::MutImmutable
- },
+ mutbl: hir::MutImmutable,
})
},
TyRef(_, mt) => Some(mt),
max_atomic_width: Some(32),
post_link_args: args,
target_family: Some("unix".to_string()),
+ codegen_backend: "emscripten".to_string(),
.. Default::default()
};
Ok(Target {
// except according to those terms.
use LinkerFlavor;
-use target::{Target, TargetOptions, TargetResult};
+use target::{Target, TargetResult};
pub fn target() -> TargetResult {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips32r2".to_string();
+ base.features = "+mips32r2,+soft-float".to_string();
+ base.max_atomic_width = Some(32);
+ // see #36994
+ base.exe_allocation_crate = None;
+ base.crt_static_default = false;
Ok(Target {
llvm_target: "mips-unknown-linux-musl".to_string(),
target_endian: "big".to_string(),
target_env: "musl".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
- options: TargetOptions {
- cpu: "mips32r2".to_string(),
- features: "+mips32r2,+soft-float".to_string(),
- max_atomic_width: Some(32),
-
- // see #36994
- exe_allocation_crate: None,
-
- ..super::linux_base::opts()
- }
+ options: base,
})
}
// except according to those terms.
use LinkerFlavor;
-use target::{Target, TargetOptions, TargetResult};
+use target::{Target, TargetResult};
pub fn target() -> TargetResult {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips32".to_string();
+ base.features = "+mips32,+soft-float".to_string();
+ base.max_atomic_width = Some(32);
+ // see #36994
+ base.exe_allocation_crate = None;
+ base.crt_static_default = false;
Ok(Target {
llvm_target: "mipsel-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
target_env: "musl".to_string(),
target_vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
- options: TargetOptions {
- cpu: "mips32".to_string(),
- features: "+mips32,+soft-float".to_string(),
- max_atomic_width: Some(32),
-
- // see #36994
- exe_allocation_crate: None,
-
- ..super::linux_base::opts()
- }
+ options: base,
})
}
/// Whether to lower 128-bit operations to compiler_builtins calls. Use if
/// your backend only supports 64-bit and smaller math.
pub i128_lowering: bool,
+
+ /// The codegen backend to use for this target, typically "llvm"
+ pub codegen_backend: String,
+
+ /// The default visibility for symbols in this target should be "hidden"
+ /// rather than "default"
+ pub default_hidden_visibility: bool,
}
impl Default for TargetOptions {
singlethread: false,
no_builtins: false,
i128_lowering: false,
+ codegen_backend: "llvm".to_string(),
+ default_hidden_visibility: false,
}
}
}
key!(requires_lto, bool);
key!(singlethread, bool);
key!(no_builtins, bool);
+ key!(codegen_backend);
+ key!(default_hidden_visibility, bool);
if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) {
for name in array.iter().filter_map(|abi| abi.as_string()) {
target_option_val!(requires_lto);
target_option_val!(singlethread);
target_option_val!(no_builtins);
+ target_option_val!(codegen_backend);
+ target_option_val!(default_hidden_visibility);
if default.abi_blacklist != self.options.abi_blacklist {
d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter()
// don't want to invoke that many gcc instances.
default_codegen_units: Some(1),
+ // Since MSP430 doesn't meaningfully support faulting on illegal
+ // instructions, LLVM generates a call to abort() function instead
+ // of a trap instruction. Such calls are 4 bytes long, and that is
+        // too much overhead for such a small target.
+ trap_unreachable: false,
+
.. Default::default( )
}
})
max_atomic_width: Some(32),
post_link_args,
target_family: Some("unix".to_string()),
+ codegen_backend: "emscripten".to_string(),
.. Default::default()
};
Ok(Target {
// performing LTO with compiler-builtins.
no_builtins: true,
+ // no dynamic linking, no need for default visibility!
+ default_hidden_visibility: true,
+
.. Default::default()
};
Ok(Target {
# Formal model
Throughout the docs we'll consider a simple subset of Rust in which
-you can only borrow from lvalues, defined like so:
+you can only borrow from places, defined like so:
```text
-LV = x | LV.f | *LV
+P = x | P.f | *P
```
-Here `x` represents some variable, `LV.f` is a field reference,
-and `*LV` is a pointer dereference. There is no auto-deref or other
+Here `x` represents some variable, `P.f` is a field reference,
+and `*P` is a pointer dereference. There is no auto-deref or other
niceties. This means that if you have a type like:
```rust
```
and a variable `a: Box<S>`, then the rust expression `a.f` would correspond
-to an `LV` of `(*a).f`.
+to a `P` of `(*a).f`.
Here is the formal grammar for the types we'll consider:
#### Loans and restrictions
The way the borrow checker works is that it analyzes each borrow
-expression (in our simple model, that's stuff like `&LV`, though in
+expression (in our simple model, that's stuff like `&P`, though in
real life there are a few other cases to consider). For each borrow
expression, it computes a `Loan`, which is a data structure that
records (1) the value being borrowed, (2) the mutability and scope of
follows:
```text
-LOAN = (LV, LT, MQ, RESTRICTION*)
-RESTRICTION = (LV, ACTION*)
+LOAN = (P, LT, MQ, RESTRICTION*)
+RESTRICTION = (P, ACTION*)
ACTION = MUTATE | CLAIM | FREEZE
```
-Here the `LOAN` tuple defines the lvalue `LV` being borrowed; the
+Here the `LOAN` tuple defines the place `P` being borrowed; the
lifetime `LT` of that borrow; the mutability `MQ` of the borrow; and a
list of restrictions. The restrictions indicate actions which, if
taken, could invalidate the loan and lead to type safety violations.
-Each `RESTRICTION` is a pair of a restrictive lvalue `LV` (which will
+Each `RESTRICTION` is a pair of a restrictive place `P` (which will
either be the path that was borrowed or some prefix of the path that
was borrowed) and a set of restricted actions. There are three kinds
-of actions that may be restricted for the path `LV`:
+of actions that may be restricted for the path `P`:
-- `MUTATE` means that `LV` cannot be assigned to;
-- `CLAIM` means that the `LV` cannot be borrowed mutably;
-- `FREEZE` means that the `LV` cannot be borrowed immutably;
+- `MUTATE` means that `P` cannot be assigned to;
+- `CLAIM` means that the `P` cannot be borrowed mutably;
+- `FREEZE` means that the `P` cannot be borrowed immutably;
-Finally, it is never possible to move from an lvalue that appears in a
-restriction. This implies that the "empty restriction" `(LV, [])`,
+Finally, it is never possible to move from a place that appears in a
+restriction. This implies that the "empty restriction" `(P, [])`,
which contains an empty set of actions, still has a purpose---it
-prevents moves from `LV`. I chose not to make `MOVE` a fourth kind of
+prevents moves from `P`. I chose not to make `MOVE` a fourth kind of
action because that would imply that sometimes moves are permitted
from restricted values, which is not the case.
We start with the `gather_loans` pass, which walks the AST looking for
borrows. For each borrow, there are three bits of information: the
-lvalue `LV` being borrowed and the mutability `MQ` and lifetime `LT`
+place `P` being borrowed and the mutability `MQ` and lifetime `LT`
of the resulting pointer. Given those, `gather_loans` applies four
validity tests:
-1. `MUTABILITY(LV, MQ)`: The mutability of the reference is
-compatible with the mutability of `LV` (i.e., not borrowing immutable
+1. `MUTABILITY(P, MQ)`: The mutability of the reference is
+compatible with the mutability of `P` (i.e., not borrowing immutable
data as mutable).
-2. `ALIASABLE(LV, MQ)`: The aliasability of the reference is
-compatible with the aliasability of `LV`. The goal is to prevent
+2. `ALIASABLE(P, MQ)`: The aliasability of the reference is
+compatible with the aliasability of `P`. The goal is to prevent
`&mut` borrows of aliasability data.
-3. `LIFETIME(LV, LT, MQ)`: The lifetime of the borrow does not exceed
+3. `LIFETIME(P, LT, MQ)`: The lifetime of the borrow does not exceed
the lifetime of the value being borrowed.
-4. `RESTRICTIONS(LV, LT, ACTIONS) = RS`: This pass checks and computes the
+4. `RESTRICTIONS(P, LT, ACTIONS) = RS`: This pass checks and computes the
restrictions to maintain memory safety. These are the restrictions
that will go into the final loan. We'll discuss in more detail below.
Checking mutability is fairly straightforward. We just want to prevent
immutable data from being borrowed as mutable. Note that it is ok to borrow
mutable data as immutable, since that is simply a freeze. The judgement
-`MUTABILITY(LV, MQ)` means the mutability of `LV` is compatible with a borrow
+`MUTABILITY(P, MQ)` means the mutability of `P` is compatible with a borrow
of mutability `MQ`. The Rust code corresponding to this predicate is the
function `check_mutability` in `middle::borrowck::gather_loans`.
Fields and boxes inherit their mutability from
their base expressions, so both of their rules basically
-delegate the check to the base expression `LV`:
+delegate the check to the base expression `P`:
```text
-MUTABILITY(LV.f, MQ) // M-Field
- MUTABILITY(LV, MQ)
+MUTABILITY(P.f, MQ) // M-Field
+ MUTABILITY(P, MQ)
-MUTABILITY(*LV, MQ) // M-Deref-Unique
- TYPE(LV) = Box<Ty>
- MUTABILITY(LV, MQ)
+MUTABILITY(*P, MQ) // M-Deref-Unique
+ TYPE(P) = Box<Ty>
+ MUTABILITY(P, MQ)
```
### Checking mutability of immutable pointer types
be borrowed if MQ is immutable:
```text
-MUTABILITY(*LV, imm) // M-Deref-Borrowed-Imm
- TYPE(LV) = &Ty
+MUTABILITY(*P, imm) // M-Deref-Borrowed-Imm
+ TYPE(P) = &Ty
```
### Checking mutability of mutable pointer types
`&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut:
```text
-MUTABILITY(*LV, MQ) // M-Deref-Borrowed-Mut
- TYPE(LV) = &mut Ty
+MUTABILITY(*P, MQ) // M-Deref-Borrowed-Mut
+ TYPE(P) = &mut Ty
```
## Checking aliasability
The goal of the aliasability check is to ensure that we never permit `&mut`
-borrows of aliasable data. The judgement `ALIASABLE(LV, MQ)` means the
-aliasability of `LV` is compatible with a borrow of mutability `MQ`. The Rust
+borrows of aliasable data. The judgement `ALIASABLE(P, MQ)` means the
+aliasability of `P` is compatible with a borrow of mutability `MQ`. The Rust
code corresponding to this predicate is the function `check_aliasability()` in
`middle::borrowck::gather_loans`.
Owned content is aliasable if it is found in an aliasable location:
```text
-ALIASABLE(LV.f, MQ) // M-Field
- ALIASABLE(LV, MQ)
+ALIASABLE(P.f, MQ) // M-Field
+ ALIASABLE(P, MQ)
-ALIASABLE(*LV, MQ) // M-Deref-Unique
- ALIASABLE(LV, MQ)
+ALIASABLE(*P, MQ) // M-Deref-Unique
+ ALIASABLE(P, MQ)
```
### Checking aliasability of immutable pointer types
borrowed immutably:
```text
-ALIASABLE(*LV, imm) // M-Deref-Borrowed-Imm
- TYPE(LV) = &Ty
+ALIASABLE(*P, imm) // M-Deref-Borrowed-Imm
+ TYPE(P) = &Ty
```
### Checking aliasability of mutable pointer types
`&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut:
```text
-ALIASABLE(*LV, MQ) // M-Deref-Borrowed-Mut
- TYPE(LV) = &mut Ty
+ALIASABLE(*P, MQ) // M-Deref-Borrowed-Mut
+ TYPE(P) = &mut Ty
```
## Checking lifetime
These rules aim to ensure that no data is borrowed for a scope that exceeds
its lifetime. These two computations wind up being intimately related.
-Formally, we define a predicate `LIFETIME(LV, LT, MQ)`, which states that
-"the lvalue `LV` can be safely borrowed for the lifetime `LT` with mutability
+Formally, we define a predicate `LIFETIME(P, LT, MQ)`, which states that
+"the place `P` can be safely borrowed for the lifetime `LT` with mutability
`MQ`". The Rust code corresponding to this predicate is the module
`middle::borrowck::gather_loans::lifetime`.
of its owner:
```text
-LIFETIME(LV.f, LT, MQ) // L-Field
- LIFETIME(LV, LT, MQ)
+LIFETIME(P.f, LT, MQ) // L-Field
+ LIFETIME(P, LT, MQ)
-LIFETIME(*LV, LT, MQ) // L-Deref-Send
- TYPE(LV) = Box<Ty>
- LIFETIME(LV, LT, MQ)
+LIFETIME(*P, LT, MQ) // L-Deref-Send
+ TYPE(P) = Box<Ty>
+ LIFETIME(P, LT, MQ)
```
### Checking lifetime for derefs of references
itself:
```text
-LIFETIME(*LV, LT, MQ) // L-Deref-Borrowed
- TYPE(LV) = <' Ty OR <' mut Ty
+LIFETIME(*P, LT, MQ) // L-Deref-Borrowed
+ TYPE(P) = <' Ty OR <' mut Ty
LT <= LT'
```
The final rules govern the computation of *restrictions*, meaning that
we compute the set of actions that will be illegal for the life of the
-loan. The predicate is written `RESTRICTIONS(LV, LT, ACTIONS) =
+loan. The predicate is written `RESTRICTIONS(P, LT, ACTIONS) =
RESTRICTION*`, which can be read "in order to prevent `ACTIONS` from
-occurring on `LV`, the restrictions `RESTRICTION*` must be respected
+occurring on `P`, the restrictions `RESTRICTION*` must be respected
for the lifetime of the loan".
Note that there is an initial set of restrictions: these restrictions
are computed based on the kind of borrow:
```text
-&mut LV => RESTRICTIONS(LV, LT, MUTATE|CLAIM|FREEZE)
-&LV => RESTRICTIONS(LV, LT, MUTATE|CLAIM)
+&mut P => RESTRICTIONS(P, LT, MUTATE|CLAIM|FREEZE)
+&P => RESTRICTIONS(P, LT, MUTATE|CLAIM)
```
The reasoning here is that a mutable borrow must be the only writer,
field:
```text
-RESTRICTIONS(LV.f, LT, ACTIONS) = RS, (LV.f, ACTIONS) // R-Field
- RESTRICTIONS(LV, LT, ACTIONS) = RS
+RESTRICTIONS(P.f, LT, ACTIONS) = RS, (P.f, ACTIONS) // R-Field
+ RESTRICTIONS(P, LT, ACTIONS) = RS
```
The reasoning here is as follows. If the field must not be mutated,
Because the mutability of owned referents is inherited, restricting an
owned referent is similar to restricting a field, in that it implies
restrictions on the pointer. However, boxes have an important
-twist: if the owner `LV` is mutated, that causes the owned referent
-`*LV` to be freed! So whenever an owned referent `*LV` is borrowed, we
-must prevent the box `LV` from being mutated, which means
+twist: if the owner `P` is mutated, that causes the owned referent
+`*P` to be freed! So whenever an owned referent `*P` is borrowed, we
+must prevent the box `P` from being mutated, which means
that we always add `MUTATE` and `CLAIM` to the restriction set imposed
-on `LV`:
+on `P`:
```text
-RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Send-Pointer
- TYPE(LV) = Box<Ty>
- RESTRICTIONS(LV, LT, ACTIONS|MUTATE|CLAIM) = RS
+RESTRICTIONS(*P, LT, ACTIONS) = RS, (*P, ACTIONS) // R-Deref-Send-Pointer
+ TYPE(P) = Box<Ty>
+ RESTRICTIONS(P, LT, ACTIONS|MUTATE|CLAIM) = RS
```
### Restrictions for loans of immutable borrowed referents
Immutable borrowed referents are freely aliasable, meaning that
the compiler does not prevent you from copying the pointer. This
implies that issuing restrictions is useless. We might prevent the
-user from acting on `*LV` itself, but there could be another path
-`*LV1` that refers to the exact same memory, and we would not be
+user from acting on `*P` itself, but there could be another path
+`*P1` that refers to the exact same memory, and we would not be
restricting that path. Therefore, the rule for `&Ty` pointers
always returns an empty set of restrictions, and it only permits
restricting `MUTATE` and `CLAIM` actions:
```text
-RESTRICTIONS(*LV, LT, ACTIONS) = [] // R-Deref-Imm-Borrowed
- TYPE(LV) = <' Ty
+RESTRICTIONS(*P, LT, ACTIONS) = [] // R-Deref-Imm-Borrowed
+ TYPE(P) = <' Ty
LT <= LT' // (1)
ACTIONS subset of [MUTATE, CLAIM]
```
(`*point : &'b Point`) is enough to guarantee the memory is immutable
and valid for the lifetime `'b`. This is reflected in
`RESTRICTIONS()` by the fact that we do not recurse (i.e., we impose
-no restrictions on `LV`, which in this particular case is the pointer
+no restrictions on `P`, which in this particular case is the pointer
`point : &'a &'b Point`).
#### Why both `LIFETIME()` and `RESTRICTIONS()`?
The rule for mutable borrowed pointers is as follows:
```text
-RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Mut-Borrowed
- TYPE(LV) = <' mut Ty
+RESTRICTIONS(*P, LT, ACTIONS) = RS, (*P, ACTIONS) // R-Deref-Mut-Borrowed
+ TYPE(P) = <' mut Ty
LT <= LT' // (1)
- RESTRICTIONS(LV, LT, ACTIONS) = RS // (2)
+ RESTRICTIONS(P, LT, ACTIONS) = RS // (2)
```
Let's examine the two numbered clauses:
Remember that `&mut` pointers are linear, and hence `let t1 = t0` is a
move of `t0` -- or would be, if it were legal. Instead, we get an
-error, because clause (2) imposes restrictions on `LV` (`t0`, here),
+error, because clause (2) imposes restrictions on `P` (`t0`, here),
and any restrictions on a path make it impossible to move from that
path.
The `MovePath` tree tracks every path that is moved or assigned to.
These paths have the same form as the `LoanPath` data structure, which
-in turn is the "real world version of the lvalues `LV` that we
+in turn is the "real world" version of the places `P` that we
introduced earlier. The difference between a `MovePath` and a `LoanPath`
is that move paths are:
While writing up these docs, I encountered some rules I believe to be
stricter than necessary:
-- I think restricting the `&mut` LV against moves and `ALIAS` is sufficient,
+- I think restricting the `&mut` P against moves and `ALIAS` is sufficient,
`MUTATE` and `CLAIM` are overkill. `MUTATE` was necessary when swap was
a built-in operator, but as it is not, it is implied by `CLAIM`,
and `CLAIM` is implied by `ALIAS`. The only net effect of this is an
// 3. Where does old loan expire.
let previous_end_span =
- Some(old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree)
- .end_point());
+ Some(self.tcx().sess.codemap().end_point(
+ old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree)));
let mut err = match (new_loan.kind, old_loan.kind) {
(ty::MutBorrow, ty::MutBorrow) =>
fn scope(&self, cmt: &mc::cmt<'tcx>) -> ty::Region<'tcx> {
//! Returns the maximal region scope for the which the
- //! lvalue `cmt` is guaranteed to be valid without any
+ //! place `cmt` is guaranteed to be valid without any
//! rooting etc, and presuming `cmt` is not mutated.
match cmt.cat {
if !force_analysis && move_data.is_empty() && all_loans.is_empty() {
// large arrays of data inserted as constants can take a lot of
// time and memory to borrow-check - see issue #36799. However,
- // they don't have lvalues, so no borrow-check is actually needed.
+ // they don't have places, so no borrow-check is actually needed.
// Recognize that case and skip borrow-checking.
debug!("skipping loan propagation for {:?} because of no loans", body_id);
return None;
}
// This can be:
-// - a pointer dereference (`*LV` in README.md)
+// - a pointer dereference (`*P` in README.md)
// - a field reference, with an optional definition of the containing
-// enum variant (`LV.f` in README.md)
+// enum variant (`P.f` in README.md)
// `DefId` is present when the field is part of struct that is in
// a variant of an enum. For instance in:
// `enum E { X { foo: u32 }, Y { foo: u32 }}`
};
match cause {
- mc::AliasableStatic => {
- // This happens when we have an `&mut` or assignment to a
- // static. We should have already reported a mutability
- // violation first, but may have continued compiling.
- self.tcx.sess.delay_span_bug(
- span,
- &format!("aliasability violation for static `{}`", prefix)
- );
- return;
- }
mc::AliasableStaticMut => {
// This path cannot occur. `static mut X` is not checked
// for aliasability violations.
span_bug!(span, "aliasability violation for static mut `{}`", prefix)
}
- mc::AliasableBorrowed => {}
+ mc::AliasableStatic | mc::AliasableBorrowed => {}
};
let blame = cmt.immutability_blame();
let mut err = match blame {
fn region_end_span(&self, region: ty::Region<'tcx>) -> Option<Span> {
match *region {
ty::ReScope(scope) => {
- Some(scope.span(self.tcx, &self.region_scope_tree).end_point())
+ Some(self.tcx.sess.codemap().end_point(
+ scope.span(self.tcx, &self.region_scope_tree)))
}
_ => None
}
/// span of node where assignment occurs
pub span: Span,
- /// id for l-value expression on lhs of assignment
+ /// id for place expression on lhs of assignment
pub assignee_id: hir::ItemLocalId,
}
continue
}
- let mut_span = tcx.sess.codemap().span_until_char(ids[0].2, ' ');
+ let mut_span = tcx.sess.codemap().span_until_non_whitespace(ids[0].2);
// Ok, every name wasn't used mutably, so issue a warning that this
// didn't need to be mutable.
}
}
+
impl<'a, 'tcx> PatternContext<'a, 'tcx> {
fn report_inlining_errors(&self, pat_span: Span) {
for error in &self.errors {
match *error {
PatternError::StaticInPattern(span) => {
- span_err!(self.tcx.sess, span, E0158,
- "statics cannot be referenced in patterns");
+ self.span_e0158(span, "statics cannot be referenced in patterns")
+ }
+ PatternError::AssociatedConstInPattern(span) => {
+ self.span_e0158(span, "associated consts cannot be referenced in patterns")
}
PatternError::ConstEval(ref err) => {
err.report(self.tcx, pat_span, "pattern");
}
}
}
+
+ fn span_e0158(&self, span: Span, text: &str) {
+ span_err!(self.tcx.sess, span, E0158, "{}", text)
+ }
}
impl<'a, 'tcx> MatchVisitor<'a, 'tcx> {
use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::LayoutOf;
use rustc::ty::util::IntTypeExt;
use rustc::ty::subst::{Substs, Subst};
use rustc::util::common::ErrorReported;
if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic {
let layout_of = |ty: Ty<'tcx>| {
let ty = tcx.erase_regions(&ty);
- (tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| {
+ tcx.at(e.span).layout_of(cx.param_env.and(ty)).map_err(|err| {
ConstEvalErr { span: e.span, kind: LayoutError(err) }
})
};
return Ok(mk_const(Integral(Usize(ConstUsize::new(align,
tcx.sess.target.usize_ty).unwrap()))));
}
+ "type_id" => {
+ let type_id = tcx.type_id_hash(substs.type_at(0));
+ return Ok(mk_const(Integral(U64(type_id))));
+ }
_ => signal!(e, TypeckError)
}
}
#[derive(Clone, Debug)]
pub enum PatternError<'tcx> {
+ AssociatedConstInPattern(Span),
StaticInPattern(Span),
ConstEval(ConstEvalErr<'tcx>),
}
-> Pattern<'tcx> {
let ty = self.tables.node_id_to_type(id);
let def = self.tables.qpath_def(qpath, id);
+ let is_associated_const = match def {
+ Def::AssociatedConst(_) => true,
+ _ => false,
+ };
let kind = match def {
Def::Const(def_id) | Def::AssociatedConst(def_id) => {
let substs = self.tables.node_substs(id);
return pat;
}
None => {
- self.errors.push(PatternError::StaticInPattern(span));
+ self.errors.push(if is_associated_const {
+ PatternError::AssociatedConstInPattern(span)
+ } else {
+ PatternError::StaticInPattern(span)
+ });
PatternKind::Wild
}
}
new_value != value
}
+ /// Returns true if the bit has changed.
+ #[inline]
+ pub fn remove(&mut self, bit: usize) -> bool {
+ let (word, mask) = word_mask(bit);
+ let data = &mut self.data[word];
+ let value = *data;
+ let new_value = value & !mask;
+ *data = new_value;
+ new_value != value
+ }
+
#[inline]
pub fn insert_all(&mut self, all: &BitVector) -> bool {
assert!(self.data.len() == all.data.len());
[dependencies]
arena = { path = "../libarena" }
graphviz = { path = "../libgraphviz" }
-log = { version = "0.4", features = ["release_max_level_info"] }
+log = "0.4"
env_logger = { version = "0.4", default-features = false }
rustc = { path = "../librustc" }
rustc_allocator = { path = "../librustc_allocator" }
rustc_privacy = { path = "../librustc_privacy" }
rustc_resolve = { path = "../librustc_resolve" }
rustc_save_analysis = { path = "../librustc_save_analysis" }
-rustc_trans = { path = "../librustc_trans", optional = true }
rustc_trans_utils = { path = "../librustc_trans_utils" }
rustc_typeck = { path = "../librustc_typeck" }
serialize = { path = "../libserialize" }
syntax_pos = { path = "../libsyntax_pos" }
ar = "0.3.0"
-
-[features]
-llvm = ["rustc_trans"]
extern crate rustc_mir;
extern crate rustc_resolve;
extern crate rustc_save_analysis;
-#[cfg(feature="llvm")]
-pub extern crate rustc_trans;
extern crate rustc_trans_utils;
extern crate rustc_typeck;
extern crate serialize;
use rustc::session::CompileIncomplete;
use rustc::session::config::{Input, PrintRequest, ErrorOutputType};
use rustc::session::config::nightly_options;
+use rustc::session::filesearch;
use rustc::session::{early_error, early_warn};
use rustc::lint::Lint;
use rustc::lint;
use rustc::middle::cstore::CrateStore;
use rustc_metadata::locator;
use rustc_metadata::cstore::CStore;
+use rustc_metadata::dynamic_lib::DynamicLibrary;
use rustc::util::common::{time, ErrorReported};
use rustc_trans_utils::trans_crate::TransCrate;
use serialize::json::ToJson;
use std::any::Any;
-use std::cmp::max;
use std::cmp::Ordering::Equal;
+use std::cmp::max;
use std::default::Default;
+use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
use std::env;
use std::ffi::OsString;
use std::io::{self, Read, Write};
use std::iter::repeat;
+use std::mem;
use std::panic;
-use std::path::PathBuf;
+use std::path::{PathBuf, Path};
use std::process::{self, Command, Stdio};
use std::rc::Rc;
use std::str;
+use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+use std::sync::{Once, ONCE_INIT};
use std::thread;
use syntax::ast;
let emitter =
errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto,
None,
- true);
+ true,
+ false);
let handler = errors::Handler::with_emitter(true, false, Box::new(emitter));
handler.emit(&MultiSpan::new(),
"aborting due to previous error(s)",
0
}
-#[cfg(not(feature="llvm"))]
-pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as DefaultTransCrate;
-#[cfg(feature="llvm")]
-pub use rustc_trans::LlvmTransCrate as DefaultTransCrate;
+fn load_backend_from_dylib(path: &Path) -> fn() -> Box<TransCrate> {
+ // Note that we're specifically using `open_global_now` here rather than
+ // `open`, namely we want the behavior on Unix of RTLD_GLOBAL and RTLD_NOW,
+ // where NOW means "bind everything right now" because we don't want
+ // surprises later on and RTLD_GLOBAL allows the symbols to be made
+ // available for future dynamic libraries opened. This is currently used by
+ // loading LLVM and then making its symbols available for other dynamic
+ // libraries.
+ let lib = match DynamicLibrary::open_global_now(path) {
+ Ok(lib) => lib,
+ Err(err) => {
+ let err = format!("couldn't load codegen backend {:?}: {:?}",
+ path,
+ err);
+ early_error(ErrorOutputType::default(), &err);
+ }
+ };
+ unsafe {
+ match lib.symbol("__rustc_codegen_backend") {
+ Ok(f) => {
+ mem::forget(lib);
+ mem::transmute::<*mut u8, _>(f)
+ }
+ Err(e) => {
+ let err = format!("couldn't load codegen backend as it \
+ doesn't export the `__rustc_codegen_backend` \
+ symbol: {:?}", e);
+ early_error(ErrorOutputType::default(), &err);
+ }
+ }
+ }
+}
-#[cfg(not(feature="llvm"))]
-pub mod rustc_trans {
- pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as LlvmTransCrate;
+pub fn get_trans(sess: &Session) -> Box<TransCrate> {
+ static INIT: Once = ONCE_INIT;
+ static mut LOAD: fn() -> Box<TransCrate> = || unreachable!();
+
+ INIT.call_once(|| {
+ let trans_name = sess.opts.debugging_opts.codegen_backend.as_ref()
+ .unwrap_or(&sess.target.target.options.codegen_backend);
+ let backend = match &trans_name[..] {
+ "metadata_only" => {
+ rustc_trans_utils::trans_crate::MetadataOnlyTransCrate::new
+ }
+ filename if filename.contains(".") => {
+ load_backend_from_dylib(filename.as_ref())
+ }
+ trans_name => get_trans_sysroot(trans_name),
+ };
- pub fn print_version() {}
- pub fn print_passes() {}
+ unsafe {
+ LOAD = backend;
+ }
+ });
+ let backend = unsafe { LOAD() };
+ backend.init(sess);
+ backend
}
-fn load_backend_from_dylib(sess: &Session, backend_name: &str) -> Box<TransCrate> {
- use std::path::Path;
- use rustc_metadata::dynamic_lib::DynamicLibrary;
-
- match DynamicLibrary::open(Some(Path::new(backend_name))) {
- Ok(lib) => {
- unsafe {
- let trans = {
- let __rustc_codegen_backend: unsafe fn(&Session) -> Box<TransCrate>;
- __rustc_codegen_backend = match lib.symbol("__rustc_codegen_backend") {
- Ok(f) => ::std::mem::transmute::<*mut u8, _>(f),
- Err(e) => sess.fatal(&format!("Couldnt load codegen backend as it\
- doesn't export the __rustc_backend_new symbol: {:?}", e)),
- };
- __rustc_codegen_backend(sess)
- };
- ::std::mem::forget(lib);
- trans
+fn get_trans_sysroot(backend_name: &str) -> fn() -> Box<TransCrate> {
+ // For now we only allow this function to be called once as it'll dlopen a
+ // few things, which seems to work best if we only do that once. In
+ // general this assertion never trips due to the once guard in `get_trans`,
+ // but there's a few manual calls to this function in this file we protect
+ // against.
+ static LOADED: AtomicBool = ATOMIC_BOOL_INIT;
+ assert!(!LOADED.fetch_or(true, Ordering::SeqCst),
+ "cannot load the default trans backend twice");
+
+ // When we're compiling this library with `--test` it'll run as a binary but
+ // not actually exercise much functionality. As a result most of the logic
+ // here is defunct (it assumes we're a dynamic library in a sysroot) so
+ // let's just return a dummy creation function which won't be used in
+ // general anyway.
+ if cfg!(test) {
+ return rustc_trans_utils::trans_crate::MetadataOnlyTransCrate::new
+ }
+
+ let target = session::config::host_triple();
+ let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()];
+ let path = current_dll_path()
+ .and_then(|s| s.canonicalize().ok());
+ if let Some(dll) = path {
+ // use `parent` twice to chop off the file name and then also the
+ // directory containing the dll which should be either `lib` or `bin`.
+ if let Some(path) = dll.parent().and_then(|p| p.parent()) {
+ // The original `path` pointed at the `rustc_driver` crate's dll.
+ // Now that dll should only be in one of two locations. The first is
+ // in the compiler's libdir, for example `$sysroot/lib/*.dll`. The
+ // other is the target's libdir, for example
+ // `$sysroot/lib/rustlib/$target/lib/*.dll`.
+ //
+ // We don't know which, so let's assume that if our `path` above
+ // ends in `$target` we *could* be in the target libdir, and always
+ // assume that we may be in the main libdir.
+ sysroot_candidates.push(path.to_owned());
+
+ if path.ends_with(target) {
+ sysroot_candidates.extend(path.parent() // chop off `$target`
+ .and_then(|p| p.parent()) // chop off `rustlib`
+ .and_then(|p| p.parent()) // chop off `lib`
+ .map(|s| s.to_owned()));
}
}
- Err(err) => {
- sess.fatal(&format!("Couldnt load codegen backend {:?}: {:?}", backend_name, err));
+ }
+
+ let sysroot = sysroot_candidates.iter()
+ .map(|sysroot| {
+ let libdir = filesearch::relative_target_lib_path(&sysroot, &target);
+ sysroot.join(libdir).with_file_name("codegen-backends")
+ })
+ .filter(|f| {
+ info!("codegen backend candidate: {}", f.display());
+ f.exists()
+ })
+ .next();
+ let sysroot = match sysroot {
+ Some(path) => path,
+ None => {
+ let candidates = sysroot_candidates.iter()
+ .map(|p| p.display().to_string())
+ .collect::<Vec<_>>()
+ .join("\n* ");
+ let err = format!("failed to find a `codegen-backends` folder \
+ in the sysroot candidates:\n* {}", candidates);
+ early_error(ErrorOutputType::default(), &err);
+ }
+ };
+ info!("probing {} for a codegen backend", sysroot.display());
+
+ let d = match sysroot.read_dir() {
+ Ok(d) => d,
+ Err(e) => {
+ let err = format!("failed to load default codegen backend, couldn't \
+ read `{}`: {}", sysroot.display(), e);
+ early_error(ErrorOutputType::default(), &err);
+ }
+ };
+
+ let mut file: Option<PathBuf> = None;
+
+ let expected_name = format!("rustc_trans-{}", backend_name);
+ for entry in d.filter_map(|e| e.ok()) {
+ let path = entry.path();
+ let filename = match path.file_name().and_then(|s| s.to_str()) {
+ Some(s) => s,
+ None => continue,
+ };
+ if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) {
+ continue
+ }
+ let name = &filename[DLL_PREFIX.len() .. filename.len() - DLL_SUFFIX.len()];
+ if name != expected_name {
+ continue
}
+ if let Some(ref prev) = file {
+ let err = format!("duplicate codegen backends found\n\
+ first: {}\n\
+ second: {}\n\
+ ", prev.display(), path.display());
+ early_error(ErrorOutputType::default(), &err);
+ }
+ file = Some(path.clone());
}
-}
-pub fn get_trans(sess: &Session) -> Box<TransCrate> {
- let trans_name = sess.opts.debugging_opts.codegen_backend.clone();
- match trans_name.as_ref().map(|s|&**s) {
- None => DefaultTransCrate::new(&sess),
- Some("llvm") => rustc_trans::LlvmTransCrate::new(&sess),
- Some("metadata_only") => {
- rustc_trans_utils::trans_crate::MetadataOnlyTransCrate::new(&sess)
+ match file {
+ Some(ref s) => return load_backend_from_dylib(s),
+ None => {
+ let err = format!("failed to load default codegen backend for `{}`, \
+ no appropriate codegen dylib found in `{}`",
+ backend_name, sysroot.display());
+ early_error(ErrorOutputType::default(), &err);
+ }
+ }
+
+ #[cfg(unix)]
+ fn current_dll_path() -> Option<PathBuf> {
+ use std::ffi::{OsStr, CStr};
+ use std::os::unix::prelude::*;
+
+ unsafe {
+ let addr = current_dll_path as usize as *mut _;
+ let mut info = mem::zeroed();
+ if libc::dladdr(addr, &mut info) == 0 {
+ info!("dladdr failed");
+ return None
+ }
+ if info.dli_fname.is_null() {
+ info!("dladdr returned null pointer");
+ return None
+ }
+ let bytes = CStr::from_ptr(info.dli_fname).to_bytes();
+ let os = OsStr::from_bytes(bytes);
+ Some(PathBuf::from(os))
}
- Some(filename) if filename.contains(".") => {
- load_backend_from_dylib(&sess, &filename)
+ }
+
+ #[cfg(windows)]
+ fn current_dll_path() -> Option<PathBuf> {
+ use std::ffi::OsString;
+ use std::os::windows::prelude::*;
+
+ extern "system" {
+ fn GetModuleHandleExW(dwFlags: u32,
+ lpModuleName: usize,
+ phModule: *mut usize) -> i32;
+ fn GetModuleFileNameW(hModule: usize,
+ lpFilename: *mut u16,
+ nSize: u32) -> u32;
+ }
+
+ const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x00000004;
+
+ unsafe {
+ let mut module = 0;
+ let r = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+ current_dll_path as usize,
+ &mut module);
+ if r == 0 {
+ info!("GetModuleHandleExW failed: {}", io::Error::last_os_error());
+ return None
+ }
+ let mut space = Vec::with_capacity(1024);
+ let r = GetModuleFileNameW(module,
+ space.as_mut_ptr(),
+ space.capacity() as u32);
+ if r == 0 {
+ info!("GetModuleFileNameW failed: {}", io::Error::last_os_error());
+ return None
+ }
+ let r = r as usize;
+ if r >= space.capacity() {
+ info!("our buffer was too small? {}",
+ io::Error::last_os_error());
+ return None
+ }
+ space.set_len(r);
+ let os = OsString::from_wide(&space);
+ Some(PathBuf::from(os))
}
- Some(trans_name) => sess.fatal(&format!("Unknown codegen backend {}", trans_name)),
}
}
None);
let (odir, ofile) = make_output(&matches);
- let (input, input_file_path) = match make_input(&matches.free) {
- Some((input, input_file_path)) => callbacks.some_input(input, input_file_path),
+ let (input, input_file_path, input_err) = match make_input(&matches.free) {
+ Some((input, input_file_path, input_err)) => {
+ let (input, input_file_path) = callbacks.some_input(input, input_file_path);
+ (input, input_file_path, input_err)
+ },
None => match callbacks.no_input(&matches, &sopts, &cfg, &odir, &ofile, &descriptions) {
- Some((input, input_file_path)) => (input, input_file_path),
+ Some((input, input_file_path)) => (input, input_file_path, None),
None => return (Ok(()), None),
},
};
sopts, input_file_path.clone(), descriptions, codemap, emitter_dest,
);
+ if let Some(err) = input_err {
+ // Immediately stop compilation if there was an issue reading
+ // the input (for example if the input stream is not UTF-8).
+ sess.err(&format!("{}", err));
+ return (Err(CompileIncomplete::Stopped), Some(sess));
+ }
+
let trans = get_trans(&sess);
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
}
// Extract input (string or file and optional path) from matches.
-fn make_input(free_matches: &[String]) -> Option<(Input, Option<PathBuf>)> {
+fn make_input(free_matches: &[String]) -> Option<(Input, Option<PathBuf>, Option<io::Error>)> {
if free_matches.len() == 1 {
let ifile = &free_matches[0];
if ifile == "-" {
let mut src = String::new();
- io::stdin().read_to_string(&mut src).unwrap();
+ let err = if io::stdin().read_to_string(&mut src).is_err() {
+ Some(io::Error::new(io::ErrorKind::InvalidData,
+ "couldn't read from stdin, as it did not contain valid UTF-8"))
+ } else {
+ None
+ };
Some((Input::Str { name: FileName::Anon, input: src },
- None))
+ None, err))
} else {
Some((Input::File(PathBuf::from(ifile)),
- Some(PathBuf::from(ifile))))
+ Some(PathBuf::from(ifile)), None))
}
} else {
None
println!("commit-date: {}", unw(commit_date_str()));
println!("host: {}", config::host_triple());
println!("release: {}", unw(release_str()));
- rustc_trans::print_version();
+ get_trans_sysroot("llvm")().print_version();
}
}
}
if cg_flags.contains(&"passes=list".to_string()) {
- rustc_trans::print_passes();
+ get_trans_sysroot("llvm")().print_passes();
return None;
}
let emitter =
Box::new(errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto,
None,
+ false,
false));
let handler = errors::Handler::with_emitter(true, false, emitter);
all_errors.extend_from_slice(&rustc_typeck::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS);
- #[cfg(feature="llvm")]
- all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS);
+ // FIXME: need to figure out a way to get these back in here
+ // all_errors.extend_from_slice(get_trans(sess).diagnostics());
all_errors.extend_from_slice(&rustc_trans_utils::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS);
}
PpmTyped => {
let control = &driver::CompileController::basic();
- abort_on_err(driver::phase_3_run_analysis_passes(&*::DefaultTransCrate::new(&sess),
+ let trans = ::get_trans(sess);
+ abort_on_err(driver::phase_3_run_analysis_passes(&*trans,
control,
sess,
cstore,
let mut out = Vec::new();
let control = &driver::CompileController::basic();
- abort_on_err(driver::phase_3_run_analysis_passes(&*::DefaultTransCrate::new(&sess),
+ let trans = ::get_trans(sess);
+ abort_on_err(driver::phase_3_run_analysis_passes(&*trans,
control,
sess,
cstore,
}],
msg: msg.to_owned(),
show_code_when_inline: false,
+ approximate: false,
});
self
}
}],
msg: msg.to_owned(),
show_code_when_inline: true,
+ approximate: false,
});
self
}
}).collect(),
msg: msg.to_owned(),
show_code_when_inline: true,
+ approximate: false,
+ });
+ self
+ }
+
+ /// This is a suggestion that may contain mistakes or fillers and should
+ /// be read and understood by a human.
+ pub fn span_approximate_suggestion(&mut self, sp: Span, msg: &str,
+ suggestion: String) -> &mut Self {
+ self.suggestions.push(CodeSuggestion {
+ substitutions: vec![Substitution {
+ parts: vec![SubstitutionPart {
+ snippet: suggestion,
+ span: sp,
+ }],
+ }],
+ msg: msg.to_owned(),
+ show_code_when_inline: true,
+ approximate: true,
+ });
+ self
+ }
+
+ pub fn span_approximate_suggestions(&mut self, sp: Span, msg: &str,
+ suggestions: Vec<String>) -> &mut Self {
+ self.suggestions.push(CodeSuggestion {
+ substitutions: suggestions.into_iter().map(|snippet| Substitution {
+ parts: vec![SubstitutionPart {
+ snippet,
+ span: sp,
+ }],
+ }).collect(),
+ msg: msg.to_owned(),
+ show_code_when_inline: true,
+ approximate: true,
});
self
}
msg: &str,
suggestions: Vec<String>)
-> &mut Self);
+ forward!(pub fn span_approximate_suggestion(&mut self,
+ sp: Span,
+ msg: &str,
+ suggestion: String)
+ -> &mut Self);
+ forward!(pub fn span_approximate_suggestions(&mut self,
+ sp: Span,
+ msg: &str,
+ suggestions: Vec<String>)
+ -> &mut Self);
forward!(pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self);
forward!(pub fn code(&mut self, s: DiagnosticId) -> &mut Self);
dst: Destination,
cm: Option<Rc<CodeMapper>>,
short_message: bool,
+ teach: bool,
}
struct FileWithAnnotatedLines {
impl EmitterWriter {
pub fn stderr(color_config: ColorConfig,
code_map: Option<Rc<CodeMapper>>,
- short_message: bool)
+ short_message: bool,
+ teach: bool)
-> EmitterWriter {
if color_config.use_color() {
let dst = Destination::from_stderr();
EmitterWriter {
dst,
cm: code_map,
- short_message: short_message,
+ short_message,
+ teach,
}
} else {
EmitterWriter {
dst: Raw(Box::new(io::stderr())),
cm: code_map,
- short_message: short_message,
+ short_message,
+ teach,
}
}
}
pub fn new(dst: Box<Write + Send>,
code_map: Option<Rc<CodeMapper>>,
- short_message: bool)
+ short_message: bool,
+ teach: bool)
-> EmitterWriter {
EmitterWriter {
dst: Raw(dst),
cm: code_map,
- short_message: short_message,
+ short_message,
+ teach,
}
}
line: &Line,
width_offset: usize,
code_offset: usize) -> Vec<(usize, Style)> {
+ if line.line_index == 0 {
+ return Vec::new();
+ }
+
let source_string = match file.get_line(line.line_index - 1) {
Some(s) => s,
None => return Vec::new(),
code_offset + annotation.start_col,
style);
}
- _ => (),
+ _ if self.teach => {
+ buffer.set_style_range(line_offset,
+ code_offset + annotation.start_col,
+ code_offset + annotation.end_col,
+ style,
+ annotation.is_primary);
+ }
+ _ => {}
}
}
// Then, the secondary file indicator
buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber);
+ let loc = if let Some(first_line) = annotated_file.lines.first() {
+ let col = if let Some(first_annotation) = first_line.annotations.first() {
+ format!(":{}", first_annotation.start_col + 1)
+ } else {
+ "".to_string()
+ };
+ format!("{}:{}{}",
+ annotated_file.file.name,
+ cm.doctest_offset_line(first_line.line_index),
+ col)
+ } else {
+ annotated_file.file.name.to_string()
+ };
buffer.append(buffer_msg_line_offset + 1,
- &annotated_file.file.name.to_string(),
+ &loc,
Style::LineAndColumn);
for _ in 0..max_line_num_len {
buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle);
pub substitutions: Vec<Substitution>,
pub msg: String,
pub show_code_when_inline: bool,
+ /// Whether or not the suggestion is approximate
+ ///
+ /// Sometimes we may show suggestions with placeholders,
+ /// which are useful for users but not useful for
+ /// tools like rustfix
+ pub approximate: bool,
}
#[derive(Clone, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)]
cm: Option<Rc<CodeMapper>>,
flags: HandlerFlags)
-> Handler {
- let emitter = Box::new(EmitterWriter::stderr(color_config, cm, false));
+ let emitter = Box::new(EmitterWriter::stderr(color_config, cm, false, false));
Handler::with_emitter_and_flags(emitter, flags)
}
// Code for annotating snippets.
-use syntax_pos::{Span, FileMap};
-use CodeMapper;
-use std::rc::Rc;
use Level;
-#[derive(Clone)]
-pub struct SnippetData {
- codemap: Rc<CodeMapper>,
- files: Vec<FileInfo>,
-}
-
-#[derive(Clone)]
-pub struct FileInfo {
- file: Rc<FileMap>,
-
- /// The "primary file", if any, gets a `-->` marker instead of
- /// `>>>`, and has a line-number/column printed and not just a
- /// filename. It appears first in the listing. It is known to
- /// contain at least one primary span, though primary spans (which
- /// are designated with `^^^`) may also occur in other files.
- primary_span: Option<Span>,
-
- lines: Vec<Line>,
-}
-
#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct Line {
pub line_index: usize,
pub fn num_lines(&self) -> usize {
self.text.len()
}
+
+ pub fn set_style_range(&mut self,
+ line: usize,
+ col_start: usize,
+ col_end: usize,
+ style: Style,
+ overwrite: bool) {
+ for col in col_start..col_end {
+ self.set_style(line, col, style, overwrite);
+ }
+ }
+
+ pub fn set_style(&mut self, line: usize, col: usize, style: Style, overwrite: bool) {
+ if let Some(ref mut line) = self.styles.get_mut(line) {
+ if let Some(s) = line.get_mut(col) {
+ if *s == Style::NoStyle || *s == Style::Quotation || overwrite {
+ *s = style;
+ }
+ }
+ }
+ }
}
// repr(transparent) types are allowed to have arbitrary ZSTs, not just
// PhantomData -- skip checking all ZST fields
if def.repr.transparent() {
- let is_zst = (cx, cx.param_env(field.did))
- .layout_of(field_ty)
+ let is_zst = cx
+ .layout_of(cx.param_env(field.did).and(field_ty))
.map(|layout| layout.is_zst())
.unwrap_or(false);
if is_zst {
Assign(_, ref value) => (value, "assigned value", false),
AssignOp(.., ref value) => (value, "assigned value", false),
InPlace(_, ref value) => (value, "emplacement value", false),
- Call(_, ref args) => {
- for arg in args {
- self.check_unused_parens_core(cx, arg, "function argument", false)
+ // either function/method call, or something this lint doesn't care about
+ ref call_or_other => {
+ let args_to_check;
+ let call_kind;
+ match *call_or_other {
+ Call(_, ref args) => {
+ call_kind = "function";
+ args_to_check = &args[..];
+ },
+ MethodCall(_, ref args) => {
+ call_kind = "method";
+ // first "argument" is self (which sometimes needs parens)
+ args_to_check = &args[1..];
+ }
+ // actual catch-all arm
+ _ => { return; }
}
- return;
- },
- MethodCall(_, ref args) => {
- for arg in &args[1..] { // first "argument" is self (which sometimes needs parens)
- self.check_unused_parens_core(cx, arg, "method argument", false)
+ // Don't lint if this is a nested macro expansion: otherwise, the lint could
+ // trigger in situations that macro authors shouldn't have to care about, e.g.,
+ // when a parenthesized token tree matched in one macro expansion is matched as
+ // an expression in another and used as a fn/method argument (Issue #47775)
+ if e.span.ctxt().outer().expn_info()
+ .map_or(false, |info| info.call_site.ctxt().outer()
+ .expn_info().is_some()) {
+ return;
+ }
+ let msg = format!("{} argument", call_kind);
+ for arg in args_to_check {
+ self.check_unused_parens_core(cx, arg, &msg, false);
}
return;
}
- _ => return,
};
self.check_unused_parens_core(cx, &value, msg, struct_lit_needs_parens);
}
[lib]
name = "rustc_llvm"
path = "lib.rs"
-crate-type = ["dylib"]
[features]
static-libstdcpp = []
+emscripten = []
[dependencies]
bitflags = "1.0"
+libc = "0.2"
rustc_cratesio_shim = { path = "../librustc_cratesio_shim" }
[build-dependencies]
cfg.define(&flag, None);
}
+ println!("cargo:rerun-if-changed-env=LLVM_RUSTLLVM");
if env::var_os("LLVM_RUSTLLVM").is_some() {
cfg.define("LLVM_RUSTLLVM", None);
}
// run.
match maybe_library {
Err(err) => Err(err),
- Ok(handle) => Ok(DynamicLibrary { handle: handle })
+ Ok(handle) => Ok(DynamicLibrary { handle })
+ }
+ }
+
+ /// Load a dynamic library into the global namespace (RTLD_GLOBAL on Unix)
+ /// and do it now (don't use RTLD_LAZY on Unix).
+ pub fn open_global_now(filename: &Path) -> Result<DynamicLibrary, String> {
+ let maybe_library = dl::open_global_now(filename.as_os_str());
+ match maybe_library {
+ Err(err) => Err(err),
+ Ok(handle) => Ok(DynamicLibrary { handle })
}
}
})
}
- const LAZY: libc::c_int = 1;
+ pub fn open_global_now(filename: &OsStr) -> Result<*mut u8, String> {
+ check_for_errors_in(|| unsafe {
+ let s = CString::new(filename.as_bytes()).unwrap();
+ libc::dlopen(s.as_ptr(), libc::RTLD_GLOBAL | libc::RTLD_NOW) as *mut u8
+ })
+ }
unsafe fn open_external(filename: &OsStr) -> *mut u8 {
let s = CString::new(filename.as_bytes()).unwrap();
- libc::dlopen(s.as_ptr(), LAZY) as *mut u8
+ libc::dlopen(s.as_ptr(), libc::RTLD_LAZY) as *mut u8
}
unsafe fn open_internal() -> *mut u8 {
- libc::dlopen(ptr::null(), LAZY) as *mut u8
+ libc::dlopen(ptr::null(), libc::RTLD_LAZY) as *mut u8
}
pub fn check_for_errors_in<T, F>(f: F) -> Result<T, String> where
fn FreeLibrary(handle: HMODULE) -> BOOL;
}
+ pub fn open_global_now(filename: &OsStr) -> Result<*mut u8, String> {
+ open(Some(filename))
+ }
+
pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> {
// disable "dll load failed" error dialog.
let prev_error_mode = unsafe {
self.describe_field_from_ty(&tnm.ty, field)
}
ty::TyArray(ty, _) | ty::TySlice(ty) => self.describe_field_from_ty(&ty, field),
- ty::TyClosure(closure_def_id, _) => {
+ ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
// Convert the def-id into a node-id. node-ids are only valid for
// the local code in the current crate, so this returns an `Option` in case
// the closure comes from another crate. But in that case we wouldn't
// be borrowck'ing it, so we can just unwrap:
- let node_id = self.tcx.hir.as_local_node_id(closure_def_id).unwrap();
+ let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let freevar = self.tcx.with_freevars(node_id, |fv| fv[field.index()]);
self.tcx.hir.name(freevar.var_id()).to_string()
use rustc::mir::{BasicBlock, Location};
-use dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
-use dataflow::{EverInitializedLvals, MovingOutStatements};
+use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
+use dataflow::{EverInitializedPlaces, MovingOutStatements};
use dataflow::{ActiveBorrows, FlowAtLocation, FlowsAtLocation};
use dataflow::move_paths::HasMoveData;
use std::fmt;
// (forced to be `pub` due to its use as an associated type below.)
pub(crate) struct Flows<'b, 'gcx: 'tcx, 'tcx: 'b> {
pub borrows: FlowAtLocation<ActiveBorrows<'b, 'gcx, 'tcx>>,
- pub inits: FlowAtLocation<MaybeInitializedLvals<'b, 'gcx, 'tcx>>,
- pub uninits: FlowAtLocation<MaybeUninitializedLvals<'b, 'gcx, 'tcx>>,
+ pub inits: FlowAtLocation<MaybeInitializedPlaces<'b, 'gcx, 'tcx>>,
+ pub uninits: FlowAtLocation<MaybeUninitializedPlaces<'b, 'gcx, 'tcx>>,
pub move_outs: FlowAtLocation<MovingOutStatements<'b, 'gcx, 'tcx>>,
- pub ever_inits: FlowAtLocation<EverInitializedLvals<'b, 'gcx, 'tcx>>,
+ pub ever_inits: FlowAtLocation<EverInitializedPlaces<'b, 'gcx, 'tcx>>,
}
impl<'b, 'gcx, 'tcx> Flows<'b, 'gcx, 'tcx> {
pub fn new(
borrows: FlowAtLocation<ActiveBorrows<'b, 'gcx, 'tcx>>,
- inits: FlowAtLocation<MaybeInitializedLvals<'b, 'gcx, 'tcx>>,
- uninits: FlowAtLocation<MaybeUninitializedLvals<'b, 'gcx, 'tcx>>,
+ inits: FlowAtLocation<MaybeInitializedPlaces<'b, 'gcx, 'tcx>>,
+ uninits: FlowAtLocation<MaybeUninitializedPlaces<'b, 'gcx, 'tcx>>,
move_outs: FlowAtLocation<MovingOutStatements<'b, 'gcx, 'tcx>>,
- ever_inits: FlowAtLocation<EverInitializedLvals<'b, 'gcx, 'tcx>>,
+ ever_inits: FlowAtLocation<EverInitializedPlaces<'b, 'gcx, 'tcx>>,
) -> Self {
Flows {
borrows,
use dataflow::FlowAtLocation;
use dataflow::MoveDataParamEnv;
use dataflow::{DataflowAnalysis, DataflowResultsConsumer};
-use dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
-use dataflow::{EverInitializedLvals, MovingOutStatements};
+use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
+use dataflow::{EverInitializedPlaces, MovingOutStatements};
use dataflow::{BorrowData, Borrows, ReserveOrActivateIndex};
use dataflow::{ActiveBorrows, Reservations};
use dataflow::indexes::BorrowIndex;
id,
&attributes,
&dead_unwinds,
- MaybeInitializedLvals::new(tcx, mir, &mdpe),
+ MaybeInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_uninits = FlowAtLocation::new(do_dataflow(
id,
&attributes,
&dead_unwinds,
- MaybeUninitializedLvals::new(tcx, mir, &mdpe),
+ MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_move_outs = FlowAtLocation::new(do_dataflow(
id,
&attributes,
&dead_unwinds,
- EverInitializedLvals::new(tcx, mir, &mdpe),
+ EverInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
));
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ShallowOrDeep {
/// From the RFC: "A *shallow* access means that the immediate
- /// fields reached at LV are accessed, but references or pointers
+ /// fields reached at P are accessed, but references or pointers
/// found within are not dereferenced. Right now, the only access
/// that is shallow is an assignment like `x = ...;`, which would
/// be a *shallow write* of `x`."
debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
// FIXME: should be talking about the region lifetime instead
// of just a span here.
+ let span = self.tcx.sess.codemap().end_point(span);
self.report_borrowed_value_does_not_live_long_enough(
context,
borrow,
- span.end_point(),
+ span,
flow_state.borrows.operator(),
)
}
use transform::MirSource;
use util::liveness::{LivenessResults, LocalSet};
use dataflow::FlowAtLocation;
-use dataflow::MaybeInitializedLvals;
+use dataflow::MaybeInitializedPlaces;
use dataflow::move_paths::MoveData;
use util as mir_util;
universal_regions: UniversalRegions<'tcx>,
mir: &Mir<'tcx>,
param_env: ty::ParamEnv<'gcx>,
- flow_inits: &mut FlowAtLocation<MaybeInitializedLvals<'cx, 'gcx, 'tcx>>,
+ flow_inits: &mut FlowAtLocation<MaybeInitializedPlaces<'cx, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
) -> (
RegionInferenceContext<'tcx>,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use std::collections::HashMap;
+
use super::universal_regions::UniversalRegions;
use rustc::hir::def_id::DefId;
use rustc::infer::InferCtxt;
use rustc::traits::ObligationCause;
use rustc::ty::{self, RegionVid, Ty, TypeFoldable};
use rustc::util::common::ErrorReported;
+use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_errors::DiagnosticBuilder;
use std::fmt;
/// satisfied. Note that some values may grow **too** large to be
/// feasible, but we check this later.
fn propagate_constraints(&mut self, mir: &Mir<'tcx>) {
- let mut changed = true;
-
debug!("propagate_constraints()");
debug!("propagate_constraints: constraints={:#?}", {
let mut constraints: Vec<_> = self.constraints.iter().collect();
// constraints we have accumulated.
let mut inferred_values = self.liveness_constraints.clone();
- while changed {
- changed = false;
- debug!("propagate_constraints: --------------------");
- for constraint in &self.constraints {
- debug!("propagate_constraints: constraint={:?}", constraint);
-
- // Grow the value as needed to accommodate the
- // outlives constraint.
- let Ok(made_changes) = self.dfs(
- mir,
- CopyFromSourceToTarget {
- source_region: constraint.sub,
- target_region: constraint.sup,
- inferred_values: &mut inferred_values,
- constraint_point: constraint.point,
- constraint_span: constraint.span,
- },
- );
+ let dependency_map = self.build_dependency_map();
+
+ // Constraints that may need to be repropagated (initially all):
+ let mut dirty_list: Vec<_> = (0..self.constraints.len()).collect();
+
+ // Set to 0 for each constraint that is on the dirty list:
+ let mut clean_bit_vec = BitVector::new(dirty_list.len());
+
+ debug!("propagate_constraints: --------------------");
+ while let Some(constraint_idx) = dirty_list.pop() {
+ clean_bit_vec.insert(constraint_idx);
+
+ let constraint = &self.constraints[constraint_idx];
+ debug!("propagate_constraints: constraint={:?}", constraint);
- if made_changes {
- debug!("propagate_constraints: sub={:?}", constraint.sub);
- debug!("propagate_constraints: sup={:?}", constraint.sup);
- changed = true;
+ // Grow the value as needed to accommodate the
+ // outlives constraint.
+ let Ok(made_changes) = self.dfs(
+ mir,
+ CopyFromSourceToTarget {
+ source_region: constraint.sub,
+ target_region: constraint.sup,
+ inferred_values: &mut inferred_values,
+ constraint_point: constraint.point,
+ constraint_span: constraint.span,
+ },
+ );
+
+ if made_changes {
+ debug!("propagate_constraints: sub={:?}", constraint.sub);
+ debug!("propagate_constraints: sup={:?}", constraint.sup);
+
+ for &dep_idx in dependency_map.get(&constraint.sup).unwrap_or(&vec![]) {
+ if clean_bit_vec.remove(dep_idx) {
+ dirty_list.push(dep_idx);
+ }
}
}
+
debug!("\n");
}
self.inferred_values = Some(inferred_values);
}
+ /// Builds up a map from each region variable X to a vector with the
+ /// indices of constraints that need to be re-evaluated when X changes.
+ /// These are constraints like Y: X @ P -- so if X changed, we may
+ /// need to grow Y.
+ fn build_dependency_map(&self) -> HashMap<RegionVid, Vec<usize>> {
+ let mut map = HashMap::new();
+
+ for (idx, constraint) in self.constraints.iter().enumerate() {
+ map.entry(constraint.sub).or_insert(Vec::new()).push(idx);
+ }
+
+ map
+ }
+
/// Once regions have been propagated, this method is used to see
/// whether the "type tests" produced by typeck were satisfied;
/// type tests encode type-outlives relationships like `T:
use dataflow::{FlowAtLocation, FlowsAtLocation};
use borrow_check::nll::region_infer::Cause;
-use dataflow::MaybeInitializedLvals;
+use dataflow::MaybeInitializedPlaces;
use dataflow::move_paths::{HasMoveData, MoveData};
use rustc::mir::{BasicBlock, Location, Mir};
use rustc::mir::Local;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::traits;
+use rustc::infer::InferOk;
use rustc::util::common::ErrorReported;
+use borrow_check::nll::type_check::AtLocation;
use rustc_data_structures::fx::FxHashSet;
use syntax::codemap::DUMMY_SP;
use util::liveness::LivenessResults;
cx: &mut TypeChecker<'_, 'gcx, 'tcx>,
mir: &Mir<'tcx>,
liveness: &LivenessResults,
- flow_inits: &mut FlowAtLocation<MaybeInitializedLvals<'_, 'gcx, 'tcx>>,
+ flow_inits: &mut FlowAtLocation<MaybeInitializedPlaces<'_, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
) {
let tcx = cx.tcx();
tcx: TyCtxt<'typeck, 'gcx, 'tcx>,
mir: &'gen Mir<'tcx>,
liveness: &'gen LivenessResults,
- flow_inits: &'gen mut FlowAtLocation<MaybeInitializedLvals<'flow, 'gcx, 'tcx>>,
+ flow_inits: &'gen mut FlowAtLocation<MaybeInitializedPlaces<'flow, 'gcx, 'tcx>>,
move_data: &'gen MoveData<'tcx>,
}
location
);
- let tcx = self.cx.infcx.tcx;
- let mut types = vec![(dropped_ty, 0)];
- let mut known = FxHashSet();
- while let Some((ty, depth)) = types.pop() {
- let span = DUMMY_SP; // FIXME
- let result = match tcx.dtorck_constraint_for_ty(span, dropped_ty, depth, ty) {
- Ok(result) => result,
- Err(ErrorReported) => {
- continue;
- }
- };
-
- let ty::DtorckConstraint {
- outlives,
- dtorck_types,
- } = result;
-
- // All things in the `outlives` array may be touched by
- // the destructor and must be live at this point.
- for outlive in outlives {
- let cause = Cause::DropVar(dropped_local, location);
- self.push_type_live_constraint(outlive, location, cause);
- }
+ // If we end up visiting the same type twice (usually due to a cycle involving
+ // associated types), we need to ensure that its region types match up with the type
+ // we added to the 'known' map the first time around. For this reason, we need
+ // our infcx to hold onto its calculated region constraints after each call
+ // to dtorck_constraint_for_ty. Otherwise, normalizing the corresponding associated
+ // type will end up instantiating the type with a new set of inference variables.
+ // Since this new type will never be in 'known', we end up looping forever.
+ //
+ // For this reason, we avoid calling TypeChecker.normalize, instead doing all normalization
+ // ourselves in one large 'fully_perform_op' callback.
+ let (type_constraints, kind_constraints) = self.cx.fully_perform_op(location.at_self(),
+ |cx| {
+
+ let tcx = cx.infcx.tcx;
+ let mut selcx = traits::SelectionContext::new(cx.infcx);
+ let cause = cx.misc(cx.last_span);
+
+ let mut types = vec![(dropped_ty, 0)];
+ let mut final_obligations = Vec::new();
+ let mut type_constraints = Vec::new();
+ let mut kind_constraints = Vec::new();
- // However, there may also be some types that
- // `dtorck_constraint_for_ty` could not resolve (e.g.,
- // associated types and parameters). We need to normalize
- // associated types here and possibly recursively process.
- for ty in dtorck_types {
- let ty = self.cx.normalize(&ty, location);
- let ty = self.cx.infcx.resolve_type_and_region_vars_if_possible(&ty);
- match ty.sty {
- ty::TyParam(..) | ty::TyProjection(..) | ty::TyAnon(..) => {
- let cause = Cause::DropVar(dropped_local, location);
- self.push_type_live_constraint(ty, location, cause);
+ let mut known = FxHashSet();
+
+ while let Some((ty, depth)) = types.pop() {
+ let span = DUMMY_SP; // FIXME
+ let result = match tcx.dtorck_constraint_for_ty(span, dropped_ty, depth, ty) {
+ Ok(result) => result,
+ Err(ErrorReported) => {
+ continue;
}
+ };
+
+ let ty::DtorckConstraint {
+ outlives,
+ dtorck_types,
+ } = result;
+
+ // All things in the `outlives` array may be touched by
+ // the destructor and must be live at this point.
+ for outlive in outlives {
+ let cause = Cause::DropVar(dropped_local, location);
+ kind_constraints.push((outlive, location, cause));
+ }
- _ => if known.insert(ty) {
- types.push((ty, depth + 1));
- },
+ // However, there may also be some types that
+ // `dtorck_constraint_for_ty` could not resolve (e.g.,
+ // associated types and parameters). We need to normalize
+ // associated types here and possibly recursively process.
+ for ty in dtorck_types {
+ let traits::Normalized { value: ty, obligations } =
+ traits::normalize(&mut selcx, cx.param_env, cause.clone(), &ty);
+
+ final_obligations.extend(obligations);
+
+ let ty = cx.infcx.resolve_type_and_region_vars_if_possible(&ty);
+ match ty.sty {
+ ty::TyParam(..) | ty::TyProjection(..) | ty::TyAnon(..) => {
+ let cause = Cause::DropVar(dropped_local, location);
+ type_constraints.push((ty, location, cause));
+ }
+
+ _ => if known.insert(ty) {
+ types.push((ty, depth + 1));
+ },
+ }
}
}
+
+ Ok(InferOk {
+ value: (type_constraints, kind_constraints), obligations: final_obligations
+ })
+ }).unwrap();
+
+ for (ty, location, cause) in type_constraints {
+ self.push_type_live_constraint(ty, location, cause);
+ }
+
+ for (kind, location, cause) in kind_constraints {
+ self.push_type_live_constraint(kind, location, cause);
}
}
}
use borrow_check::nll::region_infer::ClosureRegionRequirementsExt;
use borrow_check::nll::universal_regions::UniversalRegions;
use dataflow::FlowAtLocation;
-use dataflow::MaybeInitializedLvals;
+use dataflow::MaybeInitializedPlaces;
use dataflow::move_paths::MoveData;
use rustc::hir::def_id::DefId;
use rustc::infer::{InferCtxt, InferOk, InferResult, LateBoundRegionConversionTime, UnitResult};
mir_def_id: DefId,
universal_regions: &UniversalRegions<'tcx>,
liveness: &LivenessResults,
- flow_inits: &mut FlowAtLocation<MaybeInitializedLvals<'_, 'gcx, 'tcx>>,
+ flow_inits: &mut FlowAtLocation<MaybeInitializedPlaces<'_, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
) -> MirTypeckRegionConstraints<'tcx> {
let body_id = infcx.tcx.hir.as_local_node_id(mir_def_id).unwrap();
}
};
if let PlaceContext::Copy = context {
- let ty = place_ty.to_ty(self.tcx());
- if self.cx
- .infcx
- .type_moves_by_default(self.cx.param_env, ty, DUMMY_SP)
- {
- span_mirbug!(self, place, "attempted copy of non-Copy type ({:?})", ty);
- }
+ let tcx = self.tcx();
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.lang_items().copy_trait().unwrap(),
+ substs: tcx.mk_substs_trait(place_ty.to_ty(tcx), &[]),
+ };
+
+ // In order to have a Copy operand, the type T of the value must be Copy. Note that we
+ // prove that T: Copy, rather than using the type_moves_by_default test. This is
+ // important because type_moves_by_default ignores the resulting region obligations and
+ // assumes they pass. This can result in bounds from Copy impls being unsoundly ignored
+ // (e.g., #29149). Note that we decide to use Copy before knowing whether the bounds
+ // fully apply: in effect, the rule is that if a value of some type could implement
+ // Copy, then it must.
+ self.cx.prove_trait_ref(trait_ref, location);
}
place_ty
}
let base_ty = base.to_ty(tcx);
match *pi {
ProjectionElem::Deref => {
- let deref_ty = base_ty.builtin_deref(true, ty::LvaluePreference::NoPreference);
+ let deref_ty = base_ty.builtin_deref(true);
PlaceTy::Ty {
ty: deref_ty.map(|t| t.ty).unwrap_or_else(|| {
span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty)
}
}
ty::TyGenerator(def_id, substs, _) => {
- // Try upvars first. `field_tys` requires final optimized MIR.
- if let Some(ty) = substs.upvar_tys(def_id, tcx).nth(field.index()) {
+ // Try pre-transform fields first (upvars and current state)
+ if let Some(ty) = substs.pre_transforms_tys(def_id, tcx).nth(field.index()) {
return Ok(ty);
}
+ // Then try `field_tys` which contains all the fields, but it
+ // requires the final optimized MIR.
return match substs.field_tys(def_id, tcx).nth(field.index()) {
Some(ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.field_tys(def_id, tcx).count() + 1,
+ field_count: substs.field_tys(def_id, tcx).count(),
}),
};
}
}
}
AggregateKind::Generator(def_id, substs, _) => {
- if let Some(ty) = substs.upvar_tys(def_id, tcx).nth(field_index) {
+ // Try pre-transform fields first (upvars and current state)
+ if let Some(ty) = substs.pre_transforms_tys(def_id, tcx).nth(field_index) {
Ok(ty)
} else {
+ // Then try `field_tys` which contains all the fields, but it
+ // requires the final optimized MIR.
match substs.field_tys(def_id, tcx).nth(field_index) {
Some(ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
- field_count: substs.field_tys(def_id, tcx).count() + 1,
+ field_count: substs.field_tys(def_id, tcx).count(),
}),
}
}
if let Some(expr) = expr {
unpack!(block = this.into(destination, block, expr));
} else {
- this.cfg.push_assign_unit(block, source_info, destination);
+ // If a block has no trailing expression, then it is given an implicit return type.
+ // This return type is usually `()`, unless the block is diverging, in which case the
+ // return type is `!`. For the unit type, we need to actually return the unit, but in
+ // the case of `!`, no return value is required, as the block will never return.
+ let tcx = this.hir.tcx();
+ let ty = destination.ty(&this.local_decls, tcx).to_ty(tcx);
+ if ty.is_nil() {
+ // We only want to assign an implicit `()` as the return value of the block if the
+ // block does not diverge. (Otherwise, we may try to assign a unit to a `!`-type.)
+ this.cfg.push_assign_unit(block, source_info, destination);
+ }
}
// Finally, we pop all the let scopes before exiting out from the scope of block
// itself.
ExprKind::Continue { .. } |
ExprKind::Break { .. } |
ExprKind::InlineAsm { .. } |
- ExprKind::Return {.. } => {
+ ExprKind::Return { .. } => {
unpack!(block = this.stmt_expr(block, expr));
this.cfg.push_assign_unit(block, source_info, destination);
block.unit()
if let DropKind::Value { .. } = drop_kind {
scope.needs_cleanup = true;
}
+
let region_scope_span = region_scope.span(self.hir.tcx(),
&self.hir.region_scope_tree);
- // Attribute scope exit drops to scope's closing brace
- let scope_end = region_scope_span.with_lo(region_scope_span.hi());
+ // Attribute scope exit drops to scope's closing brace.
+ let scope_end = self.hir.tcx().sess.codemap().end_point(region_scope_span);
+
scope.drops.push(DropData {
span: scope_end,
location: place.clone(),
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub use super::*;
+
+use rustc::mir::*;
+use rustc::mir::visit::Visitor;
+use dataflow::BitDenotation;
+
+/// This calculates whether any part of a MIR local could have previously been borrowed.
+/// This means that once a local has been borrowed, its bit will always be set
+/// from that point and onwards, even if the borrow ends. You could also think of this
+/// as computing the lifetimes of infinite borrows.
+/// This is used to compute which locals are live during a yield expression for
+/// immovable generators.
+#[derive(Copy, Clone)]
+pub struct HaveBeenBorrowedLocals<'a, 'tcx: 'a> {
+ mir: &'a Mir<'tcx>,
+}
+
+impl<'a, 'tcx: 'a> HaveBeenBorrowedLocals<'a, 'tcx> {
+ // Constructs the analysis over the given MIR body.
+ pub fn new(mir: &'a Mir<'tcx>)
+ -> Self {
+ HaveBeenBorrowedLocals { mir: mir }
+ }
+
+ // Accessor for the underlying MIR body.
+ pub fn mir(&self) -> &Mir<'tcx> {
+ self.mir
+ }
+}
+
+impl<'a, 'tcx> BitDenotation for HaveBeenBorrowedLocals<'a, 'tcx> {
+ // One bit per local: set once the local has (possibly) been borrowed.
+ type Idx = Local;
+ fn name() -> &'static str { "has_been_borrowed_locals" }
+ fn bits_per_block(&self) -> usize {
+ self.mir.local_decls.len()
+ }
+
+ fn start_block_effect(&self, _sets: &mut IdxSet<Local>) {
+ // Nothing is borrowed on function entry
+ }
+
+ // Gen-set the base local of every place borrowed by this statement.
+ fn statement_effect(&self,
+ sets: &mut BlockSets<Local>,
+ loc: Location) {
+ BorrowedLocalsVisitor {
+ sets,
+ }.visit_statement(loc.block, &self.mir[loc.block].statements[loc.statement_index], loc);
+ }
+
+ // Gen-set the base local of every place borrowed by this terminator.
+ fn terminator_effect(&self,
+ sets: &mut BlockSets<Local>,
+ loc: Location) {
+ BorrowedLocalsVisitor {
+ sets,
+ }.visit_terminator(loc.block, self.mir[loc.block].terminator(), loc);
+ }
+
+ fn propagate_call_return(&self,
+ _in_out: &mut IdxSet<Local>,
+ _call_bb: mir::BasicBlock,
+ _dest_bb: mir::BasicBlock,
+ _dest_place: &mir::Place) {
+ // Nothing to do when a call returns successfully
+ }
+}
+
+impl<'a, 'tcx> BitwiseOperator for HaveBeenBorrowedLocals<'a, 'tcx> {
+ #[inline]
+ fn join(&self, pred1: usize, pred2: usize) -> usize {
+ pred1 | pred2 // "maybe" means we union effects of both preds
+ }
+}
+
+impl<'a, 'tcx> InitialFlow for HaveBeenBorrowedLocals<'a, 'tcx> {
+ #[inline]
+ fn bottom_value() -> bool {
+ false // bottom = unborrowed
+ }
+}
+
+/// Visitor that gen-sets the base local of every place that appears as the
+/// operand of an `Rvalue::Ref` in the visited statement or terminator.
+struct BorrowedLocalsVisitor<'b, 'c: 'b> {
+ sets: &'b mut BlockSets<'c, Local>,
+}
+
+/// Returns the base `Local` of `place`, if any. Statics have no local base,
+/// and the walk stops at a `Deref` projection — presumably because the
+/// dereference may reach storage other than the local itself (to confirm).
+fn find_local<'tcx>(place: &Place<'tcx>) -> Option<Local> {
+ match *place {
+ Place::Local(l) => Some(l),
+ Place::Static(..) => None,
+ Place::Projection(ref proj) => {
+ match proj.elem {
+ ProjectionElem::Deref => None,
+ _ => find_local(&proj.base)
+ }
+ }
+ }
+}
+
+impl<'tcx, 'b, 'c> Visitor<'tcx> for BorrowedLocalsVisitor<'b, 'c> {
+ fn visit_rvalue(&mut self,
+ rvalue: &Rvalue<'tcx>,
+ location: Location) {
+ // Any `&place` / `&mut place` marks the place's base local as borrowed.
+ if let Rvalue::Ref(_, _, ref place) = *rvalue {
+ if let Some(local) = find_local(place) {
+ self.sets.gen(&local);
+ }
+ }
+
+ self.super_rvalue(rvalue, location)
+ }
+}
// Issue #46746: Two-phase borrows handles
// stmts of form `Tmp = &mut Borrow` ...
match lhs {
- Place::Local(..) => {} // okay
- Place::Static(..) => unreachable!(), // (filtered by is_unsafe_place)
+ Place::Local(..) | Place::Static(..) => {} // okay
Place::Projection(..) => {
// ... can assign into projections,
// e.g. `box (&mut _)`. Current
Some(_) => None,
None => {
match self.0.region_span_map.get(region) {
- Some(span) => Some(span.end_point()),
- None => Some(self.0.mir.span.end_point())
+ Some(span) => Some(self.0.tcx.sess.codemap().end_point(*span)),
+ None => Some(self.0.tcx.sess.codemap().end_point(self.0.mir.span))
}
}
}
/// has a reservation at the time).
fn is_potential_use(context: PlaceContext) -> bool {
match context {
- // storage effects on an place do not activate it
+ // storage effects on a place do not activate it
PlaceContext::StorageLive | PlaceContext::StorageDead => false,
- // validation effects do not activate an place
+ // validation effects do not activate a place
//
// FIXME: Should they? Is it just another read? Or can we
// guarantee it won't dereference the stored address? How
// AsmOutput existed, but it's not necessarily a pure overwrite.
// so it's possible this should activate the place.
PlaceContext::AsmOutput |
- // pure overwrites of an place do not activate it. (note
+ // pure overwrites of a place do not activate it. (note
// PlaceContext::Call is solely about dest place)
PlaceContext::Store | PlaceContext::Call => false,
- // reads of an place *do* activate it
+ // reads of a place *do* activate it
PlaceContext::Move |
PlaceContext::Copy |
PlaceContext::Drop |
pub use self::storage_liveness::*;
+mod borrowed_locals;
+
+pub use self::borrowed_locals::*;
+
#[allow(dead_code)]
pub(super) mod borrows;
-/// `MaybeInitializedLvals` tracks all l-values that might be
+/// `MaybeInitializedPlaces` tracks all places that might be
/// initialized upon reaching a particular point in the control flow
/// for a function.
///
/// }
/// ```
///
-/// To determine whether an l-value *must* be initialized at a
+/// To determine whether a place *must* be initialized at a
/// particular control-flow point, one can take the set-difference
-/// between this data and the data from `MaybeUninitializedLvals` at the
+/// between this data and the data from `MaybeUninitializedPlaces` at the
/// corresponding control-flow point.
///
/// Similarly, at a given `drop` statement, the set-intersection
-/// between this data and `MaybeUninitializedLvals` yields the set of
-/// l-values that would require a dynamic drop-flag at that statement.
-pub struct MaybeInitializedLvals<'a, 'gcx: 'tcx, 'tcx: 'a> {
+/// between this data and `MaybeUninitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
-impl<'a, 'gcx: 'tcx, 'tcx> MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx: 'tcx, 'tcx> MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- MaybeInitializedLvals { tcx: tcx, mir: mir, mdpe: mdpe }
+ MaybeInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
}
}
-impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data }
}
-/// `MaybeUninitializedLvals` tracks all l-values that might be
+/// `MaybeUninitializedPlaces` tracks all places that might be
/// uninitialized upon reaching a particular point in the control flow
/// for a function.
///
/// }
/// ```
///
-/// To determine whether an l-value *must* be uninitialized at a
+/// To determine whether a place *must* be uninitialized at a
/// particular control-flow point, one can take the set-difference
-/// between this data and the data from `MaybeInitializedLvals` at the
+/// between this data and the data from `MaybeInitializedPlaces` at the
/// corresponding control-flow point.
///
/// Similarly, at a given `drop` statement, the set-intersection
-/// between this data and `MaybeInitializedLvals` yields the set of
-/// l-values that would require a dynamic drop-flag at that statement.
-pub struct MaybeUninitializedLvals<'a, 'gcx: 'tcx, 'tcx: 'a> {
+/// between this data and `MaybeInitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeUninitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
-impl<'a, 'gcx, 'tcx> MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- MaybeUninitializedLvals { tcx: tcx, mir: mir, mdpe: mdpe }
+ MaybeUninitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
}
}
-impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data }
}
-/// `DefinitelyInitializedLvals` tracks all l-values that are definitely
+/// `DefinitelyInitializedPlaces` tracks all places that are definitely
/// initialized upon reaching a particular point in the control flow
/// for a function.
///
/// FIXME: Note that once flow-analysis is complete, this should be
-/// the set-complement of MaybeUninitializedLvals; thus we can get rid
+/// the set-complement of MaybeUninitializedPlaces; thus we can get rid
/// of one or the other of these two. I'm inclined to get rid of
-/// MaybeUninitializedLvals, simply because the sets will tend to be
+/// MaybeUninitializedPlaces, simply because the sets will tend to be
/// smaller in this analysis and thus easier for humans to process
/// when debugging.
///
/// }
/// ```
///
-/// To determine whether an l-value *may* be uninitialized at a
+/// To determine whether a place *may* be uninitialized at a
/// particular control-flow point, one can take the set-complement
/// of this data.
///
/// Similarly, at a given `drop` statement, the set-difference between
-/// this data and `MaybeInitializedLvals` yields the set of l-values
+/// this data and `MaybeInitializedPlaces` yields the set of places
/// that would require a dynamic drop-flag at that statement.
-pub struct DefinitelyInitializedLvals<'a, 'gcx: 'tcx, 'tcx: 'a> {
+pub struct DefinitelyInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
-impl<'a, 'gcx, 'tcx: 'a> DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx: 'a> DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- DefinitelyInitializedLvals { tcx: tcx, mir: mir, mdpe: mdpe }
+ DefinitelyInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
}
}
-impl<'a, 'gcx, 'tcx: 'a> HasMoveData<'tcx> for DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx: 'a> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data }
}
/// `MovingOutStatements` tracks the statements that perform moves out
-/// of particular l-values. More precisely, it tracks whether the
+/// of particular places. More precisely, it tracks whether the
/// *effect* of such moves (namely, the uninitialization of the
-/// l-value in question) can reach some point in the control-flow of
+/// place in question) can reach some point in the control-flow of
/// the function, or if that effect is "killed" by some intervening
-/// operation reinitializing that l-value.
+/// operation reinitializing that place.
///
/// The resulting dataflow is a more enriched version of
-/// `MaybeUninitializedLvals`. Both structures on their own only tell
-/// you if an l-value *might* be uninitialized at a given point in the
+/// `MaybeUninitializedPlaces`. Both structures on their own only tell
+/// you if a place *might* be uninitialized at a given point in the
/// control flow. But `MovingOutStatements` also includes the added
/// data of *which* particular statement causing the deinitialization
/// that the borrow checker's error message may need to report.
fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data }
}
-/// `EverInitializedLvals` tracks all l-values that might have ever been
+/// `EverInitializedPlaces` tracks all places that might have ever been
/// initialized upon reaching a particular point in the control flow
/// for a function, without an intervening `Storage Dead`.
///
/// c = S; // {a, b, c, d }
/// }
/// ```
-pub struct EverInitializedLvals<'a, 'gcx: 'tcx, 'tcx: 'a> {
+pub struct EverInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
-impl<'a, 'gcx: 'tcx, 'tcx: 'a> EverInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx: 'tcx, 'tcx: 'a> EverInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
mir: &'a Mir<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- EverInitializedLvals { tcx: tcx, mir: mir, mdpe: mdpe }
+ EverInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
}
}
-impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for EverInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'gcx, 'tcx> {
fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data }
}
-impl<'a, 'gcx, 'tcx> MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
fn update_bits(sets: &mut BlockSets<MovePathIndex>, path: MovePathIndex,
state: DropFlagState)
{
}
}
-impl<'a, 'gcx, 'tcx> MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
fn update_bits(sets: &mut BlockSets<MovePathIndex>, path: MovePathIndex,
state: DropFlagState)
{
}
}
-impl<'a, 'gcx, 'tcx> DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
fn update_bits(sets: &mut BlockSets<MovePathIndex>, path: MovePathIndex,
state: DropFlagState)
{
}
}
-impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
type Idx = MovePathIndex;
fn name() -> &'static str { "maybe_init" }
fn bits_per_block(&self) -> usize {
}
}
-impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
type Idx = MovePathIndex;
fn name() -> &'static str { "maybe_uninit" }
fn bits_per_block(&self) -> usize {
}
}
-impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
type Idx = MovePathIndex;
fn name() -> &'static str { "definite_init" }
fn bits_per_block(&self) -> usize {
}
}
-impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> {
type Idx = InitIndex;
fn name() -> &'static str { "ever_init" }
fn bits_per_block(&self) -> usize {
}
}
-impl<'a, 'gcx, 'tcx> BitwiseOperator for MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitwiseOperator for MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn join(&self, pred1: usize, pred2: usize) -> usize {
pred1 | pred2 // "maybe" means we union effects of both preds
}
}
-impl<'a, 'gcx, 'tcx> BitwiseOperator for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitwiseOperator for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn join(&self, pred1: usize, pred2: usize) -> usize {
pred1 | pred2 // "maybe" means we union effects of both preds
}
}
-impl<'a, 'gcx, 'tcx> BitwiseOperator for DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitwiseOperator for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn join(&self, pred1: usize, pred2: usize) -> usize {
pred1 & pred2 // "definitely" means we intersect effects of both preds
}
}
-impl<'a, 'gcx, 'tcx> BitwiseOperator for EverInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> BitwiseOperator for EverInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn join(&self, pred1: usize, pred2: usize) -> usize {
pred1 | pred2 // inits from both preds are in scope
// propagating, or you start at all-ones and then use Intersect as
// your merge when propagating.
-impl<'a, 'gcx, 'tcx> InitialFlow for MaybeInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> InitialFlow for MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn bottom_value() -> bool {
false // bottom = uninitialized
}
}
-impl<'a, 'gcx, 'tcx> InitialFlow for MaybeUninitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> InitialFlow for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn bottom_value() -> bool {
false // bottom = initialized (start_block_effect counters this at outset)
}
}
-impl<'a, 'gcx, 'tcx> InitialFlow for DefinitelyInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> InitialFlow for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn bottom_value() -> bool {
true // bottom = initialized (start_block_effect counters this at outset)
}
}
-impl<'a, 'gcx, 'tcx> InitialFlow for EverInitializedLvals<'a, 'gcx, 'tcx> {
+impl<'a, 'gcx, 'tcx> InitialFlow for EverInitializedPlaces<'a, 'gcx, 'tcx> {
#[inline]
fn bottom_value() -> bool {
false // bottom = no initialized variables by default
use std::usize;
pub use self::impls::{MaybeStorageLive};
-pub use self::impls::{MaybeInitializedLvals, MaybeUninitializedLvals};
-pub use self::impls::{DefinitelyInitializedLvals, MovingOutStatements};
-pub use self::impls::EverInitializedLvals;
+pub use self::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
+pub use self::impls::{DefinitelyInitializedPlaces, MovingOutStatements};
+pub use self::impls::EverInitializedPlaces;
pub use self::impls::borrows::{Borrows, BorrowData};
+pub use self::impls::HaveBeenBorrowedLocals;
pub(crate) use self::impls::borrows::{ActiveBorrows, Reservations, ReserveOrActivateIndex};
pub use self::at_location::{FlowAtLocation, FlowsAtLocation};
pub(crate) use self::drop_flag_effects::*;
/// It follows a tree structure.
///
/// Given `struct X { m: M, n: N }` and `x: X`, moves like `drop x.m;`
-/// move *out* of the l-value `x.m`.
+/// move *out* of the place `x.m`.
///
/// The MovePaths representing `x.m` and `x.n` are siblings (that is,
/// one of them will link to the other via the `next_sibling` field,
}
}
-/// Tables mapping from an l-value to its MovePathIndex.
+/// Tables mapping from a place to its MovePathIndex.
#[derive(Debug)]
pub struct MovePathLookup<'tcx> {
locals: IndexVec<Local, MovePathIndex>,
impl<'tcx> MovePathLookup<'tcx> {
// Unlike the builder `fn move_path_for` below, this lookup
// alternative will *not* create a MovePath on the fly for an
- // unknown l-value, but will rather return the nearest available
+ // unknown place, but will rather return the nearest available
// parent.
pub fn find(&self, place: &Place<'tcx>) -> LookupResult {
match *place {
arg: expr.to_ref(),
},
};
- ExprKind::Cast { source: expr.to_ref() }
+ let cast_expr = Expr {
+ temp_lifetime,
+ ty: adjustment.target,
+ span,
+ kind: ExprKind::Cast { source: expr.to_ref() }
+ };
+
+ // To ensure that both implicit and explicit coercions are
+ // handled the same way, we insert an extra layer of indirection here.
+ // For explicit casts (e.g. 'foo as *const T'), the source of the 'Use'
+ // will be an ExprKind::Hair with the appropriate cast expression. Here,
+ // we make our Use source the generated Cast from the original coercion.
+ //
+ // In both cases, this outer 'Use' ensures that the inner 'Cast' is handled by
+ // as_operand, not by as_rvalue - causing the cast result to be stored in a temporary.
+ // Ordinarily, this is identical to using the cast directly as an rvalue. However, if the
+ // source of the cast was previously borrowed as mutable, storing the cast in a
+ // temporary gives the source a chance to expire before the cast is used. For
+ // structs with a self-referential *mut ptr, this allows assignment to work as
+ // expected.
+ //
+ // For example, consider the type 'struct Foo { field: *mut Foo }',
+ // The method 'fn bar(&mut self) { self.field = self }'
+ // triggers a coercion from '&mut self' to '*mut self'. In order
+ // for the assignment to be valid, the implicit borrow
+ // of 'self' involved in the coercion needs to end before the local
+ // containing the '*mut T' is assigned to 'self.field' - otherwise,
+ // we end up trying to assign to 'self.field' while we have another mutable borrow
+ // active.
+ //
+ // We only need to worry about this kind of thing for coercions from refs to ptrs,
+ // since they get rid of a borrow implicitly.
+ ExprKind::Use { source: cast_expr.to_ref() }
}
Adjust::Unsize => {
ExprKind::Unsize { source: expr.to_ref() }
ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?;
}
+ "type_id" => {
+ let ty = substs.type_at(0);
+ let type_id = ecx.tcx.type_id_hash(ty) as u128;
+ ecx.write_primval(dest, PrimVal::from_u128(type_id), dest_layout.ty)?;
+ }
+
name => return Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into()),
}
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- (self.tcx, self.param_env).layout_of(ty)
+ self.tcx.layout_of(self.param_env.and(ty))
.map_err(|layout| EvalErrorKind::Layout(layout).into())
}
}
#[derive(Copy, Clone, Debug)]
pub enum Place {
- /// An place referring to a value allocated in the `Memory` system.
+ /// A place referring to a value allocated in the `Memory` system.
Ptr {
- /// An place may have an invalid (integral or undef) pointer,
+ /// A place may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
ptr: Pointer,
extra: PlaceExtra,
},
- /// An place referring to a value on the stack. Represented by a stack frame index paired with
+ /// A place referring to a value on the stack. Represented by a stack frame index paired with
/// a Mir local index.
Local { frame: usize, local: mir::Local },
}
}
impl<'tcx> Place {
- /// Produces an Place that will error if attempted to be read from
+ /// Produces a Place that will error if attempted to be read from
pub fn undef() -> Self {
Self::from_primval_ptr(PrimVal::Undef.into(), Align::from_bytes(1, 1).unwrap())
}
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::mir::mono::{Linkage, Visibility};
+use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::ty::{self, TyCtxt, InstanceDef};
use rustc::ty::item_path::characteristic_def_id_of_type;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
.or_insert_with(make_codegen_unit);
let mut can_be_internalized = true;
- let (linkage, visibility) = match trans_item.explicit_linkage(tcx) {
+ let default_visibility = |id: DefId| {
+ if tcx.sess.target.target.options.default_hidden_visibility &&
+ tcx.symbol_export_level(id) != SymbolExportLevel::C
+ {
+ Visibility::Hidden
+ } else {
+ Visibility::Default
+ }
+ };
+ let (linkage, mut visibility) = match trans_item.explicit_linkage(tcx) {
Some(explicit_linkage) => (explicit_linkage, Visibility::Default),
None => {
match trans_item {
Visibility::Hidden
} else if def_id.is_local() {
if tcx.is_exported_symbol(def_id) {
- Visibility::Default
+ can_be_internalized = false;
+ default_visibility(def_id)
} else {
Visibility::Hidden
}
MonoItem::GlobalAsm(node_id) => {
let def_id = tcx.hir.local_def_id(node_id);
let visibility = if tcx.is_exported_symbol(def_id) {
- Visibility::Default
+ can_be_internalized = false;
+ default_visibility(def_id)
} else {
Visibility::Hidden
};
{
debug!("build_clone_shim(def_id={:?})", def_id);
- let mut builder = CloneShimBuilder::new(tcx, def_id);
+ let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
let is_copy = !self_ty.moves_by_default(tcx, tcx.param_env(def_id), builder.span);
+ let dest = Place::Local(RETURN_PLACE);
+ let src = Place::Local(Local::new(1+0)).deref();
+
match self_ty.sty {
_ if is_copy => builder.copy_shim(),
ty::TyArray(ty, len) => {
let len = len.val.to_const_int().unwrap().to_u64().unwrap();
- builder.array_shim(ty, len)
+ builder.array_shim(dest, src, ty, len)
}
ty::TyClosure(def_id, substs) => {
builder.tuple_like_shim(
- &substs.upvar_tys(def_id, tcx).collect::<Vec<_>>(),
- AggregateKind::Closure(def_id, substs)
+ dest, src,
+ substs.upvar_tys(def_id, tcx)
)
}
- ty::TyTuple(tys, _) => builder.tuple_like_shim(&**tys, AggregateKind::Tuple),
+ ty::TyTuple(tys, _) => builder.tuple_like_shim(dest, src, tys.iter().cloned()),
_ => {
bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty)
}
}
impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> {
- fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Self {
- let sig = tcx.fn_sig(def_id);
+ fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ self_ty: Ty<'tcx>) -> Self {
+ // we must subst the self_ty because it's
+ // otherwise going to be TySelf and we can't index
+ // or access fields of a Place of type TySelf.
+ let substs = tcx.mk_substs_trait(self_ty, &[]);
+ let sig = tcx.fn_sig(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(&sig);
let span = tcx.def_span(def_id);
})
}
+ /// Gives the index of an upcoming BasicBlock, with an offset.
+ /// offset=0 will give you the index of the next BasicBlock,
+ /// offset=1 will give the index of the next-to-next block,
+ /// (note: `offset` is unsigned, so already-created blocks cannot be referenced)
+ fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
+ BasicBlock::new(self.blocks.len() + offset)
+ }
+
fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
Statement {
source_info: self.source_info(),
fn make_clone_call(
&mut self,
+ dest: Place<'tcx>,
+ src: Place<'tcx>,
ty: Ty<'tcx>,
- rcvr_field: Place<'tcx>,
next: BasicBlock,
cleanup: BasicBlock
- ) -> Place<'tcx> {
+ ) {
let tcx = self.tcx;
let substs = Substs::for_item(
})
);
- let loc = self.make_place(Mutability::Not, ty);
-
- // `let ref_loc: &ty = &rcvr_field;`
+ // `let ref_loc: &ty = &src;`
let statement = self.make_statement(
StatementKind::Assign(
ref_loc.clone(),
- Rvalue::Ref(tcx.types.re_erased, BorrowKind::Shared, rcvr_field)
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Shared, src)
)
);
self.block(vec![statement], TerminatorKind::Call {
func,
args: vec![Operand::Move(ref_loc)],
- destination: Some((loc.clone(), next)),
+ destination: Some((dest, next)),
cleanup: Some(cleanup),
}, false);
-
- loc
}
fn loop_header(
}
}
- fn array_shim(&mut self, ty: Ty<'tcx>, len: u64) {
+ fn array_shim(&mut self, dest: Place<'tcx>, src: Place<'tcx>, ty: Ty<'tcx>, len: u64) {
let tcx = self.tcx;
let span = self.span;
- let rcvr = Place::Local(Local::new(1+0)).deref();
let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span));
let end = self.make_place(Mutability::Not, tcx.types.usize);
- let ret = self.make_place(Mutability::Mut, tcx.mk_array(ty, len));
// BB #0
// `let mut beg = 0;`
self.loop_header(Place::Local(beg), end, BasicBlock::new(2), BasicBlock::new(4), false);
// BB #2
- // `let cloned = Clone::clone(rcvr[beg])`;
+ // `dest[i] = Clone::clone(src[beg])`;
// Goto #3 if ok, #5 if unwinding happens.
- let rcvr_field = rcvr.clone().index(beg);
- let cloned = self.make_clone_call(ty, rcvr_field, BasicBlock::new(3), BasicBlock::new(5));
+ let dest_field = dest.clone().index(beg);
+ let src_field = src.clone().index(beg);
+ self.make_clone_call(dest_field, src_field, ty, BasicBlock::new(3),
+ BasicBlock::new(5));
// BB #3
- // `ret[beg] = cloned;`
// `beg = beg + 1;`
// `goto #1`;
- let ret_field = ret.clone().index(beg);
let statements = vec![
- self.make_statement(
- StatementKind::Assign(
- ret_field,
- Rvalue::Use(Operand::Move(cloned))
- )
- ),
self.make_statement(
StatementKind::Assign(
Place::Local(beg),
self.block(statements, TerminatorKind::Goto { target: BasicBlock::new(1) }, false);
// BB #4
- // `return ret;`
- let ret_statement = self.make_statement(
- StatementKind::Assign(
- Place::Local(RETURN_PLACE),
- Rvalue::Use(Operand::Move(ret.clone())),
- )
- );
- self.block(vec![ret_statement], TerminatorKind::Return, false);
+ // `return dest;`
+ self.block(vec![], TerminatorKind::Return, false);
// BB #5 (cleanup)
// `let end = beg;`
BasicBlock::new(7), BasicBlock::new(9), true);
// BB #7 (cleanup)
- // `drop(ret[beg])`;
+ // `drop(dest[beg])`;
self.block(vec![], TerminatorKind::Drop {
- location: ret.index(beg),
+ location: dest.index(beg),
target: BasicBlock::new(8),
unwind: None,
}, true);
self.block(vec![], TerminatorKind::Resume, true);
}
- fn tuple_like_shim(&mut self, tys: &[ty::Ty<'tcx>], kind: AggregateKind<'tcx>) {
- match kind {
- AggregateKind::Tuple | AggregateKind::Closure(..) => (),
- _ => bug!("only tuples and closures are accepted"),
- };
+ fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>,
+ src: Place<'tcx>, tys: I)
+ where I: Iterator<Item = ty::Ty<'tcx>> {
+ let mut previous_field = None;
+ for (i, ity) in tys.enumerate() {
+ let field = Field::new(i);
+ let src_field = src.clone().field(field, ity);
- let rcvr = Place::Local(Local::new(1+0)).deref();
+ let dest_field = dest.clone().field(field, ity);
- let mut returns = Vec::new();
- for (i, ity) in tys.iter().enumerate() {
- let rcvr_field = rcvr.clone().field(Field::new(i), *ity);
+ // #(2i + 1) is the cleanup block for the previous clone operation
+ let cleanup_block = self.block_index_offset(1);
+ // #(2i + 2) is the next cloning block
+ // (or the Return terminator if this is the last block)
+ let next_block = self.block_index_offset(2);
// BB #(2i)
- // `returns[i] = Clone::clone(&rcvr.i);`
+ // `dest.i = Clone::clone(&src.i);`
// Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
- returns.push(
- self.make_clone_call(
- *ity,
- rcvr_field,
- BasicBlock::new(2 * i + 2),
- BasicBlock::new(2 * i + 1),
- )
+ self.make_clone_call(
+ dest_field.clone(),
+ src_field,
+ ity,
+ next_block,
+ cleanup_block,
);
// BB #(2i + 1) (cleanup)
- if i == 0 {
- // Nothing to drop, just resume.
- self.block(vec![], TerminatorKind::Resume, true);
- } else {
+ if let Some((previous_field, previous_cleanup)) = previous_field.take() {
// Drop previous field and goto previous cleanup block.
self.block(vec![], TerminatorKind::Drop {
- location: returns[i - 1].clone(),
- target: BasicBlock::new(2 * i - 1),
+ location: previous_field,
+ target: previous_cleanup,
unwind: None,
}, true);
+ } else {
+ // Nothing to drop, just resume.
+ self.block(vec![], TerminatorKind::Resume, true);
}
+
+ previous_field = Some((dest_field, cleanup_block));
}
- // `return kind(returns[0], returns[1], ..., returns[tys.len() - 1]);`
- let ret_statement = self.make_statement(
- StatementKind::Assign(
- Place::Local(RETURN_PLACE),
- Rvalue::Aggregate(
- box kind,
- returns.into_iter().map(Operand::Move).collect()
- )
- )
- );
- self.block(vec![ret_statement], TerminatorKind::Return, false);
+ self.block(vec![], TerminatorKind::Return, false);
}
}
// except according to those terms.
use dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex, LookupResult};
-use dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
+use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use dataflow::{DataflowResults};
use dataflow::{on_all_children_bits, on_all_drop_children_bits};
use dataflow::{drop_flag_effects_for_location, on_lookup_result_bits};
let dead_unwinds = find_dead_unwinds(tcx, mir, id, &env);
let flow_inits =
do_dataflow(tcx, mir, id, &[], &dead_unwinds,
- MaybeInitializedLvals::new(tcx, mir, &env),
+ MaybeInitializedPlaces::new(tcx, mir, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
let flow_uninits =
do_dataflow(tcx, mir, id, &[], &dead_unwinds,
- MaybeUninitializedLvals::new(tcx, mir, &env),
+ MaybeUninitializedPlaces::new(tcx, mir, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
ElaborateDropsCtxt {
let mut dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &[], &dead_unwinds,
- MaybeInitializedLvals::new(tcx, mir, &env),
+ MaybeInitializedPlaces::new(tcx, mir, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
let location = match bb_data.terminator().kind {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
mir: &'a Mir<'tcx>,
env: &'a MoveDataParamEnv<'tcx, 'tcx>,
- flow_inits: DataflowResults<MaybeInitializedLvals<'a, 'tcx, 'tcx>>,
- flow_uninits: DataflowResults<MaybeUninitializedLvals<'a, 'tcx, 'tcx>>,
+ flow_inits: DataflowResults<MaybeInitializedPlaces<'a, 'tcx, 'tcx>>,
+ flow_uninits: DataflowResults<MaybeUninitializedPlaces<'a, 'tcx, 'tcx>>,
drop_flags: FxHashMap<MovePathIndex, Local>,
patch: MirPatch<'tcx>,
}
use transform::{MirPass, MirSource};
use transform::simplify;
use transform::no_landing_pads::no_landing_pads;
-use dataflow::{do_dataflow, DebugFormatted, MaybeStorageLive, state_for_location};
+use dataflow::{do_dataflow, DebugFormatted, state_for_location};
+use dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals};
pub struct StateTransform;
HashMap<BasicBlock, liveness::LocalSet>) {
let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
let node_id = tcx.hir.as_local_node_id(source.def_id).unwrap();
- let analysis = MaybeStorageLive::new(mir);
+
+ // Calculate when MIR locals have live storage. This gives us an upper bound of their
+ // lifetimes.
+ let storage_live_analysis = MaybeStorageLive::new(mir);
let storage_live =
- do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, analysis,
+ do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, storage_live_analysis,
|bd, p| DebugFormatted::new(&bd.mir().local_decls[p]));
+ // Find the MIR locals which do not use StorageLive/StorageDead statements.
+ // The storage of these locals are always live.
let mut ignored = StorageIgnored(IdxSetBuf::new_filled(mir.local_decls.len()));
ignored.visit_mir(mir);
- let mut borrowed_locals = BorrowedLocals(IdxSetBuf::new_empty(mir.local_decls.len()));
- borrowed_locals.visit_mir(mir);
+ // Calculate the MIR locals which have been previously
+ // borrowed (even if they are still active).
+ // This is only used for immovable generators.
+ let borrowed_locals = if !movable {
+ let analysis = HaveBeenBorrowedLocals::new(mir);
+ let result =
+ do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, analysis,
+ |bd, p| DebugFormatted::new(&bd.mir().local_decls[p]));
+ Some((analysis, result))
+ } else {
+ None
+ };
+ // Calculate the liveness of MIR locals ignoring borrows.
let mut set = liveness::LocalSet::new_empty(mir.local_decls.len());
let mut liveness = liveness::liveness_of_locals(mir, LivenessMode {
include_regular_use: true,
statement_index: data.statements.len(),
};
- let storage_liveness = state_for_location(loc, &analysis, &storage_live, mir);
+ if let Some((ref analysis, ref result)) = borrowed_locals {
+ let borrowed_locals = state_for_location(loc,
+ analysis,
+ result,
+ mir);
+ // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
+ // This is correct for movable generators since borrows cannot live across
+ // suspension points. However for immovable generators we need to account for
+ // borrows, so we conservatively assume that all borrowed locals live forever.
+ // To do this we just union our `liveness` result with `borrowed_locals`, which
+ // contains all the locals which have been borrowed before this suspension point.
+ // If a borrow is converted to a raw reference, we must also assume that it lives
+ // forever. Note that the final liveness is still bounded by the storage liveness
+ // of the local, which happens using the `intersect` operation below.
+ liveness.outs[block].union(&borrowed_locals);
+ }
+
+ let mut storage_liveness = state_for_location(loc,
+ &storage_live_analysis,
+ &storage_live,
+ mir);
+ // Store the storage liveness for later use so we can restore the state
+ // after a suspension point
storage_liveness_map.insert(block, storage_liveness.clone());
- let mut live_locals = storage_liveness;
-
// Mark locals without storage statements as always having live storage
- live_locals.union(&ignored.0);
+ storage_liveness.union(&ignored.0);
- if !movable {
- // For immovable generators we consider borrowed locals to always be live.
- // This effectively makes those locals use just the storage liveness.
- liveness.outs[block].union(&borrowed_locals.0);
- }
+ // Locals are live at this point only if they are used across
+ // suspension points (the `liveness` variable)
+ // and their storage is live (the `storage_liveness` variable)
+ storage_liveness.intersect(&liveness.outs[block]);
- // Locals live are live at this point only if they are used across suspension points
- // and their storage is live
- live_locals.intersect(&liveness.outs[block]);
+ let live_locals = storage_liveness;
// Add the locals life at this suspension point to the set of locals which live across
// any suspension points
use rustc::mir::*;
use rustc::mir::visit::*;
use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
-use rustc::ty::layout::LayoutOf;
use rustc::ty::subst::{Subst,Substs};
use std::collections::VecDeque;
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>) -> Option<u64> {
- (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes())
+ tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}
fn subst_and_normalize<'a, 'tcx: 'a>(
Abi::PlatformIntrinsic => {
assert!(!self.tcx.is_const_fn(def_id));
match &self.tcx.item_name(def_id)[..] {
- "size_of" | "min_align_of" => is_const_fn = Some(def_id),
+ "size_of" | "min_align_of" | "type_id" => is_const_fn = Some(def_id),
name if name.starts_with("simd_shuffle") => {
is_shuffle = true;
use dataflow::MoveDataParamEnv;
use dataflow::BitDenotation;
use dataflow::DataflowResults;
-use dataflow::{DefinitelyInitializedLvals, MaybeInitializedLvals, MaybeUninitializedLvals};
+use dataflow::{DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces};
use dataflow::move_paths::{MovePathIndex, LookupResult};
use dataflow::move_paths::{HasMoveData, MoveData};
use dataflow;
let dead_unwinds = IdxSetBuf::new_empty(mir.basic_blocks().len());
let flow_inits =
do_dataflow(tcx, mir, id, &attributes, &dead_unwinds,
- MaybeInitializedLvals::new(tcx, mir, &mdpe),
+ MaybeInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
let flow_uninits =
do_dataflow(tcx, mir, id, &attributes, &dead_unwinds,
- MaybeUninitializedLvals::new(tcx, mir, &mdpe),
+ MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
let flow_def_inits =
do_dataflow(tcx, mir, id, &attributes, &dead_unwinds,
- DefinitelyInitializedLvals::new(tcx, mir, &mdpe),
+ DefinitelyInitializedPlaces::new(tcx, mir, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
if has_rustc_mir_with(&attributes, "rustc_peek_maybe_init").is_some() {
/// ptr = cur
/// cur = cur.offset(1)
/// } else {
- /// ptr = &mut LV[cur]
+ /// ptr = &mut P[cur]
/// cur = cur + 1
/// }
/// drop(ptr)
if ptr_based {
let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
let tmp = Place::Local(self.new_temp(tmp_ty));
- // tmp = &LV;
+ // tmp = &P;
// cur = tmp as *mut T;
// end = Offset(cur, len);
drop_block_stmts.push(self.assign(&tmp, Rvalue::Ref(
kind.name())
.span_label(e.span,
"can only break with a value inside `loop`")
+ .span_suggestion(e.span,
+ &format!("instead, use `break` on its own \
+ without a value inside this `{}` loop",
+ kind.name()),
+ "break".to_string())
.emit();
}
}
if let Some(sp) = self.current_type_ascription.last() {
let mut sp = *sp;
loop { // try to find the `:`, bail on first non-':'/non-whitespace
- sp = sp.next_point();
- if let Ok(snippet) = cm.span_to_snippet(sp.to(sp.next_point())) {
+ sp = cm.next_point(sp);
+ if let Ok(snippet) = cm.span_to_snippet(sp.to(cm.next_point(sp))) {
debug!("snippet {:?}", snippet);
let line_sp = cm.lookup_char_pos(sp.hi()).line;
let line_base_sp = cm.lookup_char_pos(base_span.lo()).line;
container));
err.span_label(span, format!("`{}` re{} here", name, new_participle));
- if old_binding.span != syntax_pos::DUMMY_SP {
+ if old_binding.span != DUMMY_SP {
err.span_label(self.session.codemap().def_span(old_binding.span),
format!("previous {} of the {} `{}` here", old_noun, old_kind, name));
}
// See https://github.com/rust-lang/rust/issues/32354
if old_binding.is_import() || new_binding.is_import() {
- let binding = if new_binding.is_import() {
+ let binding = if new_binding.is_import() && new_binding.span != DUMMY_SP {
new_binding
} else {
old_binding
if let (Ok(snippet), false) = (cm.span_to_snippet(binding.span),
binding.is_renamed_extern_crate()) {
+ let suggested_name = if name.as_str().chars().next().unwrap().is_uppercase() {
+ format!("Other{}", name)
+ } else {
+ format!("other_{}", name)
+ };
+
err.span_suggestion(binding.span,
rename_msg,
- format!("{} as Other{}", snippet, name));
+ if snippet.ends_with(';') {
+ format!("{} as {};",
+ &snippet[..snippet.len()-1],
+ suggested_name)
+ } else {
+ format!("{} as {}", snippet, suggested_name)
+ });
} else {
err.span_label(binding.span, rename_msg);
}
field_ref: &ast::Field,
variant: &ty::VariantDef,
) -> Option<Ref> {
- let f = variant.field_named(field_ref.ident.node.name);
+ let f = variant.find_field_named(field_ref.ident.node.name)?;
// We don't really need a sub-span here, but no harm done
let sub_span = self.span_utils.span_for_last_ident(field_ref.ident.span);
filter!(self.span_utils, sub_span, field_ref.ident.span, None);
result.push_str(&val.as_str());
}
result.push('\n');
+ } else if let Some(meta_list) = attr.meta_item_list() {
+ meta_list.into_iter()
+ .filter(|it| it.check_name("include"))
+ .filter_map(|it| it.meta_item_list().map(|l| l.to_owned()))
+ .flat_map(|it| it)
+ .filter(|meta| meta.check_name("contents"))
+ .filter_map(|meta| meta.value_str())
+ .for_each(|val| {
+ result.push_str(&val.as_str());
+ result.push('\n');
+ });
}
}
}
bitflags = "1.0"
flate2 = "1.0"
jobserver = "0.1.5"
+libc = "0.2"
log = "0.4"
num_cpus = "1.0"
rustc = { path = "../librustc" }
[target."cfg(windows)".dependencies]
cc = "1.0.1"
+
+[features]
+# Used to communicate the feature to `rustc_back` in the same manner that the
+# `rustc` driver script communicates this.
+jemalloc = ["rustc_back/jemalloc"]
+
+# This is used to convince Cargo to separately cache builds of `rustc_trans`
+# when this option is enabled or not. That way we can build two, cache two
+# artifacts, and have nice speedy rebuilds.
+emscripten = ["rustc_llvm/emscripten"]
self.mode == PassMode::Ignore
}
- /// Get the LLVM type for an place of the original Rust type of
+ /// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
self.layout.llvm_type(cx)
_ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg)
}
- let pointee = self_arg.layout.ty.builtin_deref(true, ty::NoPreference)
+ let pointee = self_arg.layout.ty.builtin_deref(true)
.unwrap_or_else(|| {
bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
}).ty;
name.as_ptr(),
ty);
+ if tcx.sess.target.target.options.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+
let callee = CString::new(kind.fn_name(method.name)).unwrap();
let callee = llvm::LLVMRustGetOrInsertFunction(llmod,
callee.as_ptr(),
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &bx.sess().target.target.arch[..] {
- "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
- _ => Vec::new()
+ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
+ "mips" | "mips64" => vec!["~{$1}"],
+ _ => Vec::new()
};
let all_constraints =
Arc::new(local_crate)
};
+
+ providers.symbol_export_level = export_level;
}
pub fn provide_extern(providers: &mut Providers) {
Arc::new(crate_exports)
};
+ providers.symbol_export_level = export_level;
}
fn export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel {
if cgcx.debuginfo != config::NoDebugInfo {
options.debuginfo(true);
}
- if cgcx.crate_types.contains(&config::CrateTypeExecutable) {
- options.start("main");
- }
+
options.stack(1024 * 1024);
options.import_memory(cgcx.wasm_import_memory);
let assembled = input.and_then(|input| {
target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(),
binaryen_linker: tcx.sess.linker_flavor() == LinkerFlavor::Binaryen,
debuginfo: tcx.sess.opts.debuginfo,
- wasm_import_memory: wasm_import_memory,
+ wasm_import_memory,
assembler_cmd,
};
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
- (self.tcx, ty::ParamEnv::empty(traits::Reveal::All))
- .layout_of(ty)
+ self.tcx.layout_of(ty::ParamEnv::empty(traits::Reveal::All).and(ty))
.unwrap_or_else(|e| match e {
LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()),
_ => bug!("failed to get layout for `{}`: {}", ty, e)
use back::bytecode::RLIB_BYTECODE_EXTENSION;
-pub use llvm_util::{target_features, print_version, print_passes};
+pub use llvm_util::target_features;
use std::any::Any;
use std::path::PathBuf;
impl !Sync for LlvmTransCrate {}
impl LlvmTransCrate {
- pub fn new(sess: &Session) -> Box<TransCrate> {
- llvm_util::init(sess); // Make sure llvm is inited
+ pub fn new() -> Box<TransCrate> {
box LlvmTransCrate(())
}
}
impl TransCrate for LlvmTransCrate {
+ fn init(&self, sess: &Session) {
+ llvm_util::init(sess); // Make sure llvm is inited
+ }
+
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels => {
}
}
+ fn print_passes(&self) {
+ llvm_util::print_passes();
+ }
+
+ fn print_version(&self) {
+ llvm_util::print_version();
+ }
+
+ #[cfg(not(stage0))]
+ fn diagnostics(&self) -> &[(&'static str, &'static str)] {
+ &DIAGNOSTICS
+ }
+
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
/// This is the entrypoint for a hot plugged rustc_trans
#[no_mangle]
-pub fn __rustc_codegen_backend(sess: &Session) -> Box<TransCrate> {
- LlvmTransCrate::new(sess)
+pub fn __rustc_codegen_backend() -> Box<TransCrate> {
+ LlvmTransCrate::new()
}
struct ModuleTranslation {
// detection code will walk past the end of the feature array,
// leading to crashes.
-const ARM_WHITELIST: &'static [&'static str] = &["neon\0", "vfp2\0", "vfp3\0", "vfp4\0"];
+const ARM_WHITELIST: &'static [&'static str] = &["neon\0", "v7\0", "vfp2\0", "vfp3\0", "vfp4\0"];
-const AARCH64_WHITELIST: &'static [&'static str] = &["neon\0"];
+const AARCH64_WHITELIST: &'static [&'static str] = &["neon\0", "v7\0"];
const X86_WHITELIST: &'static [&'static str] = &["avx\0", "avx2\0", "bmi\0", "bmi2\0", "sse\0",
"sse2\0", "sse3\0", "sse4.1\0", "sse4.2\0",
"ssse3\0", "tbm\0", "lzcnt\0", "popcnt\0",
"sse4a\0", "rdrnd\0", "rdseed\0", "fma\0",
"xsave\0", "xsaveopt\0", "xsavec\0",
- "xsaves\0",
+ "xsaves\0", "aes\0",
"avx512bw\0", "avx512cd\0",
"avx512dq\0", "avx512er\0",
"avx512f\0", "avx512ifma\0",
}
}
_ => {
- const_get_elt(self.llval, layout.llvm_field_index(i))
+ match layout.fields {
+ layout::FieldPlacement::Union(_) => self.llval,
+ _ => const_get_elt(self.llval, layout.llvm_field_index(i)),
+ }
}
}
}
Static(ValueRef)
}
-/// An place as seen from a constant.
+/// A place as seen from a constant.
#[derive(Copy, Clone)]
struct ConstPlace<'tcx> {
base: Base,
self.cx.align_of(substs.type_at(0)).abi());
Ok(Const::new(llval, tcx.types.usize))
}
+ "type_id" => {
+ let llval = C_u64(self.cx,
+ self.cx.tcx.type_id_hash(substs.type_at(0)));
+ Ok(Const::new(llval, tcx.types.u64))
+ }
_ => span_bug!(span, "{:?} in constant", terminator.kind)
}
} else if let Some((op, is_checked)) = self.is_binop_lang_item(def_id) {
operand.llval
}
mir::CastKind::Unsize => {
- let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
+ let pointee_ty = operand.ty.builtin_deref(true)
.expect("consts: unsizing got non-pointer type").ty;
let (base, old_info) = if !self.cx.type_is_sized(pointee_ty) {
// Normally, the source is a thin pointer and we are
(operand.llval, None)
};
- let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference)
+ let unsized_ty = cast_ty.builtin_deref(true)
.expect("consts: unsizing got non-pointer target type").ty;
let ptr_ty = self.cx.layout_of(unsized_ty).llvm_type(self.cx).ptr_to();
let base = consts::ptrcast(base, ptr_ty);
// except according to those terms.
use llvm::ValueRef;
-use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
}
pub fn deref(self, cx: &CodegenCx<'a, 'tcx>) -> PlaceRef<'tcx> {
- let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference)
+ let projected_ty = self.layout.ty.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
ty::TyClosure(..) |
ty::TyGenerator(..) |
ty::TyAdt(..) |
- ty::TyDynamic(..) |
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::TyDynamic(..) |
ty::TyForeign(..) |
ty::TyStr => {
let mut name = String::with_capacity(32);
use link::{build_link_meta, out_filename};
pub trait TransCrate {
+ fn init(&self, _sess: &Session) {}
fn print(&self, _req: PrintRequest, _sess: &Session) {}
fn target_features(&self, _sess: &Session) -> Vec<Symbol> { vec![] }
+ fn print_passes(&self) {}
+ fn print_version(&self) {}
+ fn diagnostics(&self) -> &[(&'static str, &'static str)] { &[] }
fn metadata_loader(&self) -> Box<MetadataLoader>;
fn provide(&self, _providers: &mut Providers);
}
impl MetadataOnlyTransCrate {
- pub fn new(sess: &Session) -> Box<TransCrate> {
+ pub fn new() -> Box<TransCrate> {
+ box MetadataOnlyTransCrate(())
+ }
+}
+
+impl TransCrate for MetadataOnlyTransCrate {
+ fn init(&self, sess: &Session) {
for cty in sess.opts.crate_types.iter() {
match *cty {
CrateType::CrateTypeRlib | CrateType::CrateTypeDylib |
},
}
}
-
- box MetadataOnlyTransCrate(())
}
-}
-impl TransCrate for MetadataOnlyTransCrate {
fn metadata_loader(&self) -> Box<MetadataLoader> {
box NoLlvmMetadataLoader
}
use rustc::infer;
use rustc::infer::type_variable::TypeVariableOrigin;
use rustc::traits::ObligationCauseCode;
-use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference};
-use check::{FnCtxt, Expectation, Diverges};
+use rustc::ty::{self, Ty, TypeFoldable};
+use check::{FnCtxt, Expectation, Diverges, Needs};
use check::coercion::CoerceMany;
use util::nodemap::FxHashMap;
pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool {
if let PatKind::Binding(..) = inner.node {
- if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true, ty::NoPreference) {
+ if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true) {
if let ty::TyDynamic(..) = mt.ty.sty {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
});
let discrim_ty;
if let Some(m) = contains_ref_bindings {
- discrim_ty = self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m));
+ discrim_ty = self.check_expr_with_needs(discrim, Needs::maybe_mut_place(m));
} else {
// ...but otherwise we want to use any supertype of the
// discriminant. This is sort of a workaround, see note (*) in
use astconv::AstConv;
-use super::{FnCtxt, LvalueOp};
+use super::{FnCtxt, PlaceOp, Needs};
use super::method::MethodCallee;
use rustc::infer::InferOk;
use rustc::traits;
use rustc::ty::{self, Ty, TraitRef};
use rustc::ty::{ToPredicate, TypeFoldable};
-use rustc::ty::{LvaluePreference, NoPreference};
use rustc::ty::adjustment::{Adjustment, Adjust, OverloadedDeref};
use syntax_pos::Span;
// Otherwise, deref if type is derefable:
let (kind, new_ty) =
- if let Some(mt) = self.cur_ty.builtin_deref(self.include_raw_pointers, NoPreference) {
+ if let Some(mt) = self.cur_ty.builtin_deref(self.include_raw_pointers) {
(AutoderefKind::Builtin, mt.ty)
} else {
let ty = self.overloaded_deref_ty(self.cur_ty)?;
}
/// Returns the adjustment steps.
- pub fn adjust_steps(&self, pref: LvaluePreference)
+ pub fn adjust_steps(&self, needs: Needs)
-> Vec<Adjustment<'tcx>> {
- self.fcx.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(pref))
+ self.fcx.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(needs))
}
- pub fn adjust_steps_as_infer_ok(&self, pref: LvaluePreference)
+ pub fn adjust_steps_as_infer_ok(&self, needs: Needs)
-> InferOk<'tcx, Vec<Adjustment<'tcx>>> {
let mut obligations = vec![];
let targets = self.steps.iter().skip(1).map(|&(ty, _)| ty)
.chain(iter::once(self.cur_ty));
let steps: Vec<_> = self.steps.iter().map(|&(source, kind)| {
if let AutoderefKind::Overloaded = kind {
- self.fcx.try_overloaded_deref(self.span, source, pref)
+ self.fcx.try_overloaded_deref(self.span, source, needs)
.and_then(|InferOk { value: method, obligations: o }| {
obligations.extend(o);
if let ty::TyRef(region, mt) = method.sig.output().sty {
pub fn try_overloaded_deref(&self,
span: Span,
base_ty: Ty<'tcx>,
- pref: LvaluePreference)
+ needs: Needs)
-> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
- self.try_overloaded_lvalue_op(span, base_ty, &[], pref, LvalueOp::Deref)
+ self.try_overloaded_place_op(span, base_ty, &[], needs, PlaceOp::Deref)
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::{Expectation, FnCtxt, TupleArgumentsFlag};
+use super::{Expectation, FnCtxt, Needs, TupleArgumentsFlag};
use super::autoderef::Autoderef;
use super::method::MethodCallee;
use hir::def::Def;
use hir::def_id::{DefId, LOCAL_CRATE};
use rustc::{infer, traits};
-use rustc::ty::{self, TyCtxt, TypeFoldable, LvaluePreference, Ty};
+use rustc::ty::{self, TyCtxt, TypeFoldable, Ty};
use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow};
use syntax::abi;
use syntax::symbol::Symbol;
// If the callee is a bare function or a closure, then we're all set.
match adjusted_ty.sty {
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
- let adjustments = autoderef.adjust_steps(LvaluePreference::NoPreference);
+ let adjustments = autoderef.adjust_steps(Needs::None);
self.apply_adjustments(callee_expr, adjustments);
return Some(CallStep::Builtin(adjusted_ty));
}
infer::FnCall,
&closure_ty)
.0;
- let adjustments = autoderef.adjust_steps(LvaluePreference::NoPreference);
+ let adjustments = autoderef.adjust_steps(Needs::None);
self.record_deferred_call_resolution(def_id, DeferredCallResolution {
call_expr,
callee_expr,
}
self.try_overloaded_call_traits(call_expr, adjusted_ty).map(|(autoref, method)| {
- let mut adjustments = autoderef.adjust_steps(LvaluePreference::NoPreference);
+ let mut adjustments = autoderef.adjust_steps(Needs::None);
adjustments.extend(autoref);
self.apply_adjustments(callee_expr, adjustments);
CallStep::Overloaded(method)
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
-use check::{Diverges, FnCtxt};
+use check::{Diverges, FnCtxt, Needs};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::lint;
use rustc::traits::{self, ObligationCause, ObligationCauseCode};
use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow};
-use rustc::ty::{self, LvaluePreference, TypeAndMut,
- Ty, ClosureSubsts};
+use rustc::ty::{self, TypeAndMut, Ty, ClosureSubsts};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::error::TypeError;
use rustc::ty::relate::RelateResult;
return success(vec![], ty, obligations);
}
- let pref = LvaluePreference::from_mutbl(mt_b.mutbl);
+ let needs = Needs::maybe_mut_place(mt_b.mutbl);
let InferOk { value: mut adjustments, obligations: o }
- = autoderef.adjust_steps_as_infer_ok(pref);
+ = autoderef.adjust_steps_as_infer_ok(needs);
obligations.extend(o);
obligations.extend(autoderef.into_obligations());
debug!("compare_impl_method(impl_trait_ref={:?})",
impl_trait_ref);
+ let impl_m_span = tcx.sess.codemap().def_span(impl_m_span);
+
if let Err(ErrorReported) = compare_self_type(tcx,
impl_m,
impl_m_span,
check_region_bounds_on_impl_method(tcx,
impl_m_span,
impl_m,
+ trait_m,
&trait_m_generics,
&impl_m_generics,
trait_to_skol_substs)?;
};
let mut diag = struct_span_err!(tcx.sess,
- cause.span,
+ cause.span(&tcx),
E0053,
"method `{}` has an incompatible type for trait",
trait_m.name);
fn check_region_bounds_on_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
span: Span,
impl_m: &ty::AssociatedItem,
+ trait_m: &ty::AssociatedItem,
trait_generics: &ty::Generics,
impl_generics: &ty::Generics,
trait_to_skol_substs: &Substs<'tcx>)
-> Result<(), ErrorReported> {
+ let span = tcx.sess.codemap().def_span(span);
let trait_params = &trait_generics.regions[..];
let impl_params = &impl_generics.regions[..];
// are zero. Since I don't quite know how to phrase things at
// the moment, give a kind of vague error message.
if trait_params.len() != impl_params.len() {
- struct_span_err!(tcx.sess,
- span,
- E0195,
- "lifetime parameters or bounds on method `{}` do not match the \
- trait declaration",
- impl_m.name)
- .span_label(span, "lifetimes do not match trait")
- .emit();
+ let mut err = struct_span_err!(tcx.sess,
+ span,
+ E0195,
+ "lifetime parameters or bounds on method `{}` do not match \
+ the trait declaration",
+ impl_m.name);
+ err.span_label(span, "lifetimes do not match method in trait");
+ if let Some(sp) = tcx.hir.span_if_local(trait_m.def_id) {
+ err.span_label(tcx.sess.codemap().def_span(sp),
+ "lifetimes in impl do not match this method in trait");
+ }
+ err.emit();
return Err(ErrorReported);
}
}).map(|(ref impl_arg, ref trait_arg)| {
(impl_arg.span, Some(trait_arg.span))
})
- .unwrap_or_else(|| (cause.span, tcx.hir.span_if_local(trait_m.def_id)))
+ .unwrap_or_else(|| (cause.span(&tcx), tcx.hir.span_if_local(trait_m.def_id)))
} else {
- (cause.span, tcx.hir.span_if_local(trait_m.def_id))
+ (cause.span(&tcx), tcx.hir.span_if_local(trait_m.def_id))
}
}
TypeError::Sorts(ExpectedFound { .. }) => {
{
(impl_m_output.span(), Some(trait_m_output.span()))
} else {
- (cause.span, tcx.hir.span_if_local(trait_m.def_id))
+ (cause.span(&tcx), tcx.hir.span_if_local(trait_m.def_id))
}
})
} else {
- (cause.span, tcx.hir.span_if_local(trait_m.def_id))
+ (cause.span(&tcx), tcx.hir.span_if_local(trait_m.def_id))
}
}
- _ => (cause.span, tcx.hir.span_if_local(trait_m.def_id)),
+ _ => (cause.span(&tcx), tcx.hir.span_if_local(trait_m.def_id)),
}
}
}
fn visit_pat(&mut self, pat: &'tcx Pat) {
+ intravisit::walk_pat(self, pat);
+
+ self.expr_count += 1;
+
if let PatKind::Binding(..) = pat.node {
let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id);
let ty = self.fcx.tables.borrow().pat_ty(pat);
self.record(ty, Some(scope), None, pat.span);
}
-
- self.expr_count += 1;
-
- intravisit::walk_pat(self, pat);
}
fn visit_expr(&mut self, expr: &'tcx Expr) {
use super::{probe, MethodCallee};
use astconv::AstConv;
-use check::{FnCtxt, LvalueOp, callee};
+use check::{FnCtxt, PlaceOp, callee, Needs};
use hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::traits;
-use rustc::ty::{self, LvaluePreference, NoPreference, PreferMutLvalue, Ty};
+use rustc::ty::{self, Ty};
use rustc::ty::subst::Subst;
use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow, OverloadedDeref};
use rustc::ty::fold::TypeFoldable;
};
if let Some(hir::MutMutable) = pick.autoref {
- self.convert_lvalue_derefs_to_mutable();
+ self.convert_place_derefs_to_mutable();
}
ConfirmResult { callee, illegal_sized_bound }
let (_, n) = autoderef.nth(pick.autoderefs).unwrap();
assert_eq!(n, pick.autoderefs);
- let mut adjustments = autoderef.adjust_steps(LvaluePreference::NoPreference);
+ let mut adjustments = autoderef.adjust_steps(Needs::None);
let mut target = autoderef.unambiguous_final_ty();
/// When we select a method with a mutable autoref, we have to go convert any
/// auto-derefs, indices, etc from `Deref` and `Index` into `DerefMut` and `IndexMut`
/// respectively.
- fn convert_lvalue_derefs_to_mutable(&self) {
+ fn convert_place_derefs_to_mutable(&self) {
// Gather up expressions we want to munge.
let mut exprs = Vec::new();
exprs.push(self.self_expr);
}
}
- debug!("convert_lvalue_derefs_to_mutable: exprs={:?}", exprs);
+ debug!("convert_place_derefs_to_mutable: exprs={:?}", exprs);
// Fix up autoderefs and derefs.
for (i, &expr) in exprs.iter().rev().enumerate() {
- debug!("convert_lvalue_derefs_to_mutable: i={} expr={:?}", i, expr);
+ debug!("convert_place_derefs_to_mutable: i={} expr={:?}", i, expr);
// Fix up the autoderefs. Autorefs can only occur immediately preceding
- // overloaded lvalue ops, and will be fixed by them in order to get
+ // overloaded place ops, and will be fixed by them in order to get
// the correct region.
let mut source = self.node_ty(expr.hir_id);
// Do not mutate adjustments in place, but rather take them,
.adjustments_mut()
.remove(expr.hir_id);
if let Some(mut adjustments) = previous_adjustments {
- let pref = LvaluePreference::PreferMutLvalue;
+ let needs = Needs::MutPlace;
for adjustment in &mut adjustments {
if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind {
- if let Some(ok) = self.try_overloaded_deref(expr.span, source, pref) {
+ if let Some(ok) = self.try_overloaded_deref(expr.span, source, needs) {
let method = self.register_infer_ok_obligations(ok);
if let ty::TyRef(region, mt) = method.sig.output().sty {
*deref = OverloadedDeref {
match expr.node {
hir::ExprIndex(ref base_expr, ref index_expr) => {
let index_expr_ty = self.node_ty(index_expr.hir_id);
- self.convert_lvalue_op_to_mutable(
- LvalueOp::Index, expr, base_expr, &[index_expr_ty]);
+ self.convert_place_op_to_mutable(
+ PlaceOp::Index, expr, base_expr, &[index_expr_ty]);
}
hir::ExprUnary(hir::UnDeref, ref base_expr) => {
- self.convert_lvalue_op_to_mutable(
- LvalueOp::Deref, expr, base_expr, &[]);
+ self.convert_place_op_to_mutable(
+ PlaceOp::Deref, expr, base_expr, &[]);
}
_ => {}
}
}
}
- fn convert_lvalue_op_to_mutable(&self,
- op: LvalueOp,
+ fn convert_place_op_to_mutable(&self,
+ op: PlaceOp,
expr: &hir::Expr,
base_expr: &hir::Expr,
arg_tys: &[Ty<'tcx>])
{
- debug!("convert_lvalue_op_to_mutable({:?}, {:?}, {:?}, {:?})",
+ debug!("convert_place_op_to_mutable({:?}, {:?}, {:?}, {:?})",
op, expr, base_expr, arg_tys);
if !self.tables.borrow().is_method_call(expr) {
- debug!("convert_lvalue_op_to_mutable - builtin, nothing to do");
+ debug!("convert_place_op_to_mutable - builtin, nothing to do");
return
}
.map_or_else(|| self.node_ty(expr.hir_id), |adj| adj.target);
let base_ty = self.resolve_type_vars_if_possible(&base_ty);
- // Need to deref because overloaded lvalue ops take self by-reference.
- let base_ty = base_ty.builtin_deref(false, NoPreference)
- .expect("lvalue op takes something that is not a ref")
+ // Need to deref because overloaded place ops take self by-reference.
+ let base_ty = base_ty.builtin_deref(false)
+ .expect("place op takes something that is not a ref")
.ty;
- let method = self.try_overloaded_lvalue_op(
- expr.span, base_ty, arg_tys, PreferMutLvalue, op);
+ let method = self.try_overloaded_place_op(
+ expr.span, base_ty, arg_tys, Needs::MutPlace, op);
let method = match method {
Some(ok) => self.register_infer_ok_obligations(ok),
None => return self.tcx.sess.delay_span_bug(expr.span, "re-trying op failed")
};
- debug!("convert_lvalue_op_to_mutable: method={:?}", method);
+ debug!("convert_place_op_to_mutable: method={:?}", method);
self.write_method_call(expr.hir_id, method);
let (region, mutbl) = if let ty::TyRef(r, mt) = method.sig.inputs()[0].sty {
(r, mt.mutbl)
} else {
- span_bug!(expr.span, "input to lvalue op is not a ref?");
+ span_bug!(expr.span, "input to place op is not a ref?");
};
// Convert the autoref in the base expr to mutable with the correct
let mut source = base_expr_ty;
for adjustment in &mut adjustments[..] {
if let Adjust::Borrow(AutoBorrow::Ref(..)) = adjustment.kind {
- debug!("convert_lvalue_op_to_mutable: converting autoref {:?}", adjustment);
+ debug!("convert_place_op_to_mutable: converting autoref {:?}", adjustment);
adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(region, mutbl));
adjustment.target = self.tcx.mk_ref(region, ty::TypeAndMut {
ty: source,
use rustc::middle::region;
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::traits::{self, FulfillmentContext, ObligationCause, ObligationCauseCode};
-use rustc::ty::{ParamTy, LvaluePreference, NoPreference, PreferMutLvalue};
use rustc::ty::{self, Ty, TyCtxt, Visibility, ToPredicate};
use rustc::ty::adjustment::{Adjust, Adjustment, AutoBorrow};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::maps::Providers;
use rustc::ty::util::{Representability, IntTypeExt};
-use rustc::ty::layout::LayoutOf;
use errors::{DiagnosticBuilder, DiagnosticId};
use require_c_abi_if_variadic;
}
}
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Needs {
+ MutPlace,
+ None
+}
+
+impl Needs {
+ fn maybe_mut_place(m: hir::Mutability) -> Self {
+ match m {
+ hir::MutMutable => Needs::MutPlace,
+ hir::MutImmutable => Needs::None,
+ }
+ }
+}
+
#[derive(Copy, Clone)]
pub struct UnsafetyState {
pub def: ast::NodeId,
}
#[derive(Debug, Copy, Clone)]
-pub enum LvalueOp {
+pub enum PlaceOp {
Deref,
Index
}
/// foo();}` or `{return; 22}`, where we would warn on the
/// `foo()` or `22`.
///
- /// - To permit assignment into a local variable or other lvalue
+ /// - To permit assignment into a local variable or other place
/// (including the "return slot") of type `!`. This is allowed
/// if **either** the type of value being assigned is `!`, which
/// means the current code is dead, **or** the expression's
let span = body.value.span;
if body.is_generator && can_be_generator.is_some() {
- fcx.yield_ty = Some(fcx.next_ty_var(TypeVariableOrigin::TypeInference(span)));
+ let yield_ty = fcx.next_ty_var(TypeVariableOrigin::TypeInference(span));
+ fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
+ fcx.yield_ty = Some(yield_ty);
}
GatherLocalsVisitor { fcx: &fcx, }.visit_body(body);
let field_infos: Vec<_> = adt.non_enum_variant().fields.iter().map(|field| {
let ty = field.ty(tcx, Substs::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
- let layout = (tcx, param_env).layout_of(ty);
+ let layout = tcx.layout_of(param_env.and(ty));
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir.span_if_local(field.did).unwrap();
let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
}
}
- /// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait
+ fn is_place_expr(&self, expr: &hir::Expr) -> bool {
+ match expr.node {
+ hir::ExprPath(hir::QPath::Resolved(_, ref path)) => {
+ match path.def {
+ Def::Local(..) | Def::Upvar(..) | Def::Static(..) | Def::Err => true,
+ _ => false,
+ }
+ }
+
+ hir::ExprType(ref e, _) => {
+ self.is_place_expr(e)
+ }
+
+ hir::ExprUnary(hir::UnDeref, _) |
+ hir::ExprField(..) |
+ hir::ExprTupField(..) |
+ hir::ExprIndex(..) => {
+ true
+ }
+
+ // Partially qualified paths in expressions can only legally
+ // refer to associated items which are always rvalues.
+ hir::ExprPath(hir::QPath::TypeRelative(..)) |
+
+ hir::ExprCall(..) |
+ hir::ExprMethodCall(..) |
+ hir::ExprStruct(..) |
+ hir::ExprTup(..) |
+ hir::ExprIf(..) |
+ hir::ExprMatch(..) |
+ hir::ExprClosure(..) |
+ hir::ExprBlock(..) |
+ hir::ExprRepeat(..) |
+ hir::ExprArray(..) |
+ hir::ExprBreak(..) |
+ hir::ExprAgain(..) |
+ hir::ExprRet(..) |
+ hir::ExprWhile(..) |
+ hir::ExprLoop(..) |
+ hir::ExprAssign(..) |
+ hir::ExprInlineAsm(..) |
+ hir::ExprAssignOp(..) |
+ hir::ExprLit(_) |
+ hir::ExprUnary(..) |
+ hir::ExprBox(..) |
+ hir::ExprAddrOf(..) |
+ hir::ExprBinary(..) |
+ hir::ExprYield(..) |
+ hir::ExprCast(..) => {
+ false
+ }
+ }
+ }
+
+ /// For the overloaded place expressions (`*x`, `x[3]`), the trait
/// returns a type of `&T`, but the actual type we assign to the
/// *expression* is `T`. So this function just peels off the return
/// type by one layer to yield `T`.
- fn make_overloaded_lvalue_return_type(&self,
+ fn make_overloaded_place_return_type(&self,
method: MethodCallee<'tcx>)
-> ty::TypeAndMut<'tcx>
{
let ret_ty = method.sig.output();
// method returns &T, but the type as visible to user is T, so deref
- ret_ty.builtin_deref(true, NoPreference).unwrap()
+ ret_ty.builtin_deref(true).unwrap()
}
fn lookup_indexing(&self,
base_expr: &'gcx hir::Expr,
base_ty: Ty<'tcx>,
idx_ty: Ty<'tcx>,
- lvalue_pref: LvaluePreference)
+ needs: Needs)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
// FIXME(#18741) -- this is almost but not quite the same as the
let mut autoderef = self.autoderef(base_expr.span, base_ty);
let mut result = None;
while result.is_none() && autoderef.next().is_some() {
- result = self.try_index_step(expr, base_expr, &autoderef, lvalue_pref, idx_ty);
+ result = self.try_index_step(expr, base_expr, &autoderef, needs, idx_ty);
}
autoderef.finalize();
result
expr: &hir::Expr,
base_expr: &hir::Expr,
autoderef: &Autoderef<'a, 'gcx, 'tcx>,
- lvalue_pref: LvaluePreference,
+ needs: Needs,
index_ty: Ty<'tcx>)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
// type from the method signature.
// If some lookup succeeded, install method in table
let input_ty = self.next_ty_var(TypeVariableOrigin::AutoDeref(base_expr.span));
- let method = self.try_overloaded_lvalue_op(
- expr.span, self_ty, &[input_ty], lvalue_pref, LvalueOp::Index);
+ let method = self.try_overloaded_place_op(
+ expr.span, self_ty, &[input_ty], needs, PlaceOp::Index);
let result = method.map(|ok| {
debug!("try_index_step: success, using overloaded indexing");
let method = self.register_infer_ok_obligations(ok);
- let mut adjustments = autoderef.adjust_steps(lvalue_pref);
+ let mut adjustments = autoderef.adjust_steps(needs);
if let ty::TyRef(region, mt) = method.sig.inputs()[0].sty {
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mt.mutbl)),
self.apply_adjustments(base_expr, adjustments);
self.write_method_call(expr.hir_id, method);
- (input_ty, self.make_overloaded_lvalue_return_type(method).ty)
+ (input_ty, self.make_overloaded_place_return_type(method).ty)
});
if result.is_some() {
return result;
None
}
- fn resolve_lvalue_op(&self, op: LvalueOp, is_mut: bool) -> (Option<DefId>, Symbol) {
+ fn resolve_place_op(&self, op: PlaceOp, is_mut: bool) -> (Option<DefId>, Symbol) {
let (tr, name) = match (op, is_mut) {
- (LvalueOp::Deref, false) =>
+ (PlaceOp::Deref, false) =>
(self.tcx.lang_items().deref_trait(), "deref"),
- (LvalueOp::Deref, true) =>
+ (PlaceOp::Deref, true) =>
(self.tcx.lang_items().deref_mut_trait(), "deref_mut"),
- (LvalueOp::Index, false) =>
+ (PlaceOp::Index, false) =>
(self.tcx.lang_items().index_trait(), "index"),
- (LvalueOp::Index, true) =>
+ (PlaceOp::Index, true) =>
(self.tcx.lang_items().index_mut_trait(), "index_mut"),
};
(tr, Symbol::intern(name))
}
- fn try_overloaded_lvalue_op(&self,
+ fn try_overloaded_place_op(&self,
span: Span,
base_ty: Ty<'tcx>,
arg_tys: &[Ty<'tcx>],
- lvalue_pref: LvaluePreference,
- op: LvalueOp)
+ needs: Needs,
+ op: PlaceOp)
-> Option<InferOk<'tcx, MethodCallee<'tcx>>>
{
- debug!("try_overloaded_lvalue_op({:?},{:?},{:?},{:?})",
+ debug!("try_overloaded_place_op({:?},{:?},{:?},{:?})",
span,
base_ty,
- lvalue_pref,
+ needs,
op);
- // Try Mut first, if preferred.
- let (mut_tr, mut_op) = self.resolve_lvalue_op(op, true);
- let method = match (lvalue_pref, mut_tr) {
- (PreferMutLvalue, Some(trait_did)) => {
+ // Try Mut first, if needed.
+ let (mut_tr, mut_op) = self.resolve_place_op(op, true);
+ let method = match (needs, mut_tr) {
+ (Needs::MutPlace, Some(trait_did)) => {
self.lookup_method_in_trait(span, mut_op, trait_did, base_ty, Some(arg_tys))
}
_ => None,
};
// Otherwise, fall back to the immutable version.
- let (imm_tr, imm_op) = self.resolve_lvalue_op(op, false);
+ let (imm_tr, imm_op) = self.resolve_place_op(op, false);
let method = match (method, imm_tr) {
(None, Some(trait_did)) => {
self.lookup_method_in_trait(span, imm_op, trait_did, base_ty, Some(arg_tys))
err.span_label(def_s, "defined here");
}
if sugg_unit {
- let sugg_span = expr_sp.end_point();
+ let sugg_span = sess.codemap().end_point(expr_sp);
// remove closing `)` from the span
let sugg_span = sugg_span.with_hi(sugg_span.lo());
err.span_suggestion(
fn check_expr_coercable_to_type(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
- self.check_expr_coercable_to_type_with_lvalue_pref(expr, expected, NoPreference)
+ self.check_expr_coercable_to_type_with_needs(expr, expected, Needs::None)
}
- fn check_expr_coercable_to_type_with_lvalue_pref(&self,
- expr: &'gcx hir::Expr,
- expected: Ty<'tcx>,
- lvalue_pref: LvaluePreference)
- -> Ty<'tcx> {
- let ty = self.check_expr_with_expectation_and_lvalue_pref(
+ fn check_expr_coercable_to_type_with_needs(&self,
+ expr: &'gcx hir::Expr,
+ expected: Ty<'tcx>,
+ needs: Needs)
+ -> Ty<'tcx> {
+ let ty = self.check_expr_with_expectation_and_needs(
expr,
ExpectHasType(expected),
- lvalue_pref);
+ needs);
self.demand_coerce(expr, ty, expected)
}
fn check_expr_with_expectation(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>) -> Ty<'tcx> {
- self.check_expr_with_expectation_and_lvalue_pref(expr, expected, NoPreference)
+ self.check_expr_with_expectation_and_needs(expr, expected, Needs::None)
}
fn check_expr(&self, expr: &'gcx hir::Expr) -> Ty<'tcx> {
self.check_expr_with_expectation(expr, NoExpectation)
}
- fn check_expr_with_lvalue_pref(&self, expr: &'gcx hir::Expr,
- lvalue_pref: LvaluePreference) -> Ty<'tcx> {
- self.check_expr_with_expectation_and_lvalue_pref(expr, NoExpectation, lvalue_pref)
+ fn check_expr_with_needs(&self, expr: &'gcx hir::Expr, needs: Needs) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
}
// determine the `self` type, using fresh variables for all variables
span: Span,
args: &'gcx [hir::Expr],
expected: Expectation<'tcx>,
- lvalue_pref: LvaluePreference) -> Ty<'tcx> {
+ needs: Needs) -> Ty<'tcx> {
let rcvr = &args[0];
- let rcvr_t = self.check_expr_with_lvalue_pref(&rcvr, lvalue_pref);
+ let rcvr_t = self.check_expr_with_needs(&rcvr, needs);
// no need to check for bot/err -- callee does that
let rcvr_t = self.structurally_resolved_type(expr.span, rcvr_t);
// Check field access expressions
fn check_field(&self,
expr: &'gcx hir::Expr,
- lvalue_pref: LvaluePreference,
+ needs: Needs,
base: &'gcx hir::Expr,
field: &Spanned<ast::Name>) -> Ty<'tcx> {
- let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref);
+ let expr_t = self.check_expr_with_needs(base, needs);
let expr_t = self.structurally_resolved_type(expr.span,
expr_t);
let mut private_candidate = None;
if let Some(field) = fields.iter().find(|f| f.name.to_ident() == ident) {
let field_ty = self.field_ty(expr.span, field, substs);
if field.vis.is_accessible_from(def_scope, self.tcx) {
- let adjustments = autoderef.adjust_steps(lvalue_pref);
+ let adjustments = autoderef.adjust_steps(needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize();
// Check tuple index expressions
fn check_tup_field(&self,
expr: &'gcx hir::Expr,
- lvalue_pref: LvaluePreference,
+ needs: Needs,
base: &'gcx hir::Expr,
idx: codemap::Spanned<usize>) -> Ty<'tcx> {
- let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref);
+ let expr_t = self.check_expr_with_needs(base, needs);
let expr_t = self.structurally_resolved_type(expr.span,
expr_t);
let mut private_candidate = None;
};
if let Some(field_ty) = field {
- let adjustments = autoderef.adjust_steps(lvalue_pref);
+ let adjustments = autoderef.adjust_steps(needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize();
return field_ty;
/// Note that inspecting a type's structure *directly* may expose the fact
/// that there are actually multiple representations for `TyError`, so avoid
/// that when err needs to be handled differently.
- fn check_expr_with_expectation_and_lvalue_pref(&self,
+ fn check_expr_with_expectation_and_needs(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
- lvalue_pref: LvaluePreference) -> Ty<'tcx> {
+ needs: Needs) -> Ty<'tcx> {
debug!(">> typechecking: expr={:?} expected={:?}",
expr, expected);
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
- let ty = self.check_expr_kind(expr, expected, lvalue_pref);
+ let ty = self.check_expr_kind(expr, expected, needs);
// Warn for non-block expressions with diverging children.
match expr.node {
fn check_expr_kind(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
- lvalue_pref: LvaluePreference) -> Ty<'tcx> {
+ needs: Needs) -> Ty<'tcx> {
let tcx = self.tcx;
let id = expr.id;
match expr.node {
NoExpectation
}
};
- let lvalue_pref = match unop {
- hir::UnDeref => lvalue_pref,
- _ => NoPreference
+ let needs = match unop {
+ hir::UnDeref => needs,
+ _ => Needs::None
};
- let mut oprnd_t = self.check_expr_with_expectation_and_lvalue_pref(&oprnd,
+ let mut oprnd_t = self.check_expr_with_expectation_and_needs(&oprnd,
expected_inner,
- lvalue_pref);
+ needs);
if !oprnd_t.references_error() {
oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
match unop {
hir::UnDeref => {
- if let Some(mt) = oprnd_t.builtin_deref(true, NoPreference) {
+ if let Some(mt) = oprnd_t.builtin_deref(true) {
oprnd_t = mt.ty;
} else if let Some(ok) = self.try_overloaded_deref(
- expr.span, oprnd_t, lvalue_pref) {
+ expr.span, oprnd_t, needs) {
let method = self.register_infer_ok_obligations(ok);
if let ty::TyRef(region, mt) = method.sig.inputs()[0].sty {
self.apply_adjustments(oprnd, vec![Adjustment {
target: method.sig.inputs()[0]
}]);
}
- oprnd_t = self.make_overloaded_lvalue_return_type(method).ty;
+ oprnd_t = self.make_overloaded_place_return_type(method).ty;
self.write_method_call(expr.hir_id, method);
} else {
type_error_struct!(tcx.sess, expr.span, oprnd_t, E0614,
let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
match ty.sty {
ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => {
- if self.tcx.expr_is_lval(&oprnd) {
- // Lvalues may legitimately have unsized types.
+ if self.is_place_expr(&oprnd) {
+ // Places may legitimately have unsized types.
// For example, dereferences of a fat pointer and
// the last field of a struct can be unsized.
ExpectHasType(mt.ty)
_ => NoExpectation
}
});
- let lvalue_pref = LvaluePreference::from_mutbl(mutbl);
- let ty = self.check_expr_with_expectation_and_lvalue_pref(&oprnd, hint, lvalue_pref);
+ let needs = Needs::maybe_mut_place(mutbl);
+ let ty = self.check_expr_with_expectation_and_needs(&oprnd, hint, needs);
let tm = ty::TypeAndMut { ty: ty, mutbl: mutbl };
if tm.ty.references_error() {
tcx.types.never
}
hir::ExprAssign(ref lhs, ref rhs) => {
- let lhs_ty = self.check_expr_with_lvalue_pref(&lhs, PreferMutLvalue);
+ let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty);
_ => {
// Only check this if not in an `if` condition, as the
// mistyped comparison help is more appropriate.
- if !self.tcx.expr_is_lval(&lhs) {
+ if !self.is_place_expr(&lhs) {
struct_span_err!(self.tcx.sess, expr.span, E0070,
"invalid left-hand side expression")
.span_label(expr.span, "left-hand of expression not valid")
self.check_call(expr, &callee, args, expected)
}
hir::ExprMethodCall(ref segment, span, ref args) => {
- self.check_method_call(expr, segment, span, args, expected, lvalue_pref)
+ self.check_method_call(expr, segment, span, args, expected, needs)
}
hir::ExprCast(ref e, ref t) => {
// Find the type of `e`. Supply hints based on the type we are casting to,
self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
hir::ExprField(ref base, ref field) => {
- self.check_field(expr, lvalue_pref, &base, field)
+ self.check_field(expr, needs, &base, field)
}
hir::ExprTupField(ref base, idx) => {
- self.check_tup_field(expr, lvalue_pref, &base, idx)
+ self.check_tup_field(expr, needs, &base, idx)
}
hir::ExprIndex(ref base, ref idx) => {
- let base_t = self.check_expr_with_lvalue_pref(&base, lvalue_pref);
+ let base_t = self.check_expr_with_needs(&base, needs);
let idx_t = self.check_expr(&idx);
if base_t.references_error() {
idx_t
} else {
let base_t = self.structurally_resolved_type(expr.span, base_t);
- match self.lookup_indexing(expr, base, base_t, idx_t, lvalue_pref) {
+ match self.lookup_indexing(expr, base, base_t, idx_t, needs) {
Some((index_ty, element_ty)) => {
self.demand_coerce(idx, idx_t, index_ty);
element_ty
// ref mut, for soundness (issue #23116). In particular, in
// the latter case, we need to be clear that the type of the
// referent for the reference that results is *equal to* the
- // type of the lvalue it is referencing, and not some
+ // type of the place it is referencing, and not some
// supertype thereof.
- let init_ty = self.check_expr_with_lvalue_pref(init, LvaluePreference::from_mutbl(m));
+ let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
self.demand_eqtype(init.span, local_ty, init_ty);
init_ty
} else {
/// statement and the return type has been left as default or has been specified as `()`. If so,
/// it suggests adding a semicolon.
fn suggest_missing_semicolon(&self,
- err: &mut DiagnosticBuilder<'tcx>,
- expression: &'gcx hir::Expr,
- expected: Ty<'tcx>,
- cause_span: Span) {
+ err: &mut DiagnosticBuilder<'tcx>,
+ expression: &'gcx hir::Expr,
+ expected: Ty<'tcx>,
+ cause_span: Span) {
if expected.is_nil() {
// `BlockTailExpression` only relevant if the tail expr would be
// useful on its own.
hir::ExprLoop(..) |
hir::ExprMatch(..) |
hir::ExprBlock(..) => {
- let sp = cause_span.next_point();
+ let sp = self.tcx.sess.codemap().next_point(cause_span);
err.span_suggestion(sp,
"try adding a semicolon",
";".to_string());
let lifetime_count = generics.lifetimes().count();
for leaf_ty in ty.walk() {
- if let ty::TyParam(ParamTy {idx, ..}) = leaf_ty.sty {
+ if let ty::TyParam(ty::ParamTy {idx, ..}) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
tps_used[idx as usize - lifetime_count] = true;
} else if let ty::TyError = leaf_ty.sty {
//! Code related to processing overloaded binary and unary operators.
-use super::FnCtxt;
+use super::{FnCtxt, Needs};
use super::method::MethodCallee;
-use rustc::ty::{self, Ty, TypeFoldable, NoPreference, PreferMutLvalue, TypeVariants};
+use rustc::ty::{self, Ty, TypeFoldable, TypeVariants};
use rustc::ty::TypeVariants::{TyStr, TyRef};
use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow};
use rustc::infer::type_variable::TypeVariableOrigin;
return_ty
};
- let tcx = self.tcx;
- if !tcx.expr_is_lval(lhs_expr) {
+ if !self.is_place_expr(lhs_expr) {
struct_span_err!(
- tcx.sess, lhs_expr.span,
+ self.tcx.sess, lhs_expr.span,
E0067, "invalid left-hand side expression")
.span_label(
lhs_expr.span,
op,
is_assign);
- let lhs_pref = match is_assign {
- IsAssign::Yes => PreferMutLvalue,
- IsAssign::No => NoPreference
+ let lhs_needs = match is_assign {
+ IsAssign::Yes => Needs::MutPlace,
+ IsAssign::No => Needs::None
};
// Find a suitable supertype of the LHS expression's type, by coercing to
// a type variable, to pass as the `Self` to the trait, avoiding invariant
// trait matching creating lifetime constraints that are too strict.
// E.g. adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result
// in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`.
- let lhs_ty = self.check_expr_coercable_to_type_with_lvalue_pref(lhs_expr,
+ let lhs_ty = self.check_expr_coercable_to_type_with_needs(lhs_expr,
self.next_ty_var(TypeVariableOrigin::MiscVariable(lhs_expr.span)),
- lhs_pref);
+ lhs_needs);
let lhs_ty = self.resolve_type_vars_with_obligations(lhs_ty);
// NB: As we have not yet type-checked the RHS, we don't have the
for (i, &impl1_def_id) in impls.iter().enumerate() {
for &impl2_def_id in &impls[(i + 1)..] {
- let used_to_be_allowed = self.tcx.infer_ctxt().enter(|infcx| {
- if let Some(overlap) =
- traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id,
- IntercrateMode::Issue43355)
- {
+ let used_to_be_allowed = traits::overlapping_impls(
+ self.tcx,
+ impl1_def_id,
+ impl2_def_id,
+ IntercrateMode::Issue43355,
+ |overlap| {
self.check_for_common_items_in_impls(
- impl1_def_id, impl2_def_id, overlap, false);
+ impl1_def_id,
+ impl2_def_id,
+ overlap,
+ false,
+ );
false
- } else {
- true
- }
- });
+ },
+ || true,
+ );
if used_to_be_allowed {
- self.tcx.infer_ctxt().enter(|infcx| {
- if let Some(overlap) =
- traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id,
- IntercrateMode::Fixed)
- {
- self.check_for_common_items_in_impls(
- impl1_def_id, impl2_def_id, overlap, true);
- }
- });
+ traits::overlapping_impls(
+ self.tcx,
+ impl1_def_id,
+ impl2_def_id,
+ IntercrateMode::Fixed,
+ |overlap| self.check_for_common_items_in_impls(
+ impl1_def_id,
+ impl2_def_id,
+ overlap,
+ true,
+ ),
+ || (),
+ );
}
}
}
"##,
E0067: r##"
-The left-hand side of a compound assignment expression must be an lvalue
-expression. An lvalue expression represents a memory location and includes
+The left-hand side of a compound assignment expression must be a place
+expression. A place expression represents a memory location and includes
item paths (ie, namespaced variables), dereferences, indexing expressions,
and field references.
```compile_fail,E0067
use std::collections::LinkedList;
-// Bad: assignment to non-lvalue expression
+// Bad: assignment to non-place expression
LinkedList::new() += 1;
// ...
"##,
E0070: r##"
-The left-hand side of an assignment operator must be an lvalue expression. An
-lvalue expression represents a memory location and can be a variable (with
+The left-hand side of an assignment operator must be a place expression. A
+place expression represents a memory location and can be a variable (with
optional namespacing), a dereference, an indexing expression or a field
reference.
More details can be found in the [Expressions] section of the Reference.
-[Expressions]: https://doc.rust-lang.org/reference/expressions.html#lvalues-rvalues-and-temporaries
+[Expressions]: https://doc.rust-lang.org/reference/expressions.html#places-rvalues-and-temporaries
Now, we can go further. Here are some erroneous code examples:
fn some_function() {
SOME_CONST = 14; // error : a constant value cannot be changed!
- 1 = 3; // error : 1 isn't a valid lvalue!
+ 1 = 3; // error : 1 isn't a valid place!
some_other_func() = 4; // error : we can't assign value to a function!
SomeStruct.x = 12; // error : SomeStruct a structure name but it is used
// like a variable!
let def_id = cx.tcx.hir.body_owner_def_id(n);
let param_env = cx.tcx.param_env(def_id);
let substs = Substs::identity_for_item(cx.tcx, def_id);
- let n = cx.tcx.const_eval(param_env.and((def_id, substs))).unwrap();
+ let n = cx.tcx.const_eval(param_env.and((def_id, substs))).unwrap_or_else(|_| {
+ cx.tcx.mk_const(ty::Const {
+ val: ConstVal::Unevaluated(def_id, substs),
+ ty: cx.tcx.types.usize
+ })
+ });
let n = if let ConstVal::Integral(ConstInt::Usize(n)) = n.val {
n.to_string()
} else if let ConstVal::Unevaluated(def_id, _) = n.val {
let mut n = cx.tcx.lift(&n).unwrap();
if let ConstVal::Unevaluated(def_id, substs) = n.val {
let param_env = cx.tcx.param_env(def_id);
- n = cx.tcx.const_eval(param_env.and((def_id, substs))).unwrap()
+ if let Ok(new_n) = cx.tcx.const_eval(param_env.and((def_id, substs))) {
+ n = new_n;
+ }
};
let n = if let ConstVal::Integral(ConstInt::Usize(n)) = n.val {
n.to_string()
// except according to those terms.
use rustc_lint;
-use rustc_driver::{driver, target_features, abort_on_err};
+use rustc_driver::{self, driver, target_features, abort_on_err};
use rustc::session::{self, config};
use rustc::hir::def_id::DefId;
use rustc::hir::def::Def;
use rustc::hir::map as hir_map;
use rustc::lint;
use rustc::util::nodemap::FxHashMap;
-use rustc_trans;
use rustc_resolve as resolve;
use rustc_metadata::creader::CrateLoader;
use rustc_metadata::cstore::CStore;
let mut sess = session::build_session_(
sessopts, cpath, diagnostic_handler, codemap,
);
- let trans = rustc_trans::LlvmTransCrate::new(&sess);
+ let trans = rustc_driver::get_trans(&sess);
let cstore = Rc::new(CStore::new(trans.metadata_loader()));
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let link_out = format!("<a href=\"{link}\"{title}>{content}</a>",
link = link_buf,
title = title.map_or(String::new(),
- |t| format!(" title=\"{}\"", t)),
+ |t| format!(" title=\"{}\"", Escape(&t))),
content = content.unwrap_or(String::new()));
unsafe { hoedown_buffer_put(ob, link_out.as_ptr(), link_out.len()); }
overflow: auto;
}
-.sidebar .current {
+.sidebar .block > ul > li {
margin-right: -20px;
}
-.content, nav { max-width: 960px; }
+.content, nav {
+ max-width: 960px;
+}
/* Everything else */
-.js-only, .hidden { display: none !important; }
+.js-only, .hidden {
+ display: none !important;
+}
.sidebar img {
margin: 20px auto;
border: none;
}
-.location a:first-child { font-weight: 500; }
+.location a:first-child {
+ font-weight: 500;
+}
.block {
padding: 0;
-ms-user-select: none;
user-select: none;
}
-.line-numbers span { cursor: pointer; }
+.line-numbers span {
+ cursor: pointer;
+}
.docblock-short p {
display: inline;
text-overflow: ellipsis;
margin: 0;
}
-.docblock-short code { white-space: nowrap; }
+.docblock-short code {
+ white-space: nowrap;
+}
.docblock h1, .docblock h2, .docblock h3, .docblock h4, .docblock h5 {
border-bottom: 1px solid;
display: inline-block;
}
-#main { position: relative; }
+#main {
+ position: relative;
+}
#main > .since {
top: inherit;
font-family: "Fira Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
padding: 0;
}
-.content .item-list li { margin-bottom: 1em; }
+.content .item-list li {
+ margin-bottom: 1em;
+}
.content .multi-column {
-moz-column-count: 5;
use std::process;
use std::sync::mpsc::channel;
-use rustc_driver::rustc_trans;
-
use externalfiles::ExternalHtml;
use rustc::session::search_paths::SearchPaths;
use rustc::session::config::{ErrorOutputType, RustcOptGroup, nightly_options,
use rustc_driver::driver::phase_2_configure_and_expand;
use rustc_metadata::cstore::CStore;
use rustc_resolve::MakeGlobMap;
-use rustc_trans;
use syntax::ast;
use syntax::codemap::CodeMap;
use syntax::feature_gate::UnstableFeatures;
let mut sess = session::build_session_(
sessopts, Some(input_path.to_owned()), handler, codemap.clone(),
);
- let trans = rustc_trans::LlvmTransCrate::new(&sess);
+ let trans = rustc_driver::get_trans(&sess);
let cstore = Rc::new(CStore::new(trans.metadata_loader()));
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
sess.parse_sess.config =
));
let emitter = errors::emitter::EmitterWriter::new(box Sink(data.clone()),
Some(codemap.clone()),
+ false,
false);
let old = io::set_panic(Some(box Sink(data.clone())));
let _bomb = Bomb(data.clone(), old.unwrap_or(box io::stdout()));
let mut sess = session::build_session_(
sessopts, None, diagnostic_handler, codemap,
);
- let trans = rustc_trans::LlvmTransCrate::new(&sess);
+ let trans = rustc_driver::get_trans(&sess);
let cstore = Rc::new(CStore::new(trans.metadata_loader()));
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
force_alloc_system = []
panic-unwind = ["panic_unwind"]
profiler = ["profiler_builtins"]
+wasm_syscall = []
{
type Output = V;
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `HashMap`.
#[inline]
- fn index(&self, index: &Q) -> &V {
- self.get(index).expect("no entry found for key")
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("no entry found for key")
}
}
assert!(nan.to_degrees().is_nan());
assert_eq!(inf.to_degrees(), inf);
assert_eq!(neg_inf.to_degrees(), neg_inf);
+ assert_eq!(1_f32.to_degrees(), 57.2957795130823208767981548141051703);
}
#[test]
/// The returned slice will **not** contain the trailing nul terminator that this C
/// string has.
///
- /// > **Note**: This method is currently implemented as a 0-cost cast, but
- /// > it is planned to alter its definition in the future to perform the
- /// > length calculation whenever this method is called.
+ /// > **Note**: This method is currently implemented as a constant-time
+ /// > cast, but it is planned to alter its definition in the future to
+ /// > perform the length calculation whenever this method is called.
///
/// # Examples
///
/// it will return an error with details of where UTF-8 validation failed.
///
/// > **Note**: This method is currently implemented to check for validity
- /// > after a 0-cost cast, but it is planned to alter its definition in the
- /// > future to perform the length calculation in addition to the UTF-8
- /// > check whenever this method is called.
+ /// > after a constant-time cast, but it is planned to alter its definition
+ /// > in the future to perform the length calculation in addition to the
+ /// > UTF-8 check whenever this method is called.
///
/// [`&str`]: ../primitive.str.html
///
/// with the result.
///
/// > **Note**: This method is currently implemented to check for validity
- /// > after a 0-cost cast, but it is planned to alter its definition in the
- /// > future to perform the length calculation in addition to the UTF-8
- /// > check whenever this method is called.
+ /// > after a constant-time cast, but it is planned to alter its definition
+ /// > in the future to perform the length calculation in addition to the
+ /// > UTF-8 check whenever this method is called.
///
/// [`Cow`]: ../borrow/enum.Cow.html
/// [`Borrowed`]: ../borrow/enum.Cow.html#variant.Borrowed
self.inner.file_attr().map(Metadata)
}
- /// Creates a new independently owned handle to the underlying file.
- ///
- /// The returned `File` is a reference to the same state that this object
- /// references. Both handles will read and write with the same cursor
- /// position.
+ /// Creates a new `File` instance that shares the same underlying file handle
+ /// as the existing `File` instance. Reads, writes, and seeks will affect
+ /// both `File` instances simultaneously.
///
/// # Examples
///
+ /// Create two handles for a file named `foo.txt`:
+ ///
/// ```no_run
/// use std::fs::File;
///
/// # fn foo() -> std::io::Result<()> {
- /// let mut f = File::open("foo.txt")?;
- /// let file_copy = f.try_clone()?;
+ /// let mut file = File::open("foo.txt")?;
+ /// let file_copy = file.try_clone()?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// Assuming there’s a file named `foo.txt` with contents `abcdef\n`, create
+ /// two handles, seek one of them, and read the remaining bytes from the
+ /// other handle:
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::SeekFrom;
+ /// use std::io::prelude::*;
+ ///
+ /// # fn foo() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ /// let mut file_copy = file.try_clone()?;
+ ///
+ /// file.seek(SeekFrom::Start(3))?;
+ ///
+ /// let mut contents = vec![];
+ /// file_copy.read_to_end(&mut contents)?;
+ /// assert_eq!(contents, b"def\n");
/// # Ok(())
/// # }
/// ```
self.0.accessed().map(FromInner::from_inner)
}
- /// Returns the creation time listed in the this metadata.
+ /// Returns the creation time listed in this metadata.
///
/// The returned value corresponds to the `birthtime` field of `stat` on
/// Unix platforms and the `ftCreationTime` field on Windows platforms.
/// # if cfg!(target_os = "linux") {
/// use std::io;
///
- /// let error = io::Error::from_raw_os_error(98);
- /// assert_eq!(error.kind(), io::ErrorKind::AddrInUse);
+ /// let error = io::Error::from_raw_os_error(22);
+ /// assert_eq!(error.kind(), io::ErrorKind::InvalidInput);
/// # }
/// ```
///
/// # if cfg!(windows) {
/// use std::io;
///
- /// let error = io::Error::from_raw_os_error(10048);
- /// assert_eq!(error.kind(), io::ErrorKind::AddrInUse);
+ /// let error = io::Error::from_raw_os_error(10022);
+ /// assert_eq!(error.kind(), io::ErrorKind::InvalidInput);
/// # }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
}
assert!(events > 0);
}
+
+ #[test]
+ fn test_command_implements_send() {
+ fn take_send_type<T: Send>(_: T) {}
+ take_send_type(Command::new(""))
+ }
}
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
- pub unsafe fn current() -> Option<usize> {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
None
}
- pub unsafe fn init() -> Option<usize> {
+ pub unsafe fn init() -> Option<Guard> {
None
}
}
}
pub mod guard {
- pub unsafe fn current() -> Option<usize> { None }
- pub unsafe fn init() -> Option<usize> { None }
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> { None }
+ pub unsafe fn init() -> Option<Guard> { None }
}
// other keys.
program: CString,
args: Vec<CString>,
- argv: Vec<*const c_char>,
+ argv: Argv,
env: CommandEnv<DefaultEnvKey>,
cwd: Option<CString>,
stderr: Option<Stdio>,
}
+// Create a new type for argv, so that we can make it `Send`
+struct Argv(Vec<*const c_char>);
+
+// It is safe to make Argv Send, because it contains pointers to memory owned by `Command.args`
+unsafe impl Send for Argv {}
+
// passed back to std::process with the pipes connected to the child, if any
// were requested
pub struct StdioPipes {
let mut saw_nul = false;
let program = os2c(program, &mut saw_nul);
Command {
- argv: vec![program.as_ptr(), ptr::null()],
+ argv: Argv(vec![program.as_ptr(), ptr::null()]),
program,
args: Vec::new(),
env: Default::default(),
// Overwrite the trailing NULL pointer in `argv` and then add a new null
// pointer.
let arg = os2c(arg, &mut self.saw_nul);
- self.argv[self.args.len() + 1] = arg.as_ptr();
- self.argv.push(ptr::null());
+ self.argv.0[self.args.len() + 1] = arg.as_ptr();
+ self.argv.0.push(ptr::null());
// Also make sure we keep track of the owned value to schedule a
// destructor for this memory.
self.saw_nul
}
pub fn get_argv(&self) -> &Vec<*const c_char> {
- &self.argv
+ &self.argv.0
}
#[allow(dead_code)]
use sys_common::thread_info;
- // This is initialized in init() and only read from after
- static mut PAGE_SIZE: usize = 0;
-
#[cfg(any(target_os = "linux", target_os = "android"))]
unsafe fn siginfo_si_addr(info: *mut libc::siginfo_t) -> usize {
#[repr(C)]
_data: *mut libc::c_void) {
use sys_common::util::report_overflow;
- let guard = thread_info::stack_guard().unwrap_or(0);
+ let guard = thread_info::stack_guard().unwrap_or(0..0);
let addr = siginfo_si_addr(info);
// If the faulting address is within the guard page, then we print a
// message saying so and abort.
- if guard != 0 && guard - PAGE_SIZE <= addr && addr < guard {
+ if guard.start <= addr && addr < guard.end {
report_overflow();
rtabort!("stack overflow");
} else {
static mut MAIN_ALTSTACK: *mut libc::c_void = ptr::null_mut();
pub unsafe fn init() {
- PAGE_SIZE = ::sys::os::page_size();
-
let mut action: sigaction = mem::zeroed();
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
action.sa_sigaction = signal_handler as sighandler_t;
not(target_os = "solaris")))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
- pub unsafe fn current() -> Option<usize> { None }
- pub unsafe fn init() -> Option<usize> { None }
+ use ops::Range;
+ pub type Guard = Range<usize>;
+ pub unsafe fn current() -> Option<Guard> { None }
+ pub unsafe fn init() -> Option<Guard> { None }
}
use libc;
use libc::mmap;
use libc::{PROT_NONE, MAP_PRIVATE, MAP_ANON, MAP_FAILED, MAP_FIXED};
+ use ops::Range;
use sys::os;
- #[cfg(any(target_os = "macos",
- target_os = "bitrig",
- target_os = "openbsd",
- target_os = "solaris"))]
+ // This is initialized in init() and only read from after
+ static mut PAGE_SIZE: usize = 0;
+
+ pub type Guard = Range<usize>;
+
+ #[cfg(target_os = "solaris")]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let mut current_stack: libc::stack_t = ::mem::zeroed();
+ assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
+ Some(current_stack.ss_sp)
+ }
+
+ #[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
- current().map(|s| s as *mut libc::c_void)
+ let stackaddr = libc::pthread_get_stackaddr_np(libc::pthread_self()) as usize -
+ libc::pthread_get_stacksize_np(libc::pthread_self());
+ Some(stackaddr as *mut libc::c_void)
+ }
+
+ #[cfg(any(target_os = "openbsd", target_os = "bitrig"))]
+ unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
+ let mut current_stack: libc::stack_t = ::mem::zeroed();
+ assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(),
+ &mut current_stack), 0);
+
+ let extra = if cfg!(target_os = "bitrig") {3} else {1} * PAGE_SIZE;
+ let stackaddr = if libc::pthread_main_np() == 1 {
+ // main thread
+ current_stack.ss_sp as usize - current_stack.ss_size + extra
+ } else {
+ // new thread
+ current_stack.ss_sp as usize - current_stack.ss_size
+ };
+ Some(stackaddr as *mut libc::c_void)
}
#[cfg(any(target_os = "android", target_os = "freebsd",
ret
}
- pub unsafe fn init() -> Option<usize> {
- let psize = os::page_size();
+ pub unsafe fn init() -> Option<Guard> {
+ PAGE_SIZE = os::page_size();
+
let mut stackaddr = get_stack_start()?;
// Ensure stackaddr is page aligned! A parent process might
// stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize
- let remainder = (stackaddr as usize) % psize;
+ let remainder = (stackaddr as usize) % PAGE_SIZE;
if remainder != 0 {
- stackaddr = ((stackaddr as usize) + psize - remainder)
+ stackaddr = ((stackaddr as usize) + PAGE_SIZE - remainder)
as *mut libc::c_void;
}
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
- Some(stackaddr as usize)
+ let stackaddr = stackaddr as usize;
+ Some(stackaddr - PAGE_SIZE..stackaddr)
} else {
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
// stack overflow.
- let result = mmap(stackaddr, psize, PROT_NONE,
+ let result = mmap(stackaddr, PAGE_SIZE, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
if result != stackaddr || result == MAP_FAILED {
panic!("failed to allocate a guard page");
}
+ let guardaddr = stackaddr as usize;
let offset = if cfg!(target_os = "freebsd") {
2
} else {
1
};
- Some(stackaddr as usize + offset * psize)
+ Some(guardaddr..guardaddr + offset * PAGE_SIZE)
}
}
- #[cfg(target_os = "solaris")]
- pub unsafe fn current() -> Option<usize> {
- let mut current_stack: libc::stack_t = ::mem::zeroed();
- assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
- Some(current_stack.ss_sp as usize)
- }
-
- #[cfg(target_os = "macos")]
- pub unsafe fn current() -> Option<usize> {
- Some(libc::pthread_get_stackaddr_np(libc::pthread_self()) as usize -
- libc::pthread_get_stacksize_np(libc::pthread_self()))
- }
-
- #[cfg(any(target_os = "openbsd", target_os = "bitrig"))]
- pub unsafe fn current() -> Option<usize> {
- let mut current_stack: libc::stack_t = ::mem::zeroed();
- assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(),
- &mut current_stack), 0);
-
- let extra = if cfg!(target_os = "bitrig") {3} else {1} * os::page_size();
- Some(if libc::pthread_main_np() == 1 {
- // main thread
- current_stack.ss_sp as usize - current_stack.ss_size + extra
- } else {
- // new thread
- current_stack.ss_sp as usize - current_stack.ss_size
- })
+ #[cfg(any(target_os = "macos",
+ target_os = "bitrig",
+ target_os = "openbsd",
+ target_os = "solaris"))]
+ pub unsafe fn current() -> Option<Guard> {
+ let stackaddr = get_stack_start()? as usize;
+ Some(stackaddr - PAGE_SIZE..stackaddr)
}
#[cfg(any(target_os = "android", target_os = "freebsd",
target_os = "linux", target_os = "netbsd", target_os = "l4re"))]
- pub unsafe fn current() -> Option<usize> {
+ pub unsafe fn current() -> Option<Guard> {
let mut ret = None;
let mut attr: libc::pthread_attr_t = ::mem::zeroed();
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr,
&mut size), 0);
+ let stackaddr = stackaddr as usize;
ret = if cfg!(target_os = "freebsd") {
- Some(stackaddr as usize - guardsize)
+ // FIXME does freebsd really fault *below* the guard addr?
+ let guardaddr = stackaddr - guardsize;
+ Some(guardaddr - PAGE_SIZE..guardaddr)
} else if cfg!(target_os = "netbsd") {
- Some(stackaddr as usize)
+ Some(stackaddr - guardsize..stackaddr)
+ } else if cfg!(all(target_os = "linux", target_env = "gnu")) {
+ // glibc used to include the guard area within the stack, as noted in the BUGS
+ // section of `man pthread_attr_getguardsize`. This has been corrected starting
+ // with glibc 2.27, and in some distro backports, so the guard is now placed at the
+ // end (below) the stack. There's no easy way for us to know which we have at
+ // runtime, so we'll just match any fault in the range right above or below the
+ // stack base to call that fault a stack overflow.
+ Some(stackaddr - guardsize..stackaddr + guardsize)
} else {
- Some(stackaddr as usize + guardsize)
+ Some(stackaddr..stackaddr + guardsize)
};
}
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
use ffi::OsString;
use marker::PhantomData;
-use mem;
use vec;
+use sys::ArgsSysCall;
pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
// On wasm these should always be null, so there's nothing for us to do here
}
pub fn args() -> Args {
- // When the runtime debugging is enabled we'll link to some extra runtime
- // functions to actually implement this. These are for now just implemented
- // in a node.js script but they're off by default as they're sort of weird
- // in a web-wasm world.
- if !super::DEBUG {
- return Args {
- iter: Vec::new().into_iter(),
- _dont_send_or_sync_me: PhantomData,
- }
- }
-
- // You'll find the definitions of these in `src/etc/wasm32-shim.js`. These
- // are just meant for debugging and should not be relied on.
- extern {
- fn rust_wasm_args_count() -> usize;
- fn rust_wasm_args_arg_size(a: usize) -> usize;
- fn rust_wasm_args_arg_fill(a: usize, ptr: *mut u8);
- }
-
- unsafe {
- let cnt = rust_wasm_args_count();
- let mut v = Vec::with_capacity(cnt);
- for i in 0..cnt {
- let n = rust_wasm_args_arg_size(i);
- let mut data = vec![0; n];
- rust_wasm_args_arg_fill(i, data.as_mut_ptr());
- v.push(mem::transmute::<Vec<u8>, OsString>(data));
- }
- Args {
- iter: v.into_iter(),
- _dont_send_or_sync_me: PhantomData,
- }
+ let v = ArgsSysCall::perform();
+ Args {
+ iter: v.into_iter(),
+ _dont_send_or_sync_me: PhantomData,
}
}
use io;
use os::raw::c_char;
-
-// Right now the wasm backend doesn't even have the ability to print to the
-// console by default. Wasm can't import anything from JS! (you have to
-// explicitly provide it).
-//
-// Sometimes that's a real bummer, though, so this flag can be set to `true` to
-// enable calling various shims defined in `src/etc/wasm32-shim.js` which should
-// help receive debug output and see what's going on. In general this flag
-// currently controls "will we call out to our own defined shims in node.js",
-// and this flag should always be `false` for release builds.
-const DEBUG: bool = false;
+use ptr;
+use sys::os_str::Buf;
+use sys_common::{AsInner, FromInner};
+use ffi::{OsString, OsStr};
+use time::Duration;
pub mod args;
#[cfg(feature = "backtrace")]
}
pub unsafe fn abort_internal() -> ! {
- ::intrinsics::abort();
+ ExitSysCall::perform(1)
}
// We don't have randomness yet, but I totally used a random number generator to
pub fn hashmap_random_keys() -> (u64, u64) {
(1, 2)
}
+
+// Implement a minimal set of system calls to enable basic IO
+pub enum SysCallIndex {
+ Read = 0,
+ Write = 1,
+ Exit = 2,
+ Args = 3,
+ GetEnv = 4,
+ SetEnv = 5,
+ Time = 6,
+}
+
+#[repr(C)]
+pub struct ReadSysCall {
+ fd: usize,
+ ptr: *mut u8,
+ len: usize,
+ result: usize,
+}
+
+impl ReadSysCall {
+ pub fn perform(fd: usize, buffer: &mut [u8]) -> usize {
+ let mut call_record = ReadSysCall {
+ fd,
+ len: buffer.len(),
+ ptr: buffer.as_mut_ptr(),
+ result: 0
+ };
+ if unsafe { syscall(SysCallIndex::Read, &mut call_record) } {
+ call_record.result
+ } else {
+ 0
+ }
+ }
+}
+
+#[repr(C)]
+pub struct WriteSysCall {
+ fd: usize,
+ ptr: *const u8,
+ len: usize,
+}
+
+impl WriteSysCall {
+ pub fn perform(fd: usize, buffer: &[u8]) {
+ let mut call_record = WriteSysCall {
+ fd,
+ len: buffer.len(),
+ ptr: buffer.as_ptr()
+ };
+ unsafe { syscall(SysCallIndex::Write, &mut call_record); }
+ }
+}
+
+#[repr(C)]
+pub struct ExitSysCall {
+ code: usize,
+}
+
+impl ExitSysCall {
+ pub fn perform(code: usize) -> ! {
+ let mut call_record = ExitSysCall {
+ code
+ };
+ unsafe {
+ syscall(SysCallIndex::Exit, &mut call_record);
+ ::intrinsics::abort();
+ }
+ }
+}
+
+fn receive_buffer<E, F: FnMut(&mut [u8]) -> Result<usize, E>>(estimate: usize, mut f: F)
+ -> Result<Vec<u8>, E>
+{
+ let mut buffer = vec![0; estimate];
+ loop {
+ let result = f(&mut buffer)?;
+ if result <= buffer.len() {
+ buffer.truncate(result);
+ break;
+ }
+ buffer.resize(result, 0);
+ }
+ Ok(buffer)
+}
+
+#[repr(C)]
+pub struct ArgsSysCall {
+ ptr: *mut u8,
+ len: usize,
+ result: usize
+}
+
+impl ArgsSysCall {
+ pub fn perform() -> Vec<OsString> {
+ receive_buffer(1024, |buffer| -> Result<usize, !> {
+ let mut call_record = ArgsSysCall {
+ len: buffer.len(),
+ ptr: buffer.as_mut_ptr(),
+ result: 0
+ };
+ if unsafe { syscall(SysCallIndex::Args, &mut call_record) } {
+ Ok(call_record.result)
+ } else {
+ Ok(0)
+ }
+ })
+ .unwrap()
+ .split(|b| *b == 0)
+ .map(|s| FromInner::from_inner(Buf { inner: s.to_owned() }))
+ .collect()
+ }
+}
+
+#[repr(C)]
+pub struct GetEnvSysCall {
+ key_ptr: *const u8,
+ key_len: usize,
+ value_ptr: *mut u8,
+ value_len: usize,
+ result: usize
+}
+
+impl GetEnvSysCall {
+ pub fn perform(key: &OsStr) -> Option<OsString> {
+ let key_buf = &AsInner::as_inner(key).inner;
+ receive_buffer(64, |buffer| {
+ let mut call_record = GetEnvSysCall {
+ key_len: key_buf.len(),
+ key_ptr: key_buf.as_ptr(),
+ value_len: buffer.len(),
+ value_ptr: buffer.as_mut_ptr(),
+ result: !0usize
+ };
+ if unsafe { syscall(SysCallIndex::GetEnv, &mut call_record) } {
+ if call_record.result == !0usize {
+ Err(())
+ } else {
+ Ok(call_record.result)
+ }
+ } else {
+ Err(())
+ }
+ }).ok().map(|s| {
+ FromInner::from_inner(Buf { inner: s })
+ })
+ }
+}
+
+#[repr(C)]
+pub struct SetEnvSysCall {
+ key_ptr: *const u8,
+ key_len: usize,
+ value_ptr: *const u8,
+ value_len: usize
+}
+
+impl SetEnvSysCall {
+ pub fn perform(key: &OsStr, value: Option<&OsStr>) {
+ let key_buf = &AsInner::as_inner(key).inner;
+ let value_buf = value.map(|v| &AsInner::as_inner(v).inner);
+ let mut call_record = SetEnvSysCall {
+ key_len: key_buf.len(),
+ key_ptr: key_buf.as_ptr(),
+ value_len: value_buf.map(|v| v.len()).unwrap_or(!0usize),
+ value_ptr: value_buf.map(|v| v.as_ptr()).unwrap_or(ptr::null())
+ };
+ unsafe { syscall(SysCallIndex::SetEnv, &mut call_record); }
+ }
+}
+
+pub enum TimeClock {
+ Monotonic = 0,
+ System = 1,
+}
+
+#[repr(C)]
+pub struct TimeSysCall {
+ clock: usize,
+ secs_hi: usize,
+ secs_lo: usize,
+ nanos: usize
+}
+
+impl TimeSysCall {
+ pub fn perform(clock: TimeClock) -> Duration {
+ let mut call_record = TimeSysCall {
+ clock: clock as usize,
+ secs_hi: 0,
+ secs_lo: 0,
+ nanos: 0
+ };
+ if unsafe { syscall(SysCallIndex::Time, &mut call_record) } {
+ Duration::new(
+ ((call_record.secs_hi as u64) << 32) | (call_record.secs_lo as u64),
+ call_record.nanos as u32
+ )
+ } else {
+ panic!("Time system call is not implemented by WebAssembly host");
+ }
+ }
+}
+
+unsafe fn syscall<T>(index: SysCallIndex, data: &mut T) -> bool {
+ #[cfg(feature = "wasm_syscall")]
+ extern {
+ #[no_mangle]
+ fn rust_wasm_syscall(index: usize, data: *mut Void) -> usize;
+ }
+
+ #[cfg(not(feature = "wasm_syscall"))]
+ unsafe fn rust_wasm_syscall(_index: usize, _data: *mut Void) -> usize { 0 }
+
+ rust_wasm_syscall(index as usize, data as *mut T as *mut Void) != 0
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::intrinsics;
-
use error::Error as StdError;
use ffi::{OsString, OsStr};
use fmt;
use io;
-use mem;
use path::{self, PathBuf};
use str;
-use sys::{unsupported, Void};
+use sys::{unsupported, Void, ExitSysCall, GetEnvSysCall, SetEnvSysCall};
pub fn errno() -> i32 {
0
}
pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
- // If we're debugging the runtime then we actually probe node.js to ask for
- // the value of environment variables to help provide inputs to programs.
- // The `extern` shims here are defined in `src/etc/wasm32-shim.js` and are
- // intended for debugging only, you should not rely on them.
- if !super::DEBUG {
- return Ok(None)
- }
-
- extern {
- fn rust_wasm_getenv_len(k: *const u8, kl: usize) -> isize;
- fn rust_wasm_getenv_data(k: *const u8, kl: usize, v: *mut u8);
- }
- unsafe {
- let k: &[u8] = mem::transmute(k);
- let n = rust_wasm_getenv_len(k.as_ptr(), k.len());
- if n == -1 {
- return Ok(None)
- }
- let mut data = vec![0; n as usize];
- rust_wasm_getenv_data(k.as_ptr(), k.len(), data.as_mut_ptr());
- Ok(Some(mem::transmute(data)))
- }
+ Ok(GetEnvSysCall::perform(k))
}
-pub fn setenv(_k: &OsStr, _v: &OsStr) -> io::Result<()> {
- unsupported()
+pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
+ Ok(SetEnvSysCall::perform(k, Some(v)))
}
-pub fn unsetenv(_n: &OsStr) -> io::Result<()> {
- unsupported()
+pub fn unsetenv(k: &OsStr) -> io::Result<()> {
+ Ok(SetEnvSysCall::perform(k, None))
}
pub fn temp_dir() -> PathBuf {
}
pub fn exit(_code: i32) -> ! {
- unsafe { intrinsics::abort() }
+ ExitSysCall::perform(_code as isize as usize)
}
pub fn getpid() -> u32 {
// except according to those terms.
use io;
-use sys::{Void, unsupported};
+use sys::{ReadSysCall, WriteSysCall};
-pub struct Stdin(Void);
+pub struct Stdin;
pub struct Stdout;
pub struct Stderr;
impl Stdin {
pub fn new() -> io::Result<Stdin> {
- unsupported()
+ Ok(Stdin)
}
- pub fn read(&self, _data: &mut [u8]) -> io::Result<usize> {
- match self.0 {}
+ pub fn read(&self, data: &mut [u8]) -> io::Result<usize> {
+ Ok(ReadSysCall::perform(0, data))
}
}
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
- // If runtime debugging is enabled at compile time we'll invoke some
- // runtime functions that are defined in our src/etc/wasm32-shim.js
- // debugging script. Note that this ffi function call is intended
- // *purely* for debugging only and should not be relied upon.
- if !super::DEBUG {
- return unsupported()
- }
- extern {
- fn rust_wasm_write_stdout(data: *const u8, len: usize);
- }
- unsafe {
- rust_wasm_write_stdout(data.as_ptr(), data.len())
- }
+ WriteSysCall::perform(1, data);
Ok(data.len())
}
}
pub fn write(&self, data: &[u8]) -> io::Result<usize> {
- // See comments in stdout for what's going on here.
- if !super::DEBUG {
- return unsupported()
- }
- extern {
- fn rust_wasm_write_stderr(data: *const u8, len: usize);
- }
- unsafe {
- rust_wasm_write_stderr(data.as_ptr(), data.len())
- }
+ WriteSysCall::perform(2, data);
Ok(data.len())
}
}
pub mod guard {
- pub unsafe fn current() -> Option<usize> { None }
- pub unsafe fn init() -> Option<usize> { None }
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> { None }
+ pub unsafe fn init() -> Option<Guard> { None }
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use fmt;
use time::Duration;
+use sys::{TimeSysCall, TimeClock};
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
-pub struct Instant;
+pub struct Instant(Duration);
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct SystemTime;
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+pub struct SystemTime(Duration);
-pub const UNIX_EPOCH: SystemTime = SystemTime;
+pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
impl Instant {
pub fn now() -> Instant {
- panic!("not supported on web assembly");
+ Instant(TimeSysCall::perform(TimeClock::Monotonic))
}
- pub fn sub_instant(&self, _other: &Instant) -> Duration {
- panic!("can't sub yet");
+ pub fn sub_instant(&self, other: &Instant) -> Duration {
+ self.0 - other.0
}
- pub fn add_duration(&self, _other: &Duration) -> Instant {
- panic!("can't add yet");
+ pub fn add_duration(&self, other: &Duration) -> Instant {
+ Instant(self.0 + *other)
}
- pub fn sub_duration(&self, _other: &Duration) -> Instant {
- panic!("can't sub yet");
+ pub fn sub_duration(&self, other: &Duration) -> Instant {
+ Instant(self.0 - *other)
}
}
impl SystemTime {
pub fn now() -> SystemTime {
- panic!("not supported on web assembly");
+ SystemTime(TimeSysCall::perform(TimeClock::System))
}
- pub fn sub_time(&self, _other: &SystemTime)
+ pub fn sub_time(&self, other: &SystemTime)
-> Result<Duration, Duration> {
- panic!()
- }
-
- pub fn add_duration(&self, _other: &Duration) -> SystemTime {
- panic!()
+ self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0)
}
- pub fn sub_duration(&self, _other: &Duration) -> SystemTime {
- panic!()
+ pub fn add_duration(&self, other: &Duration) -> SystemTime {
+ SystemTime(self.0 + *other)
}
-}
-impl fmt::Debug for SystemTime {
- fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result {
- panic!()
+ pub fn sub_duration(&self, other: &Duration) -> SystemTime {
+ SystemTime(self.0 - *other)
}
}
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
- pub unsafe fn current() -> Option<usize> { None }
- pub unsafe fn init() -> Option<usize> { None }
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> { None }
+ pub unsafe fn init() -> Option<Guard> { None }
}
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
use cell::RefCell;
+use sys::thread::guard::Guard;
use thread::Thread;
struct ThreadInfo {
- stack_guard: Option<usize>,
+ stack_guard: Option<Guard>,
thread: Thread,
}
ThreadInfo::with(|info| info.thread.clone())
}
-pub fn stack_guard() -> Option<usize> {
- ThreadInfo::with(|info| info.stack_guard).and_then(|o| o)
+pub fn stack_guard() -> Option<Guard> {
+ ThreadInfo::with(|info| info.stack_guard.clone()).and_then(|o| o)
}
-pub fn set(stack_guard: Option<usize>, thread: Thread) {
+pub fn set(stack_guard: Option<Guard>, thread: Thread) {
THREAD_INFO.with(|c| assert!(c.borrow().is_none()));
THREAD_INFO.with(move |c| *c.borrow_mut() = Some(ThreadInfo{
stack_guard,
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Temporal quantification.
+//!
+//! Example:
+//!
+//! ```
+//! use std::time::Duration;
+//!
+//! let five_seconds = Duration::new(5, 0);
+//! // both declarations are equivalent
+//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5));
+//! ```
+
+#![stable(feature = "time", since = "1.3.0")]
+
+use error::Error;
+use fmt;
+use ops::{Add, Sub, AddAssign, SubAssign};
+use sys::time;
+use sys_common::FromInner;
+
+#[stable(feature = "time", since = "1.3.0")]
+pub use core::time::Duration;
+
+/// A measurement of a monotonically nondecreasing clock.
+/// Opaque and useful only with `Duration`.
+///
+/// Instants are always guaranteed to be no less than any previously measured
+/// instant when created, and are often useful for tasks such as measuring
+/// benchmarks or timing how long an operation takes.
+///
+/// Note, however, that instants are not guaranteed to be **steady**. In other
+/// words, each tick of the underlying clock may not be the same length (e.g.
+/// some seconds may be longer than others). An instant may jump forwards or
+/// experience time dilation (slow down or speed up), but it will never go
+/// backwards.
+///
+/// Instants are opaque types that can only be compared to one another. There is
+/// no method to get "the number of seconds" from an instant. Instead, it only
+/// allows measuring the duration between two instants (or comparing two
+/// instants).
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, Instant};
+/// use std::thread::sleep;
+///
+/// fn main() {
+/// let now = Instant::now();
+///
+/// // we sleep for 2 seconds
+/// sleep(Duration::new(2, 0));
+/// // it prints '2'
+/// println!("{}", now.elapsed().as_secs());
+/// }
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct Instant(time::Instant);
+
+/// A measurement of the system clock, useful for talking to
+/// external entities like the file system or other processes.
+///
+/// Distinct from the [`Instant`] type, this time measurement **is not
+/// monotonic**. This means that you can save a file to the file system, then
+/// save another file to the file system, **and the second file has a
+/// `SystemTime` measurement earlier than the first**. In other words, an
+/// operation that happens after another operation in real time may have an
+/// earlier `SystemTime`!
+///
+/// Consequently, comparing two `SystemTime` instances to learn about the
+/// duration between them returns a [`Result`] instead of an infallible [`Duration`]
+/// to indicate that this sort of time drift may happen and needs to be handled.
+///
+/// Although a `SystemTime` cannot be directly inspected, the [`UNIX_EPOCH`]
+/// constant is provided in this module as an anchor in time to learn
+/// information about a `SystemTime`. By calculating the duration from this
+/// fixed point in time, a `SystemTime` can be converted to a human-readable time,
+/// or perhaps some other string representation.
+///
+/// [`Instant`]: ../../std/time/struct.Instant.html
+/// [`Result`]: ../../std/result/enum.Result.html
+/// [`Duration`]: ../../std/time/struct.Duration.html
+/// [`UNIX_EPOCH`]: ../../std/time/constant.UNIX_EPOCH.html
+///
+/// Example:
+///
+/// ```no_run
+/// use std::time::{Duration, SystemTime};
+/// use std::thread::sleep;
+///
+/// fn main() {
+/// let now = SystemTime::now();
+///
+/// // we sleep for 2 seconds
+/// sleep(Duration::new(2, 0));
+/// match now.elapsed() {
+/// Ok(elapsed) => {
+/// // it prints '2'
+/// println!("{}", elapsed.as_secs());
+/// }
+/// Err(e) => {
+/// // an error occurred!
+/// println!("Error: {:?}", e);
+/// }
+/// }
+/// }
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct SystemTime(time::SystemTime);
+
+/// An error returned from the `duration_since` and `elapsed` methods on
+/// `SystemTime`, used to learn how far in the opposite direction a system time
+/// lies.
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::thread::sleep;
+/// use std::time::{Duration, SystemTime};
+///
+/// let sys_time = SystemTime::now();
+/// sleep(Duration::from_secs(1));
+/// let new_sys_time = SystemTime::now();
+/// match sys_time.duration_since(new_sys_time) {
+/// Ok(_) => {}
+/// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
+/// }
+/// ```
+#[derive(Clone, Debug)]
+#[stable(feature = "time2", since = "1.8.0")]
+pub struct SystemTimeError(Duration);
+
+impl Instant {
+ /// Returns an instant corresponding to "now".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Instant;
+ ///
+ /// let now = Instant::now();
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn now() -> Instant {
+ Instant(time::Instant::now())
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `earlier` is later than `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::time::{Duration, Instant};
+ /// use std::thread::sleep;
+ ///
+ /// let now = Instant::now();
+ /// sleep(Duration::new(1, 0));
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.duration_since(now));
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration_since(&self, earlier: Instant) -> Duration {
+ self.0.sub_instant(&earlier.0)
+ }
+
+ /// Returns the amount of time elapsed since this instant was created.
+ ///
+ /// # Panics
+ ///
+ /// This function may panic if the current time is earlier than this
+ /// instant, which is something that can happen if an `Instant` is
+ /// produced synthetically.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, Instant};
+ ///
+ /// let instant = Instant::now();
+ /// let three_secs = Duration::from_secs(3);
+ /// sleep(three_secs);
+ /// assert!(instant.elapsed() >= three_secs);
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn elapsed(&self) -> Duration {
+ Instant::now() - *self
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Add<Duration> for Instant {
+ type Output = Instant;
+
+ fn add(self, other: Duration) -> Instant {
+ Instant(self.0.add_duration(&other))
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for Instant {
+ fn add_assign(&mut self, other: Duration) {
+ *self = *self + other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Duration> for Instant {
+ type Output = Instant;
+
+ fn sub(self, other: Duration) -> Instant {
+ Instant(self.0.sub_duration(&other))
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for Instant {
+ fn sub_assign(&mut self, other: Duration) {
+ *self = *self - other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Instant> for Instant {
+ type Output = Duration;
+
+ fn sub(self, other: Instant) -> Duration {
+ self.duration_since(other)
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Debug for Instant {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl SystemTime {
+ /// Returns the system time corresponding to "now".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::SystemTime;
+ ///
+ /// let sys_time = SystemTime::now();
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn now() -> SystemTime {
+ SystemTime(time::SystemTime::now())
+ }
+
+ /// Returns the amount of time elapsed from an earlier point in time.
+ ///
+ /// This function may fail because measurements taken earlier are not
+ /// guaranteed to always be before later measurements (due to anomalies such
+ /// as the system clock being adjusted either forwards or backwards).
+ ///
+ /// If successful, [`Ok`]`(`[`Duration`]`)` is returned where the duration represents
+ /// the amount of time elapsed from the specified measurement to this one.
+ ///
+ /// Returns an [`Err`] if `earlier` is later than `self`, and the error
+ /// contains how far from `self` the time is.
+ ///
+ /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
+ /// [`Duration`]: ../../std/time/struct.Duration.html
+ /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::SystemTime;
+ ///
+ /// let sys_time = SystemTime::now();
+ /// let difference = sys_time.duration_since(sys_time)
+ /// .expect("SystemTime::duration_since failed");
+ /// println!("{:?}", difference);
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration_since(&self, earlier: SystemTime)
+ -> Result<Duration, SystemTimeError> {
+ self.0.sub_time(&earlier.0).map_err(SystemTimeError)
+ }
+
+ /// Returns the amount of time elapsed since this system time was created.
+ ///
+ /// This function may fail as the underlying system clock is susceptible to
+ /// drift and updates (e.g. the system clock could go backwards), so this
+ /// function may not always succeed. If successful, [`Ok`]`(`[`Duration`]`)` is
+ /// returned where the duration represents the amount of time elapsed from
+ /// this time measurement to the current time.
+ ///
+ /// Returns an [`Err`] if `self` is later than the current system time, and
+ /// the error contains how far from the current system time `self` is.
+ ///
+ /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
+ /// [`Duration`]: ../../std/time/struct.Duration.html
+ /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, SystemTime};
+ ///
+ /// let sys_time = SystemTime::now();
+ /// let one_sec = Duration::from_secs(1);
+ /// sleep(one_sec);
+ /// assert!(sys_time.elapsed().unwrap() >= one_sec);
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn elapsed(&self) -> Result<Duration, SystemTimeError> {
+ SystemTime::now().duration_since(*self)
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Add<Duration> for SystemTime {
+ type Output = SystemTime;
+
+ fn add(self, dur: Duration) -> SystemTime {
+ SystemTime(self.0.add_duration(&dur))
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign<Duration> for SystemTime {
+ fn add_assign(&mut self, other: Duration) {
+ *self = *self + other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Sub<Duration> for SystemTime {
+ type Output = SystemTime;
+
+ fn sub(self, dur: Duration) -> SystemTime {
+ SystemTime(self.0.sub_duration(&dur))
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign<Duration> for SystemTime {
+ fn sub_assign(&mut self, other: Duration) {
+ *self = *self - other;
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Debug for SystemTime {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+/// An anchor in time which can be used to create new `SystemTime` instances or
+/// learn about where in time a `SystemTime` lies.
+///
+/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
+/// respect to the system clock. Using `duration_since` on an existing
+/// [`SystemTime`] instance can tell how far away from this point in time a
+/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
+/// [`SystemTime`] instance to represent another fixed point in time.
+///
+/// [`SystemTime`]: ../../std/time/struct.SystemTime.html
+///
+/// # Examples
+///
+/// ```no_run
+/// use std::time::{SystemTime, UNIX_EPOCH};
+///
+/// match SystemTime::now().duration_since(UNIX_EPOCH) {
+/// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
+/// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
+/// }
+/// ```
+#[stable(feature = "time2", since = "1.8.0")]
+pub const UNIX_EPOCH: SystemTime = SystemTime(time::UNIX_EPOCH);
+
+impl SystemTimeError {
+ /// Returns the positive duration which represents how far forward the
+ /// second system time was from the first.
+ ///
+ /// A `SystemTimeError` is returned from the [`duration_since`] and [`elapsed`]
+ /// methods of [`SystemTime`] whenever the second system time represents a point later
+ /// in time than the `self` of the method call.
+ ///
+ /// [`duration_since`]: ../../std/time/struct.SystemTime.html#method.duration_since
+ /// [`elapsed`]: ../../std/time/struct.SystemTime.html#method.elapsed
+ /// [`SystemTime`]: ../../std/time/struct.SystemTime.html
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::thread::sleep;
+ /// use std::time::{Duration, SystemTime};
+ ///
+ /// let sys_time = SystemTime::now();
+ /// sleep(Duration::from_secs(1));
+ /// let new_sys_time = SystemTime::now();
+ /// match sys_time.duration_since(new_sys_time) {
+ /// Ok(_) => {}
+ /// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
+ /// }
+ /// ```
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn duration(&self) -> Duration {
+ self.0
+ }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl Error for SystemTimeError {
+ fn description(&self) -> &str { "other time was not earlier than self" }
+}
+
+#[stable(feature = "time2", since = "1.8.0")]
+impl fmt::Display for SystemTimeError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "second time provided was later than self")
+ }
+}
+
+impl FromInner<time::SystemTime> for SystemTime {
+ fn from_inner(time: time::SystemTime) -> SystemTime {
+ SystemTime(time)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Instant, SystemTime, Duration, UNIX_EPOCH};
+
+ macro_rules! assert_almost_eq {
+ ($a:expr, $b:expr) => ({
+ let (a, b) = ($a, $b);
+ if a != b {
+ let (a, b) = if a > b {(a, b)} else {(b, a)};
+ assert!(a - Duration::new(0, 100) <= b);
+ }
+ })
+ }
+
+ #[test]
+ fn instant_monotonic() {
+ let a = Instant::now();
+ let b = Instant::now();
+ assert!(b >= a);
+ }
+
+ #[test]
+ fn instant_elapsed() {
+ let a = Instant::now();
+ a.elapsed();
+ }
+
+ #[test]
+ fn instant_math() {
+ let a = Instant::now();
+ let b = Instant::now();
+ let dur = b.duration_since(a);
+ assert_almost_eq!(b - dur, a);
+ assert_almost_eq!(a + dur, b);
+
+ let second = Duration::new(1, 0);
+ assert_almost_eq!(a - second + second, a);
+ }
+
+ #[test]
+ #[should_panic]
+ fn instant_duration_panic() {
+ let a = Instant::now();
+ (a - Duration::new(1, 0)).duration_since(a);
+ }
+
+ #[test]
+ fn system_time_math() {
+ let a = SystemTime::now();
+ let b = SystemTime::now();
+ match b.duration_since(a) {
+ Ok(dur) if dur == Duration::new(0, 0) => {
+ assert_almost_eq!(a, b);
+ }
+ Ok(dur) => {
+ assert!(b > a);
+ assert_almost_eq!(b - dur, a);
+ assert_almost_eq!(a + dur, b);
+ }
+ Err(dur) => {
+ let dur = dur.duration();
+ assert!(a > b);
+ assert_almost_eq!(b + dur, a);
+ assert_almost_eq!(a - dur, b);
+ }
+ }
+
+ let second = Duration::new(1, 0);
+ assert_almost_eq!(a.duration_since(a - second).unwrap(), second);
+ assert_almost_eq!(a.duration_since(a + second).unwrap_err()
+ .duration(), second);
+
+ assert_almost_eq!(a - second + second, a);
+
+ // A difference of 80 and 800 years cannot fit inside a 32-bit time_t
+ if !(cfg!(unix) && ::mem::size_of::<::libc::time_t>() <= 4) {
+ let eighty_years = second * 60 * 60 * 24 * 365 * 80;
+ assert_almost_eq!(a - eighty_years + eighty_years, a);
+ assert_almost_eq!(a - (eighty_years * 10) + (eighty_years * 10), a);
+ }
+
+ let one_second_from_epoch = UNIX_EPOCH + Duration::new(1, 0);
+ let one_second_from_epoch2 = UNIX_EPOCH + Duration::new(0, 500_000_000)
+ + Duration::new(0, 500_000_000);
+ assert_eq!(one_second_from_epoch, one_second_from_epoch2);
+ }
+
+ #[test]
+ fn system_time_elapsed() {
+ let a = SystemTime::now();
+ drop(a.elapsed());
+ }
+
+ #[test]
+ fn since_epoch() {
+ let ts = SystemTime::now();
+ let a = ts.duration_since(UNIX_EPOCH).unwrap();
+ let b = ts.duration_since(UNIX_EPOCH - Duration::new(1, 0)).unwrap();
+ assert!(b > a);
+ assert_eq!(b - a, Duration::new(1, 0));
+
+ let thirty_years = Duration::new(1, 0) * 60 * 60 * 24 * 365 * 30;
+
+ // Right now for CI this test is run in an emulator, and apparently the
+ // aarch64 emulator's sense of time is that we're still living in the
+ // 70s.
+ //
+ // Otherwise let's assume that we're all running computers later than
+ // 2000.
+ if !cfg!(target_arch = "aarch64") {
+ assert!(a > thirty_years);
+ }
+
+ // let's assume that we're all running computers earlier than 2090.
+ // Should give us ~70 years to fix this!
+ let hundred_twenty_years = thirty_years * 4;
+ assert!(a < hundred_twenty_years);
+ }
+}
+++ /dev/null
-// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use iter::Sum;
-use ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, DivAssign};
-
-const NANOS_PER_SEC: u32 = 1_000_000_000;
-const NANOS_PER_MILLI: u32 = 1_000_000;
-const NANOS_PER_MICRO: u32 = 1_000;
-const MILLIS_PER_SEC: u64 = 1_000;
-const MICROS_PER_SEC: u64 = 1_000_000;
-
-/// A `Duration` type to represent a span of time, typically used for system
-/// timeouts.
-///
-/// Each `Duration` is composed of a whole number of seconds and a fractional part
-/// represented in nanoseconds. If the underlying system does not support
-/// nanosecond-level precision, APIs binding a system timeout will typically round up
-/// the number of nanoseconds.
-///
-/// `Duration`s implement many common traits, including [`Add`], [`Sub`], and other
-/// [`ops`] traits.
-///
-/// [`Add`]: ../../std/ops/trait.Add.html
-/// [`Sub`]: ../../std/ops/trait.Sub.html
-/// [`ops`]: ../../std/ops/index.html
-///
-/// # Examples
-///
-/// ```
-/// use std::time::Duration;
-///
-/// let five_seconds = Duration::new(5, 0);
-/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5);
-///
-/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5);
-/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5);
-///
-/// let ten_millis = Duration::from_millis(10);
-/// ```
-#[stable(feature = "duration", since = "1.3.0")]
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Default)]
-pub struct Duration {
- secs: u64,
- nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC
-}
-
-impl Duration {
- /// Creates a new `Duration` from the specified number of whole seconds and
- /// additional nanoseconds.
- ///
- /// If the number of nanoseconds is greater than 1 billion (the number of
- /// nanoseconds in a second), then it will carry over into the seconds provided.
- ///
- /// # Panics
- ///
- /// This constructor will panic if the carry from the nanoseconds overflows
- /// the seconds counter.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let five_seconds = Duration::new(5, 0);
- /// ```
- #[stable(feature = "duration", since = "1.3.0")]
- #[inline]
- pub fn new(secs: u64, nanos: u32) -> Duration {
- let secs = secs.checked_add((nanos / NANOS_PER_SEC) as u64)
- .expect("overflow in Duration::new");
- let nanos = nanos % NANOS_PER_SEC;
- Duration { secs: secs, nanos: nanos }
- }
-
- /// Creates a new `Duration` from the specified number of whole seconds.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_secs(5);
- ///
- /// assert_eq!(5, duration.as_secs());
- /// assert_eq!(0, duration.subsec_nanos());
- /// ```
- #[stable(feature = "duration", since = "1.3.0")]
- #[inline]
- pub const fn from_secs(secs: u64) -> Duration {
- Duration { secs: secs, nanos: 0 }
- }
-
- /// Creates a new `Duration` from the specified number of milliseconds.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_millis(2569);
- ///
- /// assert_eq!(2, duration.as_secs());
- /// assert_eq!(569_000_000, duration.subsec_nanos());
- /// ```
- #[stable(feature = "duration", since = "1.3.0")]
- #[inline]
- pub const fn from_millis(millis: u64) -> Duration {
- Duration {
- secs: millis / MILLIS_PER_SEC,
- nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI,
- }
- }
-
- /// Creates a new `Duration` from the specified number of microseconds.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(duration_from_micros)]
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_micros(1_000_002);
- ///
- /// assert_eq!(1, duration.as_secs());
- /// assert_eq!(2000, duration.subsec_nanos());
- /// ```
- #[unstable(feature = "duration_from_micros", issue = "44400")]
- #[inline]
- pub const fn from_micros(micros: u64) -> Duration {
- Duration {
- secs: micros / MICROS_PER_SEC,
- nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO,
- }
- }
-
- /// Creates a new `Duration` from the specified number of nanoseconds.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(duration_extras)]
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_nanos(1_000_000_123);
- ///
- /// assert_eq!(1, duration.as_secs());
- /// assert_eq!(123, duration.subsec_nanos());
- /// ```
- #[unstable(feature = "duration_extras", issue = "46507")]
- #[inline]
- pub const fn from_nanos(nanos: u64) -> Duration {
- Duration {
- secs: nanos / (NANOS_PER_SEC as u64),
- nanos: (nanos % (NANOS_PER_SEC as u64)) as u32,
- }
- }
-
- /// Returns the number of _whole_ seconds contained by this `Duration`.
- ///
- /// The returned value does not include the fractional (nanosecond) part of the
- /// duration, which can be obtained using [`subsec_nanos`].
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::new(5, 730023852);
- /// assert_eq!(duration.as_secs(), 5);
- /// ```
- ///
- /// To determine the total number of seconds represented by the `Duration`,
- /// use `as_secs` in combination with [`subsec_nanos`]:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::new(5, 730023852);
- ///
- /// assert_eq!(5.730023852,
- /// duration.as_secs() as f64
- /// + duration.subsec_nanos() as f64 * 1e-9);
- /// ```
- ///
- /// [`subsec_nanos`]: #method.subsec_nanos
- #[stable(feature = "duration", since = "1.3.0")]
- #[inline]
- pub fn as_secs(&self) -> u64 { self.secs }
-
- /// Returns the fractional part of this `Duration`, in milliseconds.
- ///
- /// This method does **not** return the length of the duration when
- /// represented by milliseconds. The returned number always represents a
- /// fractional portion of a second (i.e. it is less than one thousand).
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(duration_extras)]
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_millis(5432);
- /// assert_eq!(duration.as_secs(), 5);
- /// assert_eq!(duration.subsec_millis(), 432);
- /// ```
- #[unstable(feature = "duration_extras", issue = "46507")]
- #[inline]
- pub fn subsec_millis(&self) -> u32 { self.nanos / NANOS_PER_MILLI }
-
- /// Returns the fractional part of this `Duration`, in microseconds.
- ///
- /// This method does **not** return the length of the duration when
- /// represented by microseconds. The returned number always represents a
- /// fractional portion of a second (i.e. it is less than one million).
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(duration_extras, duration_from_micros)]
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_micros(1_234_567);
- /// assert_eq!(duration.as_secs(), 1);
- /// assert_eq!(duration.subsec_micros(), 234_567);
- /// ```
- #[unstable(feature = "duration_extras", issue = "46507")]
- #[inline]
- pub fn subsec_micros(&self) -> u32 { self.nanos / NANOS_PER_MICRO }
-
- /// Returns the fractional part of this `Duration`, in nanoseconds.
- ///
- /// This method does **not** return the length of the duration when
- /// represented by nanoseconds. The returned number always represents a
- /// fractional portion of a second (i.e. it is less than one billion).
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// let duration = Duration::from_millis(5010);
- /// assert_eq!(duration.as_secs(), 5);
- /// assert_eq!(duration.subsec_nanos(), 10_000_000);
- /// ```
- #[stable(feature = "duration", since = "1.3.0")]
- #[inline]
- pub fn subsec_nanos(&self) -> u32 { self.nanos }
-
- /// Checked `Duration` addition. Computes `self + other`, returning [`None`]
- /// if overflow occurred.
- ///
- /// [`None`]: ../../std/option/enum.Option.html#variant.None
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1)));
- /// assert_eq!(Duration::new(1, 0).checked_add(Duration::new(std::u64::MAX, 0)), None);
- /// ```
- #[stable(feature = "duration_checked_ops", since = "1.16.0")]
- #[inline]
- pub fn checked_add(self, rhs: Duration) -> Option<Duration> {
- if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
- let mut nanos = self.nanos + rhs.nanos;
- if nanos >= NANOS_PER_SEC {
- nanos -= NANOS_PER_SEC;
- if let Some(new_secs) = secs.checked_add(1) {
- secs = new_secs;
- } else {
- return None;
- }
- }
- debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration {
- secs,
- nanos,
- })
- } else {
- None
- }
- }
-
- /// Checked `Duration` subtraction. Computes `self - other`, returning [`None`]
- /// if the result would be negative or if overflow occurred.
- ///
- /// [`None`]: ../../std/option/enum.Option.html#variant.None
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// assert_eq!(Duration::new(0, 1).checked_sub(Duration::new(0, 0)), Some(Duration::new(0, 1)));
- /// assert_eq!(Duration::new(0, 0).checked_sub(Duration::new(0, 1)), None);
- /// ```
- #[stable(feature = "duration_checked_ops", since = "1.16.0")]
- #[inline]
- pub fn checked_sub(self, rhs: Duration) -> Option<Duration> {
- if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
- let nanos = if self.nanos >= rhs.nanos {
- self.nanos - rhs.nanos
- } else {
- if let Some(sub_secs) = secs.checked_sub(1) {
- secs = sub_secs;
- self.nanos + NANOS_PER_SEC - rhs.nanos
- } else {
- return None;
- }
- };
- debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration { secs: secs, nanos: nanos })
- } else {
- None
- }
- }
-
- /// Checked `Duration` multiplication. Computes `self * other`, returning
- /// [`None`] if overflow occurred.
- ///
- /// [`None`]: ../../std/option/enum.Option.html#variant.None
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// assert_eq!(Duration::new(0, 500_000_001).checked_mul(2), Some(Duration::new(1, 2)));
- /// assert_eq!(Duration::new(std::u64::MAX - 1, 0).checked_mul(2), None);
- /// ```
- #[stable(feature = "duration_checked_ops", since = "1.16.0")]
- #[inline]
- pub fn checked_mul(self, rhs: u32) -> Option<Duration> {
- // Multiply nanoseconds as u64, because it cannot overflow that way.
- let total_nanos = self.nanos as u64 * rhs as u64;
- let extra_secs = total_nanos / (NANOS_PER_SEC as u64);
- let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32;
- if let Some(secs) = self.secs
- .checked_mul(rhs as u64)
- .and_then(|s| s.checked_add(extra_secs)) {
- debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration {
- secs,
- nanos,
- })
- } else {
- None
- }
- }
-
- /// Checked `Duration` division. Computes `self / other`, returning [`None`]
- /// if `other == 0`.
- ///
- /// [`None`]: ../../std/option/enum.Option.html#variant.None
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::time::Duration;
- ///
- /// assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
- /// assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
- /// assert_eq!(Duration::new(2, 0).checked_div(0), None);
- /// ```
- #[stable(feature = "duration_checked_ops", since = "1.16.0")]
- #[inline]
- pub fn checked_div(self, rhs: u32) -> Option<Duration> {
- if rhs != 0 {
- let secs = self.secs / (rhs as u64);
- let carry = self.secs - secs * (rhs as u64);
- let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
- let nanos = self.nanos / rhs + (extra_nanos as u32);
- debug_assert!(nanos < NANOS_PER_SEC);
- Some(Duration { secs: secs, nanos: nanos })
- } else {
- None
- }
- }
-}
-
-#[stable(feature = "duration", since = "1.3.0")]
-impl Add for Duration {
- type Output = Duration;
-
- fn add(self, rhs: Duration) -> Duration {
- self.checked_add(rhs).expect("overflow when adding durations")
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl AddAssign for Duration {
- fn add_assign(&mut self, rhs: Duration) {
- *self = *self + rhs;
- }
-}
-
-#[stable(feature = "duration", since = "1.3.0")]
-impl Sub for Duration {
- type Output = Duration;
-
- fn sub(self, rhs: Duration) -> Duration {
- self.checked_sub(rhs).expect("overflow when subtracting durations")
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl SubAssign for Duration {
- fn sub_assign(&mut self, rhs: Duration) {
- *self = *self - rhs;
- }
-}
-
-#[stable(feature = "duration", since = "1.3.0")]
-impl Mul<u32> for Duration {
- type Output = Duration;
-
- fn mul(self, rhs: u32) -> Duration {
- self.checked_mul(rhs).expect("overflow when multiplying duration by scalar")
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl MulAssign<u32> for Duration {
- fn mul_assign(&mut self, rhs: u32) {
- *self = *self * rhs;
- }
-}
-
-#[stable(feature = "duration", since = "1.3.0")]
-impl Div<u32> for Duration {
- type Output = Duration;
-
- fn div(self, rhs: u32) -> Duration {
- self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl DivAssign<u32> for Duration {
- fn div_assign(&mut self, rhs: u32) {
- *self = *self / rhs;
- }
-}
-
-#[stable(feature = "duration_sum", since = "1.16.0")]
-impl Sum for Duration {
- fn sum<I: Iterator<Item=Duration>>(iter: I) -> Duration {
- iter.fold(Duration::new(0, 0), |a, b| a + b)
- }
-}
-
-#[stable(feature = "duration_sum", since = "1.16.0")]
-impl<'a> Sum<&'a Duration> for Duration {
- fn sum<I: Iterator<Item=&'a Duration>>(iter: I) -> Duration {
- iter.fold(Duration::new(0, 0), |a, b| a + *b)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::Duration;
-
- #[test]
- fn creation() {
- assert!(Duration::from_secs(1) != Duration::from_secs(0));
- assert_eq!(Duration::from_secs(1) + Duration::from_secs(2),
- Duration::from_secs(3));
- assert_eq!(Duration::from_millis(10) + Duration::from_secs(4),
- Duration::new(4, 10 * 1_000_000));
- assert_eq!(Duration::from_millis(4000), Duration::new(4, 0));
- }
-
- #[test]
- fn secs() {
- assert_eq!(Duration::new(0, 0).as_secs(), 0);
- assert_eq!(Duration::from_secs(1).as_secs(), 1);
- assert_eq!(Duration::from_millis(999).as_secs(), 0);
- assert_eq!(Duration::from_millis(1001).as_secs(), 1);
- }
-
- #[test]
- fn nanos() {
- assert_eq!(Duration::new(0, 0).subsec_nanos(), 0);
- assert_eq!(Duration::new(0, 5).subsec_nanos(), 5);
- assert_eq!(Duration::new(0, 1_000_000_001).subsec_nanos(), 1);
- assert_eq!(Duration::from_secs(1).subsec_nanos(), 0);
- assert_eq!(Duration::from_millis(999).subsec_nanos(), 999 * 1_000_000);
- assert_eq!(Duration::from_millis(1001).subsec_nanos(), 1 * 1_000_000);
- }
-
- #[test]
- fn add() {
- assert_eq!(Duration::new(0, 0) + Duration::new(0, 1),
- Duration::new(0, 1));
- assert_eq!(Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001),
- Duration::new(1, 1));
- }
-
- #[test]
- fn checked_add() {
- assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)),
- Some(Duration::new(0, 1)));
- assert_eq!(Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001)),
- Some(Duration::new(1, 1)));
- assert_eq!(Duration::new(1, 0).checked_add(Duration::new(::u64::MAX, 0)), None);
- }
-
- #[test]
- fn sub() {
- assert_eq!(Duration::new(0, 1) - Duration::new(0, 0),
- Duration::new(0, 1));
- assert_eq!(Duration::new(0, 500_000_001) - Duration::new(0, 500_000_000),
- Duration::new(0, 1));
- assert_eq!(Duration::new(1, 0) - Duration::new(0, 1),
- Duration::new(0, 999_999_999));
- }
-
- #[test]
- fn checked_sub() {
- let zero = Duration::new(0, 0);
- let one_nano = Duration::new(0, 1);
- let one_sec = Duration::new(1, 0);
- assert_eq!(one_nano.checked_sub(zero), Some(Duration::new(0, 1)));
- assert_eq!(one_sec.checked_sub(one_nano),
- Some(Duration::new(0, 999_999_999)));
- assert_eq!(zero.checked_sub(one_nano), None);
- assert_eq!(zero.checked_sub(one_sec), None);
- }
-
- #[test] #[should_panic]
- fn sub_bad1() {
- Duration::new(0, 0) - Duration::new(0, 1);
- }
-
- #[test] #[should_panic]
- fn sub_bad2() {
- Duration::new(0, 0) - Duration::new(1, 0);
- }
-
- #[test]
- fn mul() {
- assert_eq!(Duration::new(0, 1) * 2, Duration::new(0, 2));
- assert_eq!(Duration::new(1, 1) * 3, Duration::new(3, 3));
- assert_eq!(Duration::new(0, 500_000_001) * 4, Duration::new(2, 4));
- assert_eq!(Duration::new(0, 500_000_001) * 4000,
- Duration::new(2000, 4000));
- }
-
- #[test]
- fn checked_mul() {
- assert_eq!(Duration::new(0, 1).checked_mul(2), Some(Duration::new(0, 2)));
- assert_eq!(Duration::new(1, 1).checked_mul(3), Some(Duration::new(3, 3)));
- assert_eq!(Duration::new(0, 500_000_001).checked_mul(4), Some(Duration::new(2, 4)));
- assert_eq!(Duration::new(0, 500_000_001).checked_mul(4000),
- Some(Duration::new(2000, 4000)));
- assert_eq!(Duration::new(::u64::MAX - 1, 0).checked_mul(2), None);
- }
-
- #[test]
- fn div() {
- assert_eq!(Duration::new(0, 1) / 2, Duration::new(0, 0));
- assert_eq!(Duration::new(1, 1) / 3, Duration::new(0, 333_333_333));
- assert_eq!(Duration::new(99, 999_999_000) / 100,
- Duration::new(0, 999_999_990));
- }
-
- #[test]
- fn checked_div() {
- assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
- assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
- assert_eq!(Duration::new(2, 0).checked_div(0), None);
- }
-}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Temporal quantification.
-//!
-//! Example:
-//!
-//! ```
-//! use std::time::Duration;
-//!
-//! let five_seconds = Duration::new(5, 0);
-//! // both declarations are equivalent
-//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5));
-//! ```
-
-#![stable(feature = "time", since = "1.3.0")]
-
-use error::Error;
-use fmt;
-use ops::{Add, Sub, AddAssign, SubAssign};
-use sys::time;
-use sys_common::FromInner;
-
-#[stable(feature = "time", since = "1.3.0")]
-pub use self::duration::Duration;
-
-mod duration;
-
-/// A measurement of a monotonically nondecreasing clock.
-/// Opaque and useful only with `Duration`.
-///
-/// Instants are always guaranteed to be no less than any previously measured
-/// instant when created, and are often useful for tasks such as measuring
-/// benchmarks or timing how long an operation takes.
-///
-/// Note, however, that instants are not guaranteed to be **steady**. In other
-/// words, each tick of the underlying clock may not be the same length (e.g.
-/// some seconds may be longer than others). An instant may jump forwards or
-/// experience time dilation (slow down or speed up), but it will never go
-/// backwards.
-///
-/// Instants are opaque types that can only be compared to one another. There is
-/// no method to get "the number of seconds" from an instant. Instead, it only
-/// allows measuring the duration between two instants (or comparing two
-/// instants).
-///
-/// Example:
-///
-/// ```no_run
-/// use std::time::{Duration, Instant};
-/// use std::thread::sleep;
-///
-/// fn main() {
-/// let now = Instant::now();
-///
-/// // we sleep for 2 seconds
-/// sleep(Duration::new(2, 0));
-/// // it prints '2'
-/// println!("{}", now.elapsed().as_secs());
-/// }
-/// ```
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[stable(feature = "time2", since = "1.8.0")]
-pub struct Instant(time::Instant);
-
-/// A measurement of the system clock, useful for talking to
-/// external entities like the file system or other processes.
-///
-/// Distinct from the [`Instant`] type, this time measurement **is not
-/// monotonic**. This means that you can save a file to the file system, then
-/// save another file to the file system, **and the second file has a
-/// `SystemTime` measurement earlier than the first**. In other words, an
-/// operation that happens after another operation in real time may have an
-/// earlier `SystemTime`!
-///
-/// Consequently, comparing two `SystemTime` instances to learn about the
-/// duration between them returns a [`Result`] instead of an infallible [`Duration`]
-/// to indicate that this sort of time drift may happen and needs to be handled.
-///
-/// Although a `SystemTime` cannot be directly inspected, the [`UNIX_EPOCH`]
-/// constant is provided in this module as an anchor in time to learn
-/// information about a `SystemTime`. By calculating the duration from this
-/// fixed point in time, a `SystemTime` can be converted to a human-readable time,
-/// or perhaps some other string representation.
-///
-/// [`Instant`]: ../../std/time/struct.Instant.html
-/// [`Result`]: ../../std/result/enum.Result.html
-/// [`Duration`]: ../../std/time/struct.Duration.html
-/// [`UNIX_EPOCH`]: ../../std/time/constant.UNIX_EPOCH.html
-///
-/// Example:
-///
-/// ```no_run
-/// use std::time::{Duration, SystemTime};
-/// use std::thread::sleep;
-///
-/// fn main() {
-/// let now = SystemTime::now();
-///
-/// // we sleep for 2 seconds
-/// sleep(Duration::new(2, 0));
-/// match now.elapsed() {
-/// Ok(elapsed) => {
-/// // it prints '2'
-/// println!("{}", elapsed.as_secs());
-/// }
-/// Err(e) => {
-/// // an error occurred!
-/// println!("Error: {:?}", e);
-/// }
-/// }
-/// }
-/// ```
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[stable(feature = "time2", since = "1.8.0")]
-pub struct SystemTime(time::SystemTime);
-
-/// An error returned from the `duration_since` and `elapsed` methods on
-/// `SystemTime`, used to learn how far in the opposite direction a system time
-/// lies.
-///
-/// # Examples
-///
-/// ```no_run
-/// use std::thread::sleep;
-/// use std::time::{Duration, SystemTime};
-///
-/// let sys_time = SystemTime::now();
-/// sleep(Duration::from_secs(1));
-/// let new_sys_time = SystemTime::now();
-/// match sys_time.duration_since(new_sys_time) {
-/// Ok(_) => {}
-/// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
-/// }
-/// ```
-#[derive(Clone, Debug)]
-#[stable(feature = "time2", since = "1.8.0")]
-pub struct SystemTimeError(Duration);
-
-impl Instant {
- /// Returns an instant corresponding to "now".
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::Instant;
- ///
- /// let now = Instant::now();
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn now() -> Instant {
- Instant(time::Instant::now())
- }
-
- /// Returns the amount of time elapsed from another instant to this one.
- ///
- /// # Panics
- ///
- /// This function will panic if `earlier` is later than `self`.
- ///
- /// # Examples
- ///
- /// ```no_run
- /// use std::time::{Duration, Instant};
- /// use std::thread::sleep;
- ///
- /// let now = Instant::now();
- /// sleep(Duration::new(1, 0));
- /// let new_now = Instant::now();
- /// println!("{:?}", new_now.duration_since(now));
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn duration_since(&self, earlier: Instant) -> Duration {
- self.0.sub_instant(&earlier.0)
- }
-
- /// Returns the amount of time elapsed since this instant was created.
- ///
- /// # Panics
- ///
- /// This function may panic if the current time is earlier than this
- /// instant, which is something that can happen if an `Instant` is
- /// produced synthetically.
- ///
- /// # Examples
- ///
- /// ```no_run
- /// use std::thread::sleep;
- /// use std::time::{Duration, Instant};
- ///
- /// let instant = Instant::now();
- /// let three_secs = Duration::from_secs(3);
- /// sleep(three_secs);
- /// assert!(instant.elapsed() >= three_secs);
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn elapsed(&self) -> Duration {
- Instant::now() - *self
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Add<Duration> for Instant {
- type Output = Instant;
-
- fn add(self, other: Duration) -> Instant {
- Instant(self.0.add_duration(&other))
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl AddAssign<Duration> for Instant {
- fn add_assign(&mut self, other: Duration) {
- *self = *self + other;
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Sub<Duration> for Instant {
- type Output = Instant;
-
- fn sub(self, other: Duration) -> Instant {
- Instant(self.0.sub_duration(&other))
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl SubAssign<Duration> for Instant {
- fn sub_assign(&mut self, other: Duration) {
- *self = *self - other;
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Sub<Instant> for Instant {
- type Output = Duration;
-
- fn sub(self, other: Instant) -> Duration {
- self.duration_since(other)
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl fmt::Debug for Instant {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- self.0.fmt(f)
- }
-}
-
-impl SystemTime {
- /// Returns the system time corresponding to "now".
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::SystemTime;
- ///
- /// let sys_time = SystemTime::now();
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn now() -> SystemTime {
- SystemTime(time::SystemTime::now())
- }
-
- /// Returns the amount of time elapsed from an earlier point in time.
- ///
- /// This function may fail because measurements taken earlier are not
- /// guaranteed to always be before later measurements (due to anomalies such
- /// as the system clock being adjusted either forwards or backwards).
- ///
- /// If successful, [`Ok`]`(`[`Duration`]`)` is returned where the duration represents
- /// the amount of time elapsed from the specified measurement to this one.
- ///
- /// Returns an [`Err`] if `earlier` is later than `self`, and the error
- /// contains how far from `self` the time is.
- ///
- /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
- /// [`Duration`]: ../../std/time/struct.Duration.html
- /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
- ///
- /// # Examples
- ///
- /// ```
- /// use std::time::SystemTime;
- ///
- /// let sys_time = SystemTime::now();
- /// let difference = sys_time.duration_since(sys_time)
- /// .expect("SystemTime::duration_since failed");
- /// println!("{:?}", difference);
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn duration_since(&self, earlier: SystemTime)
- -> Result<Duration, SystemTimeError> {
- self.0.sub_time(&earlier.0).map_err(SystemTimeError)
- }
-
- /// Returns the amount of time elapsed since this system time was created.
- ///
- /// This function may fail as the underlying system clock is susceptible to
- /// drift and updates (e.g. the system clock could go backwards), so this
- /// function may not always succeed. If successful, [`Ok`]`(`[`Duration`]`)` is
- /// returned where the duration represents the amount of time elapsed from
- /// this time measurement to the current time.
- ///
- /// Returns an [`Err`] if `self` is later than the current system time, and
- /// the error contains how far from the current system time `self` is.
- ///
- /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
- /// [`Duration`]: ../../std/time/struct.Duration.html
- /// [`Err`]: ../../std/result/enum.Result.html#variant.Err
- ///
- /// # Examples
- ///
- /// ```no_run
- /// use std::thread::sleep;
- /// use std::time::{Duration, SystemTime};
- ///
- /// let sys_time = SystemTime::now();
- /// let one_sec = Duration::from_secs(1);
- /// sleep(one_sec);
- /// assert!(sys_time.elapsed().unwrap() >= one_sec);
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn elapsed(&self) -> Result<Duration, SystemTimeError> {
- SystemTime::now().duration_since(*self)
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Add<Duration> for SystemTime {
- type Output = SystemTime;
-
- fn add(self, dur: Duration) -> SystemTime {
- SystemTime(self.0.add_duration(&dur))
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl AddAssign<Duration> for SystemTime {
- fn add_assign(&mut self, other: Duration) {
- *self = *self + other;
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Sub<Duration> for SystemTime {
- type Output = SystemTime;
-
- fn sub(self, dur: Duration) -> SystemTime {
- SystemTime(self.0.sub_duration(&dur))
- }
-}
-
-#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
-impl SubAssign<Duration> for SystemTime {
- fn sub_assign(&mut self, other: Duration) {
- *self = *self - other;
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl fmt::Debug for SystemTime {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- self.0.fmt(f)
- }
-}
-
-/// An anchor in time which can be used to create new `SystemTime` instances or
-/// learn about where in time a `SystemTime` lies.
-///
-/// This constant is defined to be "1970-01-01 00:00:00 UTC" on all systems with
-/// respect to the system clock. Using `duration_since` on an existing
-/// [`SystemTime`] instance can tell how far away from this point in time a
-/// measurement lies, and using `UNIX_EPOCH + duration` can be used to create a
-/// [`SystemTime`] instance to represent another fixed point in time.
-///
-/// [`SystemTime`]: ../../std/time/struct.SystemTime.html
-///
-/// # Examples
-///
-/// ```no_run
-/// use std::time::{SystemTime, UNIX_EPOCH};
-///
-/// match SystemTime::now().duration_since(UNIX_EPOCH) {
-/// Ok(n) => println!("1970-01-01 00:00:00 UTC was {} seconds ago!", n.as_secs()),
-/// Err(_) => panic!("SystemTime before UNIX EPOCH!"),
-/// }
-/// ```
-#[stable(feature = "time2", since = "1.8.0")]
-pub const UNIX_EPOCH: SystemTime = SystemTime(time::UNIX_EPOCH);
-
-impl SystemTimeError {
- /// Returns the positive duration which represents how far forward the
- /// second system time was from the first.
- ///
- /// A `SystemTimeError` is returned from the [`duration_since`] and [`elapsed`]
- /// methods of [`SystemTime`] whenever the second system time represents a point later
- /// in time than the `self` of the method call.
- ///
- /// [`duration_since`]: ../../std/time/struct.SystemTime.html#method.duration_since
- /// [`elapsed`]: ../../std/time/struct.SystemTime.html#method.elapsed
- /// [`SystemTime`]: ../../std/time/struct.SystemTime.html
- ///
- /// # Examples
- ///
- /// ```no_run
- /// use std::thread::sleep;
- /// use std::time::{Duration, SystemTime};
- ///
- /// let sys_time = SystemTime::now();
- /// sleep(Duration::from_secs(1));
- /// let new_sys_time = SystemTime::now();
- /// match sys_time.duration_since(new_sys_time) {
- /// Ok(_) => {}
- /// Err(e) => println!("SystemTimeError difference: {:?}", e.duration()),
- /// }
- /// ```
- #[stable(feature = "time2", since = "1.8.0")]
- pub fn duration(&self) -> Duration {
- self.0
- }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl Error for SystemTimeError {
- fn description(&self) -> &str { "other time was not earlier than self" }
-}
-
-#[stable(feature = "time2", since = "1.8.0")]
-impl fmt::Display for SystemTimeError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "second time provided was later than self")
- }
-}
-
-impl FromInner<time::SystemTime> for SystemTime {
- fn from_inner(time: time::SystemTime) -> SystemTime {
- SystemTime(time)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::{Instant, SystemTime, Duration, UNIX_EPOCH};
-
- macro_rules! assert_almost_eq {
- ($a:expr, $b:expr) => ({
- let (a, b) = ($a, $b);
- if a != b {
- let (a, b) = if a > b {(a, b)} else {(b, a)};
- assert!(a - Duration::new(0, 100) <= b);
- }
- })
- }
-
- #[test]
- fn instant_monotonic() {
- let a = Instant::now();
- let b = Instant::now();
- assert!(b >= a);
- }
-
- #[test]
- fn instant_elapsed() {
- let a = Instant::now();
- a.elapsed();
- }
-
- #[test]
- fn instant_math() {
- let a = Instant::now();
- let b = Instant::now();
- let dur = b.duration_since(a);
- assert_almost_eq!(b - dur, a);
- assert_almost_eq!(a + dur, b);
-
- let second = Duration::new(1, 0);
- assert_almost_eq!(a - second + second, a);
- }
-
- #[test]
- #[should_panic]
- fn instant_duration_panic() {
- let a = Instant::now();
- (a - Duration::new(1, 0)).duration_since(a);
- }
-
- #[test]
- fn system_time_math() {
- let a = SystemTime::now();
- let b = SystemTime::now();
- match b.duration_since(a) {
- Ok(dur) if dur == Duration::new(0, 0) => {
- assert_almost_eq!(a, b);
- }
- Ok(dur) => {
- assert!(b > a);
- assert_almost_eq!(b - dur, a);
- assert_almost_eq!(a + dur, b);
- }
- Err(dur) => {
- let dur = dur.duration();
- assert!(a > b);
- assert_almost_eq!(b + dur, a);
- assert_almost_eq!(a - dur, b);
- }
- }
-
- let second = Duration::new(1, 0);
- assert_almost_eq!(a.duration_since(a - second).unwrap(), second);
- assert_almost_eq!(a.duration_since(a + second).unwrap_err()
- .duration(), second);
-
- assert_almost_eq!(a - second + second, a);
-
- // A difference of 80 and 800 years cannot fit inside a 32-bit time_t
- if !(cfg!(unix) && ::mem::size_of::<::libc::time_t>() <= 4) {
- let eighty_years = second * 60 * 60 * 24 * 365 * 80;
- assert_almost_eq!(a - eighty_years + eighty_years, a);
- assert_almost_eq!(a - (eighty_years * 10) + (eighty_years * 10), a);
- }
-
- let one_second_from_epoch = UNIX_EPOCH + Duration::new(1, 0);
- let one_second_from_epoch2 = UNIX_EPOCH + Duration::new(0, 500_000_000)
- + Duration::new(0, 500_000_000);
- assert_eq!(one_second_from_epoch, one_second_from_epoch2);
- }
-
- #[test]
- fn system_time_elapsed() {
- let a = SystemTime::now();
- drop(a.elapsed());
- }
-
- #[test]
- fn since_epoch() {
- let ts = SystemTime::now();
- let a = ts.duration_since(UNIX_EPOCH).unwrap();
- let b = ts.duration_since(UNIX_EPOCH - Duration::new(1, 0)).unwrap();
- assert!(b > a);
- assert_eq!(b - a, Duration::new(1, 0));
-
- let thirty_years = Duration::new(1, 0) * 60 * 60 * 24 * 365 * 30;
-
- // Right now for CI this test is run in an emulator, and apparently the
- // aarch64 emulator's sense of time is that we're still living in the
- // 70s.
- //
- // Otherwise let's assume that we're all running computers later than
- // 2000.
- if !cfg!(target_arch = "aarch64") {
- assert!(a > thirty_years);
- }
-
- // let's assume that we're all running computers earlier than 2090.
- // Should give us ~70 years to fix this!
- let hundred_twenty_years = thirty_years * 4;
- assert!(a < hundred_twenty_years);
- }
-}
pub pats: Vec<P<Pat>>,
pub guard: Option<P<Expr>>,
pub body: P<Expr>,
- pub beginning_vert: Option<Span>, // For RFC 1925 feature gate
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::StableHasher;
use std::cell::{RefCell, Ref};
+use std::cmp;
use std::hash::Hash;
use std::path::{Path, PathBuf};
use std::rc::Rc;
}
}
- /// Given a `Span`, try to get a shorter span ending just after the first
- /// occurrence of `char` `c`.
+ /// Given a `Span`, get a new `Span` covering the first token and all its trailing whitespace,
+ /// or the original `Span` if no shorter span can be produced.
+ ///
+ /// If `sp` points to `"let mut x"`, then a span pointing at `"let "` will be returned.
+ pub fn span_until_non_whitespace(&self, sp: Span) -> Span {
+ if let Ok(snippet) = self.span_to_snippet(sp) {
+ let mut offset = 0;
+ // get the bytes width of all the non-whitespace characters
+ for c in snippet.chars().take_while(|c| !c.is_whitespace()) {
+ offset += c.len_utf8();
+ }
+ // get the bytes width of all the whitespace characters after that
+ for c in snippet[offset..].chars().take_while(|c| c.is_whitespace()) {
+ offset += c.len_utf8();
+ }
+ if offset > 1 {
+ return sp.with_hi(BytePos(sp.lo().0 + offset as u32));
+ }
+ }
+ sp
+ }
+
+ /// Given a `Span`, try to get a shorter span ending just after the first occurrence of `char`
+ /// `c`.
pub fn span_through_char(&self, sp: Span, c: char) -> Span {
if let Ok(snippet) = self.span_to_snippet(sp) {
if let Some(offset) = snippet.find(c) {
self.span_until_char(sp, '{')
}
+ /// Returns a new span representing just the end-point of this span
+ pub fn end_point(&self, sp: Span) -> Span {
+ let pos = sp.hi().0;
+
+ let width = self.find_width_of_character_at_span(sp, false);
+ let corrected_end_position = pos.checked_sub(width).unwrap_or(pos);
+
+ let end_point = BytePos(cmp::max(corrected_end_position, sp.lo().0));
+ sp.with_lo(end_point)
+ }
+
+ /// Returns a new span representing the next character after the end-point of this span
+ pub fn next_point(&self, sp: Span) -> Span {
+ let start_of_next_point = sp.hi().0;
+
+ let width = self.find_width_of_character_at_span(sp, true);
+ // If the width is 1, then the next span should point to the same `lo` and `hi`. However,
+ // in the case of a multibyte character, where the width != 1, the next span should
+ // span multiple bytes to include the whole character.
+ let end_of_next_point = start_of_next_point.checked_add(
+ width - 1).unwrap_or(start_of_next_point);
+
+ let end_of_next_point = BytePos(cmp::max(sp.lo().0 + 1, end_of_next_point));
+ Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt())
+ }
+
+ /// Finds the width of a character, either before or after the provided span.
+ fn find_width_of_character_at_span(&self, sp: Span, forwards: bool) -> u32 {
+ // Disregard malformed spans and assume a one-byte wide character.
+ if sp.lo() >= sp.hi() {
+ debug!("find_width_of_character_at_span: early return malformed span");
+ return 1;
+ }
+
+ let local_begin = self.lookup_byte_offset(sp.lo());
+ let local_end = self.lookup_byte_offset(sp.hi());
+ debug!("find_width_of_character_at_span: local_begin=`{:?}`, local_end=`{:?}`",
+ local_begin, local_end);
+
+ let start_index = local_begin.pos.to_usize();
+ let end_index = local_end.pos.to_usize();
+ debug!("find_width_of_character_at_span: start_index=`{:?}`, end_index=`{:?}`",
+ start_index, end_index);
+
+ // Disregard indexes that are at the start or end of their spans, they can't fit bigger
+ // characters.
+ if (!forwards && end_index == usize::min_value()) ||
+ (forwards && start_index == usize::max_value()) {
+ debug!("find_width_of_character_at_span: start or end of span, cannot be multibyte");
+ return 1;
+ }
+
+ let source_len = (local_begin.fm.end_pos - local_begin.fm.start_pos).to_usize();
+ debug!("find_width_of_character_at_span: source_len=`{:?}`", source_len);
+ // Ensure indexes are also not malformed.
+ if start_index > end_index || end_index > source_len {
+ debug!("find_width_of_character_at_span: source indexes are malformed");
+ return 1;
+ }
+
+ // We need to extend the snippet to the end of the src rather than to end_index so when
+ // searching forwards for boundaries we've got somewhere to search.
+ let snippet = if let Some(ref src) = local_begin.fm.src {
+ let len = src.len();
+ (&src[start_index..len]).to_string()
+ } else if let Some(src) = local_begin.fm.external_src.borrow().get_source() {
+ let len = src.len();
+ (&src[start_index..len]).to_string()
+ } else {
+ return 1;
+ };
+ debug!("find_width_of_character_at_span: snippet=`{:?}`", snippet);
+
+ let file_start_pos = local_begin.fm.start_pos.to_usize();
+ let file_end_pos = local_begin.fm.end_pos.to_usize();
+ debug!("find_width_of_character_at_span: file_start_pos=`{:?}` file_end_pos=`{:?}`",
+ file_start_pos, file_end_pos);
+
+ let mut target = if forwards { end_index + 1 } else { end_index - 1 };
+ debug!("find_width_of_character_at_span: initial target=`{:?}`", target);
+
+ while !snippet.is_char_boundary(target - start_index)
+ && target >= file_start_pos && target <= file_end_pos {
+ target = if forwards { target + 1 } else { target - 1 };
+ debug!("find_width_of_character_at_span: target=`{:?}`", target);
+ }
+ debug!("find_width_of_character_at_span: final target=`{:?}`", target);
+
+ if forwards {
+ (target - end_index) as u32
+ } else {
+ (end_index - target) as u32
+ }
+ }
+
pub fn get_filemap(&self, filename: &FileName) -> Option<Rc<FileMap>> {
for fm in self.files.borrow().iter() {
if *filename == fm.name {
pats,
guard: None,
body: expr,
- beginning_vert: None,
}
}
use errors::FatalError;
use ext::tt::quoted::{self, TokenTree};
use parse::{Directory, ParseSess};
-use parse::parser::{PathStyle, Parser};
-use parse::token::{self, DocComment, Token, Nonterminal};
+use parse::parser::{Parser, PathStyle};
+use parse::token::{self, DocComment, Nonterminal, Token};
use print::pprust;
use symbol::keywords;
use tokenstream::TokenStream;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
-use std::collections::hash_map::Entry::{Vacant, Occupied};
+use std::collections::hash_map::Entry::{Occupied, Vacant};
-// To avoid costly uniqueness checks, we require that `MatchSeq` always has
-// a nonempty body.
+// To avoid costly uniqueness checks, we require that `MatchSeq` always has a nonempty body.
+/// Either a sequence of token trees or a single one. This is used as the representation of the
+/// sequence of tokens that make up a matcher.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(TokenTree),
}
impl TokenTreeOrTokenTreeVec {
+ /// Returns the number of constituent top-level token trees of `self` (top-level in that it
+ /// will not recursively descend into subtrees).
fn len(&self) -> usize {
match *self {
TtSeq(ref v) => v.len(),
}
}
+    /// Returns the `index`-th token tree of `self`.
fn get_tt(&self, index: usize) -> TokenTree {
match *self {
TtSeq(ref v) => v[index].clone(),
}
}
-/// an unzipping of `TokenTree`s
+/// An unzipping of `TokenTree`s... see the `stack` field of `MatcherPos`.
+///
+/// This is used by `inner_parse_loop` to keep track of delimited submatchers that we have
+/// descended into.
#[derive(Clone)]
struct MatcherTtFrame {
+ /// The "parent" matcher that we are descending into.
elts: TokenTreeOrTokenTreeVec,
+ /// The position of the "dot" in `elts` at the time we descended.
idx: usize,
}
+/// Represents a single "position" (aka "matcher position", aka "item"), as described in the module
+/// documentation.
#[derive(Clone)]
struct MatcherPos {
- stack: Vec<MatcherTtFrame>,
+ /// The token or sequence of tokens that make up the matcher
top_elts: TokenTreeOrTokenTreeVec,
- sep: Option<Token>,
+ /// The position of the "dot" in this matcher
idx: usize,
- up: Option<Box<MatcherPos>>,
+ /// The beginning position in the source that the beginning of this matcher corresponds to. In
+ /// other words, the token in the source at `sp_lo` is matched against the first token of the
+ /// matcher.
+ sp_lo: BytePos,
+
+ /// For each named metavar in the matcher, we keep track of token trees matched against the
+ /// metavar by the black box parser. In particular, there may be more than one match per
+ /// metavar if we are in a repetition (each repetition matches each of the variables).
+ /// Moreover, matchers and repetitions can be nested; the `matches` field is shared (hence the
+ /// `Rc`) among all "nested" matchers. `match_lo`, `match_cur`, and `match_hi` keep track of
+ /// the current position of the `self` matcher position in the shared `matches` list.
+ ///
+ /// Also, note that while we are descending into a sequence, matchers are given their own
+ /// `matches` vector. Only once we reach the end of a full repetition of the sequence do we add
+ /// all bound matches from the submatcher into the shared top-level `matches` vector. If `sep`
+ /// and `up` are `Some`, then `matches` is _not_ the shared top-level list. Instead, if one
+ /// wants the shared `matches`, one should use `up.matches`.
matches: Vec<Rc<Vec<NamedMatch>>>,
+ /// The position in `matches` corresponding to the first metavar in this matcher's sequence of
+ /// token trees. In other words, the first metavar in the first token of `top_elts` corresponds
+ /// to `matches[match_lo]`.
match_lo: usize,
+ /// The position in `matches` corresponding to the metavar we are currently trying to match
+ /// against the source token stream. `match_lo <= match_cur <= match_hi`.
match_cur: usize,
+ /// Similar to `match_lo` except `match_hi` is the position in `matches` of the _last_ metavar
+ /// in this matcher.
match_hi: usize,
- sp_lo: BytePos,
+
+    // Specifically used if we are matching a repetition. If we aren't, both should be `None`.
+ /// The separator if we are in a repetition
+ sep: Option<Token>,
+ /// The "parent" matcher position if we are in a repetition. That is, the matcher position just
+ /// before we enter the sequence.
+ up: Option<Box<MatcherPos>>,
+
+ // Specifically used to "unzip" token trees. By "unzip", we mean to unwrap the delimiters from
+ // a delimited token tree (e.g. something wrapped in `(` `)`) or to get the contents of a doc
+ // comment...
+ /// When matching against matchers with nested delimited submatchers (e.g. `pat ( pat ( .. )
+ /// pat ) pat`), we need to keep track of the matchers we are descending into. This stack does
+ /// that where the bottom of the stack is the outermost matcher.
+ // Also, throughout the comments, this "descent" is often referred to as "unzipping"...
+ stack: Vec<MatcherTtFrame>,
}
impl MatcherPos {
+ /// Add `m` as a named match for the `idx`-th metavar.
fn push_match(&mut self, idx: usize, m: NamedMatch) {
let matches = Rc::make_mut(&mut self.matches[idx]);
matches.push(m);
}
}
+/// Represents the possible results of an attempted parse.
+pub enum ParseResult<T> {
+ /// Parsed successfully.
+ Success(T),
+ /// Arm failed to match. If the second parameter is `token::Eof`, it indicates an unexpected
+ /// end of macro invocation. Otherwise, it indicates that no rules expected the given token.
+ Failure(syntax_pos::Span, Token),
+ /// Fatal error (malformed macro?). Abort compilation.
+ Error(syntax_pos::Span, String),
+}
+
+/// A `ParseResult` where the `Success` variant contains a mapping of `Ident`s to `NamedMatch`es.
+/// This represents the mapping of metavars to the token trees they bind to.
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
+/// Count how many metavars are named in the given matcher `ms`.
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match *elt {
})
}
+/// Initialize `len` empty shared `Vec`s to be used to store matches of metavars.
+fn create_matches(len: usize) -> Vec<Rc<Vec<NamedMatch>>> {
+ (0..len).into_iter().map(|_| Rc::new(Vec::new())).collect()
+}
+
+/// Generate the top-level matcher position in which the "dot" is before the first token of the
+/// matcher `ms` and we are going to start matching at position `lo` in the source.
fn initial_matcher_pos(ms: Vec<TokenTree>, lo: BytePos) -> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches = create_matches(match_idx_hi);
Box::new(MatcherPos {
- stack: vec![],
- top_elts: TtSeq(ms),
- sep: None,
+ // Start with the top level matcher given to us
+ top_elts: TtSeq(ms), // "elts" is an abbr. for "elements"
+ // The "dot" is before the first token of the matcher
idx: 0,
- up: None,
+ // We start matching with byte `lo` in the source code
+ sp_lo: lo,
+
+ // Initialize `matches` to a bunch of empty `Vec`s -- one for each metavar in `top_elts`.
+ // `match_lo` for `top_elts` is 0 and `match_hi` is `matches.len()`. `match_cur` is 0 since
+ // we haven't actually matched anything yet.
matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
- sp_lo: lo
+
+ // Haven't descended into any delimiters, so empty stack
+ stack: vec![],
+
+ // Haven't descended into any sequences, so both of these are `None`.
+ sep: None,
+ up: None,
})
}
/// token tree. The depth of the `NamedMatch` structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
-
#[derive(Debug, Clone)]
pub enum NamedMatch {
MatchedSeq(Rc<Vec<NamedMatch>>, syntax_pos::Span),
- MatchedNonterminal(Rc<Nonterminal>)
+ MatchedNonterminal(Rc<Nonterminal>),
}
-fn nameize<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, ms: &[TokenTree], mut res: I)
- -> NamedParseResult {
- fn n_rec<I: Iterator<Item=NamedMatch>>(sess: &ParseSess, m: &TokenTree, res: &mut I,
- ret_val: &mut HashMap<Ident, Rc<NamedMatch>>)
- -> Result<(), (syntax_pos::Span, String)> {
+/// Takes a sequence of token trees `ms` representing a matcher which successfully matched input
+/// and an iterator of items that matched input and produces a `NamedParseResult`.
+fn nameize<I: Iterator<Item = NamedMatch>>(
+ sess: &ParseSess,
+ ms: &[TokenTree],
+ mut res: I,
+) -> NamedParseResult {
+ // Recursively descend into each type of matcher (e.g. sequences, delimited, metavars) and make
+ // sure that each metavar has _exactly one_ binding. If a metavar does not have exactly one
+ // binding, then there is an error. If it does, then we insert the binding into the
+ // `NamedParseResult`.
+ fn n_rec<I: Iterator<Item = NamedMatch>>(
+ sess: &ParseSess,
+ m: &TokenTree,
+ res: &mut I,
+ ret_val: &mut HashMap<Ident, Rc<NamedMatch>>,
+ ) -> Result<(), (syntax_pos::Span, String)> {
match *m {
- TokenTree::Sequence(_, ref seq) => {
- for next_m in &seq.tts {
- n_rec(sess, next_m, res.by_ref(), ret_val)?
- }
- }
- TokenTree::Delimited(_, ref delim) => {
- for next_m in &delim.tts {
- n_rec(sess, next_m, res.by_ref(), ret_val)?;
- }
- }
+ TokenTree::Sequence(_, ref seq) => for next_m in &seq.tts {
+ n_rec(sess, next_m, res.by_ref(), ret_val)?
+ },
+ TokenTree::Delimited(_, ref delim) => for next_m in &delim.tts {
+ n_rec(sess, next_m, res.by_ref(), ret_val)?;
+ },
TokenTree::MetaVarDecl(span, _, id) if id.name == keywords::Invalid.name() => {
if sess.missing_fragment_specifiers.borrow_mut().remove(&span) {
return Err((span, "missing fragment specifier".to_string()));
let mut ret_val = HashMap::new();
for m in ms {
match n_rec(sess, m, res.by_ref(), &mut ret_val) {
- Ok(_) => {},
+ Ok(_) => {}
Err((sp, msg)) => return Error(sp, msg),
}
}
Success(ret_val)
}
-pub enum ParseResult<T> {
- Success(T),
- /// Arm failed to match. If the second parameter is `token::Eof`, it
- /// indicates an unexpected end of macro invocation. Otherwise, it
- /// indicates that no rules expected the given token.
- Failure(syntax_pos::Span, Token),
- /// Fatal error (malformed macro?). Abort compilation.
- Error(syntax_pos::Span, String)
-}
-
+/// Generate an appropriate parsing failure message. For EOF, this is "unexpected end...". For
+/// other tokens, this is "unexpected token...".
pub fn parse_failure_msg(tok: Token) -> String {
match tok {
token::Eof => "unexpected end of macro invocation".to_string(),
- _ => format!("no rules expected the token `{}`", pprust::token_to_string(&tok)),
+ _ => format!(
+ "no rules expected the token `{}`",
+ pprust::token_to_string(&tok)
+ ),
}
}
/// Perform a token equality check, ignoring syntax context (that is, an unhygienic comparison)
-fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
+fn token_name_eq(t1: &Token, t2: &Token) -> bool {
if let (Some(id1), Some(id2)) = (t1.ident(), t2.ident()) {
id1.name == id2.name
} else if let (&token::Lifetime(id1), &token::Lifetime(id2)) = (t1, t2) {
}
}
-fn create_matches(len: usize) -> Vec<Rc<Vec<NamedMatch>>> {
- (0..len).into_iter().map(|_| Rc::new(Vec::new())).collect()
-}
-
-fn inner_parse_loop(sess: &ParseSess,
- cur_items: &mut SmallVector<Box<MatcherPos>>,
- next_items: &mut Vec<Box<MatcherPos>>,
- eof_items: &mut SmallVector<Box<MatcherPos>>,
- bb_items: &mut SmallVector<Box<MatcherPos>>,
- token: &Token,
- span: syntax_pos::Span)
- -> ParseResult<()> {
+/// Process the matcher positions of `cur_items` until it is empty. In the process, this will
+/// produce more items in `next_items`, `eof_items`, and `bb_items`.
+///
+/// For more info about how this happens, see the module-level doc comments and the inline
+/// comments of this function.
+///
+/// # Parameters
+///
+/// - `sess`: the parsing session into which errors are emitted.
+/// - `cur_items`: the set of current items to be processed. This should be empty by the end of a
+/// successful execution of this function.
+/// - `next_items`: the set of newly generated items. These are used to replenish `cur_items` in
+/// the function `parse`.
+/// - `eof_items`: the set of items that would be valid if this was the EOF.
+/// - `bb_items`: the set of items that are waiting for the black-box parser.
+/// - `token`: the current token of the parser.
+/// - `span`: the `Span` in the source code corresponding to the token trees we are trying to match
+/// against the matcher positions in `cur_items`.
+///
+/// # Returns
+///
+/// A `ParseResult`. Note that matches are kept track of through the items generated.
+fn inner_parse_loop(
+ sess: &ParseSess,
+ cur_items: &mut SmallVector<Box<MatcherPos>>,
+ next_items: &mut Vec<Box<MatcherPos>>,
+ eof_items: &mut SmallVector<Box<MatcherPos>>,
+ bb_items: &mut SmallVector<Box<MatcherPos>>,
+ token: &Token,
+ span: syntax_pos::Span,
+) -> ParseResult<()> {
+ // Pop items from `cur_items` until it is empty.
while let Some(mut item) = cur_items.pop() {
- // When unzipped trees end, remove them
+ // When unzipped trees end, remove them. This corresponds to backtracking out of a
+ // delimited submatcher into which we already descended. In backtracking out again, we need
+ // to advance the "dot" past the delimiters in the outer matcher.
while item.idx >= item.top_elts.len() {
match item.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
item.top_elts = elts;
item.idx = idx + 1;
}
- None => break
+ None => break,
}
}
+ // Get the current position of the "dot" (`idx`) in `item` and the number of token trees in
+ // the matcher (`len`).
let idx = item.idx;
let len = item.top_elts.len();
- // at end of sequence
+ // If `idx >= len`, then we are at or past the end of the matcher of `item`.
if idx >= len {
- // We are repeating iff there is a parent
+ // We are repeating iff there is a parent. If the matcher is inside of a repetition,
+ // then we could be at the end of a sequence or at the beginning of the next
+ // repetition.
if item.up.is_some() {
- // Disregarding the separator, add the "up" case to the tokens that should be
- // examined.
- // (remove this condition to make trailing seps ok)
+ // At this point, regardless of whether there is a separator, we should add all
+ // matches from the complete repetition of the sequence to the shared, top-level
+ // `matches` list (actually, `up.matches`, which could itself not be the top-level,
+ // but anyway...). Moreover, we add another item to `cur_items` in which the "dot"
+ // is at the end of the `up` matcher. This ensures that the "dot" in the `up`
+ // matcher is also advanced sufficiently.
+ //
+ // NOTE: removing the condition `idx == len` allows trailing separators.
if idx == len {
+ // Get the `up` matcher
let mut new_pos = item.up.clone().unwrap();
- // update matches (the MBE "parse tree") by appending
- // each tree as a subtree.
-
- // Only touch the binders we have actually bound
+ // Add matches from this repetition to the `matches` of `up`
for idx in item.match_lo..item.match_hi {
let sub = item.matches[idx].clone();
let span = span.with_lo(item.sp_lo);
new_pos.push_match(idx, MatchedSeq(sub, span));
}
+ // Move the "dot" past the repetition in `up`
new_pos.match_cur = item.match_hi;
new_pos.idx += 1;
cur_items.push(new_pos);
}
- // Check if we need a separator
+ // Check if we need a separator.
if idx == len && item.sep.is_some() {
- // We have a separator, and it is the current token.
- if item.sep.as_ref().map(|sep| token_name_eq(token, sep)).unwrap_or(false) {
+ // We have a separator, and it is the current token. We can advance past the
+ // separator token.
+ if item.sep
+ .as_ref()
+ .map(|sep| token_name_eq(token, sep))
+ .unwrap_or(false)
+ {
item.idx += 1;
next_items.push(item);
}
- } else { // we don't need a separator
+ }
+ // We don't need a separator. Move the "dot" back to the beginning of the matcher
+ // and try to match again.
+ else {
item.match_cur = item.match_lo;
item.idx = 0;
cur_items.push(item);
}
- } else {
- // We aren't repeating, so we must be potentially at the end of the input.
+ }
+ // If we are not in a repetition, then being at the end of a matcher means that we have
+ // reached the potential end of the input.
+ else {
eof_items.push(item);
}
- } else {
+ }
+ // We are in the middle of a matcher.
+ else {
+ // Look at what token in the matcher we are trying to match the current token (`token`)
+ // against. Depending on that, we may generate new items.
match item.top_elts.get_tt(idx) {
- /* need to descend into sequence */
+ // Need to descend into a sequence
TokenTree::Sequence(sp, seq) => {
if seq.op == quoted::KleeneOp::ZeroOrMore {
// Examine the case where there are 0 matches of this sequence
top_elts: Tt(TokenTree::Sequence(sp, seq)),
}));
}
+
+ // We need to match a metavar (but the identifier is invalid)... this is an error
TokenTree::MetaVarDecl(span, _, id) if id.name == keywords::Invalid.name() => {
if sess.missing_fragment_specifiers.borrow_mut().remove(&span) {
return Error(span, "missing fragment specifier".to_string());
}
}
+
+ // We need to match a metavar with a valid ident... call out to the black-box
+ // parser by adding an item to `bb_items`.
TokenTree::MetaVarDecl(_, _, id) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
bb_items.push(item);
}
}
+
+ // We need to descend into a delimited submatcher or a doc comment. To do this, we
+ // push the current matcher onto a stack and push a new item containing the
+ // submatcher onto `cur_items`.
+ //
+ // At the beginning of the loop, if we reach the end of the delimited submatcher,
+ // we pop the stack to backtrack out of the descent.
seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut item.top_elts, Tt(seq));
let idx = item.idx;
item.idx = 0;
cur_items.push(item);
}
+
+ // We just matched a normal token. We can just advance the parser.
TokenTree::Token(_, ref t) if token_name_eq(t, token) => {
item.idx += 1;
next_items.push(item);
}
+
+ // There was another token that was not `token`... This means we can't add any
+ // rules. NOTE that this is not necessarily an error unless _all_ items in
+ // `cur_items` end up doing this. There may still be some other matchers that do
+ // end up working out.
TokenTree::Token(..) | TokenTree::MetaVar(..) => {}
}
}
}
+ // Yay a successful parse (so far)!
Success(())
}
-pub fn parse(sess: &ParseSess,
- tts: TokenStream,
- ms: &[TokenTree],
- directory: Option<Directory>,
- recurse_into_modules: bool)
- -> NamedParseResult {
+/// Use the given sequence of token trees (`ms`) as a matcher. Match the given token stream `tts`
+/// against it and return the match.
+///
+/// # Parameters
+///
+/// - `sess`: The session into which errors are emitted
+/// - `tts`: The tokenstream we are matching against the pattern `ms`
+/// - `ms`: A sequence of token trees representing a pattern against which we are matching
+/// - `directory`: Information about the file locations (needed for the black-box parser)
+/// - `recurse_into_modules`: Whether or not to recurse into modules (needed for the black-box
+/// parser)
+pub fn parse(
+ sess: &ParseSess,
+ tts: TokenStream,
+ ms: &[TokenTree],
+ directory: Option<Directory>,
+ recurse_into_modules: bool,
+) -> NamedParseResult {
+ // Create a parser that can be used for the "black box" parts.
let mut parser = Parser::new(sess, tts, directory, recurse_into_modules, true);
+
+ // A queue of possible matcher positions. We initialize it with the matcher position in which
+ // the "dot" is before the first token of the first token tree in `ms`. `inner_parse_loop` then
+    // processes all of these possible matcher positions and produces possible next positions into
+ // `next_items`. After some post-processing, the contents of `next_items` replenish `cur_items`
+ // and we start over again.
let mut cur_items = SmallVector::one(initial_matcher_pos(ms.to_owned(), parser.span.lo()));
- let mut next_items = Vec::new(); // or proceed normally
+ let mut next_items = Vec::new();
loop {
- let mut bb_items = SmallVector::new(); // black-box parsed by parser.rs
+ // Matcher positions black-box parsed by parser.rs (`parser`)
+ let mut bb_items = SmallVector::new();
+
+ // Matcher positions that would be valid if the macro invocation was over now
let mut eof_items = SmallVector::new();
assert!(next_items.is_empty());
- match inner_parse_loop(sess, &mut cur_items, &mut next_items, &mut eof_items, &mut bb_items,
- &parser.token, parser.span) {
- Success(_) => {},
+ // Process `cur_items` until either we have finished the input or we need to get some
+ // parsing from the black-box parser done. The result is that `next_items` will contain a
+ // bunch of possible next matcher positions in `next_items`.
+ match inner_parse_loop(
+ sess,
+ &mut cur_items,
+ &mut next_items,
+ &mut eof_items,
+ &mut bb_items,
+ &parser.token,
+ parser.span,
+ ) {
+ Success(_) => {}
Failure(sp, tok) => return Failure(sp, tok),
Error(sp, msg) => return Error(sp, msg),
}
// inner parse loop handled all cur_items, so it's empty
assert!(cur_items.is_empty());
- /* error messages here could be improved with links to orig. rules */
+ // We need to do some post processing after the `inner_parser_loop`.
+ //
+ // Error messages here could be improved with links to original rules.
+
+ // If we reached the EOF, check that there is EXACTLY ONE possible matcher. Otherwise,
+        // either the parse is ambiguous (which should never happen) or there is a syntax error.
if token_name_eq(&parser.token, &token::Eof) {
if eof_items.len() == 1 {
- let matches = eof_items[0].matches.iter_mut().map(|dv| {
- Rc::make_mut(dv).pop().unwrap()
- });
+ let matches = eof_items[0]
+ .matches
+ .iter_mut()
+ .map(|dv| Rc::make_mut(dv).pop().unwrap());
return nameize(sess, ms, matches);
} else if eof_items.len() > 1 {
- return Error(parser.span, "ambiguity: multiple successful parses".to_string());
+ return Error(
+ parser.span,
+ "ambiguity: multiple successful parses".to_string(),
+ );
} else {
return Failure(parser.span, token::Eof);
}
- } else if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
- let nts = bb_items.iter().map(|item| match item.top_elts.get_tt(item.idx) {
- TokenTree::MetaVarDecl(_, bind, name) => {
- format!("{} ('{}')", name, bind)
- }
- _ => panic!()
- }).collect::<Vec<String>>().join(" or ");
-
- return Error(parser.span, format!(
- "local ambiguity: multiple parsing options: {}",
- match next_items.len() {
- 0 => format!("built-in NTs {}.", nts),
- 1 => format!("built-in NTs {} or 1 other option.", nts),
- n => format!("built-in NTs {} or {} other options.", nts, n),
- }
- ));
- } else if bb_items.is_empty() && next_items.is_empty() {
+ }
+ // Another possibility is that we need to call out to parse some rust nonterminal
+ // (black-box) parser. However, if there is not EXACTLY ONE of these, something is wrong.
+ else if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
+ let nts = bb_items
+ .iter()
+ .map(|item| match item.top_elts.get_tt(item.idx) {
+ TokenTree::MetaVarDecl(_, bind, name) => format!("{} ('{}')", name, bind),
+ _ => panic!(),
+ })
+ .collect::<Vec<String>>()
+ .join(" or ");
+
+ return Error(
+ parser.span,
+ format!(
+ "local ambiguity: multiple parsing options: {}",
+ match next_items.len() {
+ 0 => format!("built-in NTs {}.", nts),
+ 1 => format!("built-in NTs {} or 1 other option.", nts),
+ n => format!("built-in NTs {} or {} other options.", nts, n),
+ }
+ ),
+ );
+ }
+        // If there are no possible next positions AND we aren't waiting for the black-box parser,
+        // then there is a syntax error.
+ else if bb_items.is_empty() && next_items.is_empty() {
return Failure(parser.span, parser.token);
- } else if !next_items.is_empty() {
- /* Now process the next token */
+ }
+ // Dump all possible `next_items` into `cur_items` for the next iteration.
+ else if !next_items.is_empty() {
+ // Now process the next token
cur_items.extend(next_items.drain(..));
parser.bump();
- } else /* bb_items.len() == 1 */ {
+ }
+ // Finally, we have the case where we need to call the black-box parser to get some
+ // nonterminal.
+ else {
+ assert_eq!(bb_items.len(), 1);
+
let mut item = bb_items.pop().unwrap();
if let TokenTree::MetaVarDecl(span, _, ident) = item.top_elts.get_tt(item.idx) {
let match_cur = item.match_cur;
- item.push_match(match_cur,
- MatchedNonterminal(Rc::new(parse_nt(&mut parser, span, &ident.name.as_str()))));
+ item.push_match(
+ match_cur,
+ MatchedNonterminal(Rc::new(parse_nt(&mut parser, span, &ident.name.as_str()))),
+ );
item.idx += 1;
item.match_cur += 1;
} else {
"expr" => token.can_begin_expr(),
"ty" => token.can_begin_type(),
"ident" => token.is_ident(),
- "vis" => match *token { // The follow-set of :vis + "priv" keyword + interpolated
+ "vis" => match *token {
+ // The follow-set of :vis + "priv" keyword + interpolated
Token::Comma | Token::Ident(_) | Token::Interpolated(_) => true,
_ => token.can_begin_type(),
},
"block" => match *token {
Token::OpenDelim(token::Brace) => true,
Token::Interpolated(ref nt) => match nt.0 {
- token::NtItem(_) |
- token::NtPat(_) |
- token::NtTy(_) |
- token::NtIdent(_) |
- token::NtMeta(_) |
- token::NtPath(_) |
- token::NtVis(_) => false, // none of these may start with '{'.
+ token::NtItem(_)
+ | token::NtPat(_)
+ | token::NtTy(_)
+ | token::NtIdent(_)
+ | token::NtMeta(_)
+ | token::NtPath(_)
+ | token::NtVis(_) => false, // none of these may start with '{'.
_ => true,
},
_ => false,
}
}
+/// A call to the "black-box" parser to parse some rust nonterminal.
+///
+/// # Parameters
+///
+/// - `p`: the "black-box" parser to use
+/// - `sp`: the `Span` we want to parse
+/// - `name`: the name of the metavar _matcher_ we want to match (e.g. `tt`, `ident`, `block`,
+/// etc...)
+///
+/// # Returns
+///
+/// The parsed nonterminal.
fn parse_nt<'a>(p: &mut Parser<'a>, sp: Span, name: &str) -> Nonterminal {
if name == "tt" {
return token::NtTT(p.parse_token_tree());
"ident" => match p.token {
token::Ident(sn) => {
p.bump();
- token::NtIdent(Spanned::<Ident>{node: sn, span: p.prev_span})
+ token::NtIdent(Spanned::<Ident> {
+ node: sn,
+ span: p.prev_span,
+ })
}
_ => {
let token_str = pprust::token_to_string(&p.token);
- p.fatal(&format!("expected ident, found {}",
- &token_str[..])).emit();
+ p.fatal(&format!("expected ident, found {}", &token_str[..]))
+ .emit();
FatalError.raise()
}
},
"lifetime" => token::NtLifetime(p.expect_lifetime()),
// this is not supposed to happen, since it has been checked
// when compiling the macro.
- _ => p.span_bug(sp, "invalid fragment specifier")
+ _ => p.span_bug(sp, "invalid fragment specifier"),
}
}
use ast;
use ext::tt::macro_parser;
-use parse::{ParseSess, token};
+use parse::{token, ParseSess};
use print::pprust;
use symbol::keywords;
-use syntax_pos::{DUMMY_SP, Span, BytePos};
+use syntax_pos::{BytePos, Span, DUMMY_SP};
use tokenstream;
use std::rc::Rc;
+/// Contains the sub-token-trees of a "delimited" token tree, such as the contents of `(`. Note
+/// that the delimiter itself might be `NoDelim`.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct Delimited {
pub delim: token::DelimToken,
}
impl Delimited {
+ /// Return the opening delimiter (possibly `NoDelim`).
pub fn open_token(&self) -> token::Token {
token::OpenDelim(self.delim)
}
+ /// Return the closing delimiter (possibly `NoDelim`).
pub fn close_token(&self) -> token::Token {
token::CloseDelim(self.delim)
}
+ /// Return a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
pub fn open_tt(&self, span: Span) -> TokenTree {
let open_span = if span == DUMMY_SP {
DUMMY_SP
TokenTree::Token(open_span, self.open_token())
}
+ /// Return a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
pub fn close_tt(&self, span: Span) -> TokenTree {
let close_span = if span == DUMMY_SP {
DUMMY_SP
/// for token sequences.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum KleeneOp {
+ /// Kleene star (`*`) for zero or more repetitions
ZeroOrMore,
+ /// Kleene plus (`+`) for one or more repetitions
OneOrMore,
}
/// Similar to `tokenstream::TokenTree`, except that `$i`, `$i:ident`, and `$(...)`
-/// are "first-class" token trees.
+/// are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
pub enum TokenTree {
Token(Span, token::Token),
/// E.g. `$var`
MetaVar(Span, ast::Ident),
/// E.g. `$var:expr`. This is only used in the left hand side of MBE macros.
- MetaVarDecl(Span, ast::Ident /* name to bind */, ast::Ident /* kind of nonterminal */),
+ MetaVarDecl(
+ Span,
+ ast::Ident, /* name to bind */
+ ast::Ident, /* kind of nonterminal */
+ ),
}
impl TokenTree {
+ /// Return the number of tokens in the tree.
pub fn len(&self) -> usize {
match *self {
TokenTree::Delimited(_, ref delimed) => match delimed.delim {
}
}
+ /// Returns true if the given token tree contains no other tokens. This is vacuously true for
+ /// single tokens or metavar/decls, but may be false for delimited trees or sequences.
pub fn is_empty(&self) -> bool {
match *self {
TokenTree::Delimited(_, ref delimed) => match delimed.delim {
}
}
+ /// Get the `index`-th sub-token-tree. This only makes sense for delimited trees and sequences.
pub fn get_tt(&self, index: usize) -> TokenTree {
match (self, index) {
(&TokenTree::Delimited(_, ref delimed), _) if delimed.delim == token::NoDelim => {
/// Retrieve the `TokenTree`'s span.
pub fn span(&self) -> Span {
match *self {
- TokenTree::Token(sp, _) |
- TokenTree::MetaVar(sp, _) |
- TokenTree::MetaVarDecl(sp, _, _) |
- TokenTree::Delimited(sp, _) |
- TokenTree::Sequence(sp, _) => sp,
+ TokenTree::Token(sp, _)
+ | TokenTree::MetaVar(sp, _)
+ | TokenTree::MetaVarDecl(sp, _, _)
+ | TokenTree::Delimited(sp, _)
+ | TokenTree::Sequence(sp, _) => sp,
}
}
}
-pub fn parse(input: tokenstream::TokenStream, expect_matchers: bool, sess: &ParseSess)
- -> Vec<TokenTree> {
+/// Takes a `tokenstream::TokenStream` and returns a `Vec<self::TokenTree>`. Specifically, this
+/// takes a generic `TokenStream`, such as is used in the rest of the compiler, and returns a
+/// collection of `TokenTree` for use in parsing a macro.
+///
+/// # Parameters
+///
+/// - `input`: a token stream to read from, the contents of which we are parsing.
+/// - `expect_matchers`: `parse` can be used to parse either the "patterns" or the "body" of a
+/// macro. Both take roughly the same form _except_ that in a pattern, metavars are declared with
+/// their "matcher" type. For example `$var:expr` or `$id:ident`. In this example, `expr` and
+/// `ident` are "matchers". They are not present in the body of a macro rule -- just in the
+/// pattern, so we pass a parameter to indicate whether to expect them or not.
+/// - `sess`: the parsing session. Any errors will be emitted to this session.
+///
+/// # Returns
+///
+/// A collection of `self::TokenTree`. There may also be some errors emitted to `sess`.
+pub fn parse(
+ input: tokenstream::TokenStream,
+ expect_matchers: bool,
+ sess: &ParseSess,
+) -> Vec<TokenTree> {
+ // Will contain the final collection of `self::TokenTree`
let mut result = Vec::new();
+
+ // For each token tree in `input`, parse the token into a `self::TokenTree`, consuming
+ // additional trees if need be.
let mut trees = input.trees();
while let Some(tree) = trees.next() {
let tree = parse_tree(tree, &mut trees, expect_matchers, sess);
+
+ // Given the parsed tree, if there is a metavar and we are expecting matchers, actually
+ // parse out the matcher (i.e. in `$id:ident` this would parse the `:` and `ident`).
match tree {
TokenTree::MetaVar(start_sp, ident) if expect_matchers => {
let span = match trees.next() {
Some(kind) => {
let span = end_sp.with_lo(start_sp.lo());
result.push(TokenTree::MetaVarDecl(span, ident, kind));
- continue
+ continue;
}
_ => end_sp,
},
- tree => tree.as_ref().map(tokenstream::TokenTree::span).unwrap_or(span),
+ tree => tree.as_ref()
+ .map(tokenstream::TokenTree::span)
+ .unwrap_or(span),
},
- tree => tree.as_ref().map(tokenstream::TokenTree::span).unwrap_or(start_sp),
+ tree => tree.as_ref()
+ .map(tokenstream::TokenTree::span)
+ .unwrap_or(start_sp),
};
sess.missing_fragment_specifiers.borrow_mut().insert(span);
- result.push(TokenTree::MetaVarDecl(span, ident, keywords::Invalid.ident()));
+ result.push(TokenTree::MetaVarDecl(
+ span,
+ ident,
+ keywords::Invalid.ident(),
+ ));
}
+
+ // Not a metavar or no matchers allowed, so just return the tree
_ => result.push(tree),
}
}
result
}
-fn parse_tree<I>(tree: tokenstream::TokenTree,
- trees: &mut I,
- expect_matchers: bool,
- sess: &ParseSess)
- -> TokenTree
- where I: Iterator<Item = tokenstream::TokenTree>,
+/// Takes a `tokenstream::TokenTree` and returns a `self::TokenTree`. Specifically, this takes a
+/// generic `TokenTree`, such as is used in the rest of the compiler, and returns a `TokenTree`
+/// for use in parsing a macro.
+///
+/// Converting the given tree may involve reading more tokens.
+///
+/// # Parameters
+///
+/// - `tree`: the tree we wish to convert.
+/// - `trees`: an iterator over trees. We may need to read more tokens from it in order to finish
+/// converting `tree`.
+/// - `expect_matchers`: same as for `parse` (see above).
+/// - `sess`: the parsing session. Any errors will be emitted to this session.
+fn parse_tree<I>(
+ tree: tokenstream::TokenTree,
+ trees: &mut I,
+ expect_matchers: bool,
+ sess: &ParseSess,
+) -> TokenTree
+where
+ I: Iterator<Item = tokenstream::TokenTree>,
{
+ // Depending on what `tree` is, we could be parsing different parts of a macro
match tree {
+ // `tree` is a `$` token. Look at the next token in `trees`
tokenstream::TokenTree::Token(span, token::Dollar) => match trees.next() {
+ // `tree` is followed by a delimited set of token trees. This indicates the beginning
+ // of a repetition sequence in the macro (e.g. `$(pat)*`).
Some(tokenstream::TokenTree::Delimited(span, delimited)) => {
+ // Must have `(` not `{` or `[`
if delimited.delim != token::Paren {
let tok = pprust::token_to_string(&token::OpenDelim(delimited.delim));
let msg = format!("expected `(`, found `{}`", tok);
sess.span_diagnostic.span_err(span, &msg);
}
+ // Parse the contents of the sequence itself
let sequence = parse(delimited.tts.into(), expect_matchers, sess);
+ // Get the Kleene operator and optional separator
let (separator, op) = parse_sep_and_kleene_op(trees, span, sess);
+ // Count the number of captured "names" (i.e. named metavars)
let name_captures = macro_parser::count_names(&sequence);
- TokenTree::Sequence(span, Rc::new(SequenceRepetition {
- tts: sequence,
- separator,
- op,
- num_captures: name_captures,
- }))
+ TokenTree::Sequence(
+ span,
+ Rc::new(SequenceRepetition {
+ tts: sequence,
+ separator,
+ op,
+ num_captures: name_captures,
+ }),
+ )
}
+
+ // `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate` special
+ // metavariable that names the crate of the invocation.
Some(tokenstream::TokenTree::Token(ident_span, ref token)) if token.is_ident() => {
let ident = token.ident().unwrap();
let span = ident_span.with_lo(span.lo());
if ident.name == keywords::Crate.name() {
- let ident = ast::Ident { name: keywords::DollarCrate.name(), ..ident };
+ let ident = ast::Ident {
+ name: keywords::DollarCrate.name(),
+ ..ident
+ };
TokenTree::Token(span, token::Ident(ident))
} else {
TokenTree::MetaVar(span, ident)
}
}
+
+ // `tree` is followed by a random token. This is an error.
Some(tokenstream::TokenTree::Token(span, tok)) => {
- let msg = format!("expected identifier, found `{}`", pprust::token_to_string(&tok));
+ let msg = format!(
+ "expected identifier, found `{}`",
+ pprust::token_to_string(&tok)
+ );
sess.span_diagnostic.span_err(span, &msg);
TokenTree::MetaVar(span, keywords::Invalid.ident())
}
+
+ // There are no more tokens. Just return the `$` we already have.
None => TokenTree::Token(span, token::Dollar),
},
+
+ // `tree` is an arbitrary token. Keep it.
tokenstream::TokenTree::Token(span, tok) => TokenTree::Token(span, tok),
- tokenstream::TokenTree::Delimited(span, delimited) => {
- TokenTree::Delimited(span, Rc::new(Delimited {
+
+ // `tree` is the beginning of a delimited set of tokens (e.g. `(` or `{`). We need to
+ // descend into the delimited set and further parse it.
+ tokenstream::TokenTree::Delimited(span, delimited) => TokenTree::Delimited(
+ span,
+ Rc::new(Delimited {
delim: delimited.delim,
tts: parse(delimited.tts.into(), expect_matchers, sess),
- }))
- }
+ }),
+ ),
}
}
-fn parse_sep_and_kleene_op<I>(input: &mut I, span: Span, sess: &ParseSess)
- -> (Option<token::Token>, KleeneOp)
- where I: Iterator<Item = tokenstream::TokenTree>,
+/// Attempt to parse a single Kleene star, possibly with a separator.
+///
+/// For example, in a pattern such as `$(a),*`, `a` is the pattern to be repeated, `,` is the
+/// separator, and `*` is the Kleene operator. This function is specifically concerned with parsing
+/// the last two tokens of such a pattern: namely, the optional separator and the Kleene operator
+/// itself. Note that here we are parsing the _macro_ itself, rather than trying to match some
+/// stream of tokens in an invocation of a macro.
+///
+/// This function will take some input iterator `input` corresponding to `span` and a parsing
+/// session `sess`. If the next one (or possibly two) tokens in `input` correspond to a Kleene
+/// operator and separator, then a tuple with `(separator, KleeneOp)` is returned. Otherwise, an
+/// error with the appropriate span is emitted to `sess` and a dummy value is returned.
+fn parse_sep_and_kleene_op<I>(
+ input: &mut I,
+ span: Span,
+ sess: &ParseSess,
+) -> (Option<token::Token>, KleeneOp)
+where
+ I: Iterator<Item = tokenstream::TokenTree>,
{
fn kleene_op(token: &token::Token) -> Option<KleeneOp> {
match *token {
}
}
+ // We attempt to look at the next two token trees in `input`. I will call the first #1 and the
+ // second #2. If #1 and #2 don't match a valid KleeneOp with/without separator, that is an
+ // error, and we should emit an error on the most specific span possible.
let span = match input.next() {
+ // #1 is a token
Some(tokenstream::TokenTree::Token(span, tok)) => match kleene_op(&tok) {
+ // #1 is a KleeneOp with no separator
Some(op) => return (None, op),
+
+ // #1 is not a KleeneOp, but may be a separator... need to look at #2
None => match input.next() {
+ // #2 is a token
Some(tokenstream::TokenTree::Token(span, tok2)) => match kleene_op(&tok2) {
+ // #2 is a KleeneOp, so #1 must be a separator
Some(op) => return (Some(tok), op),
+
+ // #2 is not a KleeneOp... error
None => span,
},
- tree => tree.as_ref().map(tokenstream::TokenTree::span).unwrap_or(span),
- }
+
+ // #2 is not a token at all... error
+ tree => tree.as_ref()
+ .map(tokenstream::TokenTree::span)
+ .unwrap_or(span),
+ },
},
- tree => tree.as_ref().map(tokenstream::TokenTree::span).unwrap_or(span),
+
+ // #1 is not a token at all... error
+ tree => tree.as_ref()
+ .map(tokenstream::TokenTree::span)
+ .unwrap_or(span),
};
+ // Error...
sess.span_diagnostic.span_err(span, "expected `*` or `+`");
(None, KleeneOp::ZeroOrMore)
}
// allow `#[must_use]` on functions and comparison operators (RFC 1940)
(active, fn_must_use, "1.21.0", Some(43302)),
- // allow '|' at beginning of match arms (RFC 1925)
- (active, match_beginning_vert, "1.21.0", Some(44101)),
-
// Future-proofing enums/structs with #[non_exhaustive] attribute (RFC 2008)
(active, non_exhaustive, "1.22.0", Some(44109)),
// In-band lifetime bindings (e.g. `fn foo(x: &'a u8) -> &'a u8`)
(active, in_band_lifetimes, "1.23.0", Some(44524)),
- // Nested groups in `use` (RFC 2128)
- (active, use_nested_groups, "1.23.0", Some(44494)),
-
// generic associated types (RFC 1598)
(active, generic_associated_types, "1.23.0", Some(44265)),
(accepted, abi_sysv64, "1.24.0", Some(36167)),
// Allows `repr(align(16))` struct attribute (RFC 1358)
(accepted, repr_align, "1.24.0", Some(33626)),
+ // allow '|' at beginning of match arms (RFC 1925)
+ (accepted, match_beginning_vert, "1.25.0", Some(44101)),
+ // Nested groups in `use` (RFC 2128)
+ (accepted, use_nested_groups, "1.25.0", Some(44494)),
);
// If you change this, please modify src/doc/unstable-book as well. You must
("thread_local", Whitelisted, Gated(Stability::Unstable,
"thread_local",
"`#[thread_local]` is an experimental feature, and does \
- not currently handle destructors. There is no \
- corresponding `#[task_local]` mapping to the task \
- model",
+ not currently handle destructors.",
cfg_fn!(thread_local))),
("rustc_on_unimplemented", Normal, Gated(Stability::Unstable,
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
+ ("rustc_serialize_exclude_null", Normal, Gated(Stability::Unstable,
+ "rustc_attrs",
+ "the `#[rustc_serialize_exclude_null]` attribute \
+ is an internal-only feature",
+ cfg_fn!(rustc_attrs))),
("rustc_synthetic", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this attribute \
}
fn visit_arm(&mut self, arm: &'a ast::Arm) {
- if let Some(span) = arm.beginning_vert {
- gate_feature_post!(&self, match_beginning_vert,
- span,
- "Use of a '|' at the beginning of a match arm is experimental")
- }
visit::walk_arm(self, arm)
}
visit::walk_path(self, path);
}
- fn visit_use_tree(&mut self, use_tree: &'a ast::UseTree, id: NodeId, nested: bool) {
- if nested {
- match use_tree.kind {
- ast::UseTreeKind::Simple(_) => {
- if use_tree.prefix.segments.len() != 1 {
- gate_feature_post!(&self, use_nested_groups, use_tree.span,
- "paths in `use` groups are experimental");
- }
- }
- ast::UseTreeKind::Glob => {
- gate_feature_post!(&self, use_nested_groups, use_tree.span,
- "glob imports in `use` groups are experimental");
- }
- ast::UseTreeKind::Nested(_) => {
- gate_feature_post!(&self, use_nested_groups, use_tree.span,
- "nested groups in `use` are experimental");
- }
- }
- }
-
- visit::walk_use_tree(self, use_tree, id);
- }
-
fn visit_vis(&mut self, vis: &'a ast::Visibility) {
if let ast::Visibility::Crate(span, ast::CrateSugar::JustCrate) = *vis {
gate_feature_post!(&self, crate_visibility_modifier, span,
fold_attrs(attrs.into(), fld).into()
}
-pub fn noop_fold_arm<T: Folder>(Arm {attrs, pats, guard, body, beginning_vert}: Arm,
+pub fn noop_fold_arm<T: Folder>(Arm {attrs, pats, guard, body}: Arm,
fld: &mut T) -> Arm {
Arm {
attrs: fold_attrs(attrs, fld),
pats: pats.move_map(|x| fld.fold_pat(x)),
guard: guard.map(|x| fld.fold_expr(x)),
body: fld.fold_expr(body),
- beginning_vert,
}
}
registry: Option<Registry>,
cm: Rc<CodeMapper + 'static>,
pretty: bool,
+ /// Whether "approximate suggestions" are enabled in the config
+ approximate_suggestions: bool,
}
impl JsonEmitter {
pub fn stderr(registry: Option<Registry>,
code_map: Rc<CodeMap>,
- pretty: bool) -> JsonEmitter {
+ pretty: bool,
+ approximate_suggestions: bool) -> JsonEmitter {
JsonEmitter {
dst: Box::new(io::stderr()),
registry,
cm: code_map,
pretty,
+ approximate_suggestions,
}
}
pub fn basic(pretty: bool) -> JsonEmitter {
let file_path_mapping = FilePathMapping::empty();
- JsonEmitter::stderr(None, Rc::new(CodeMap::new(file_path_mapping)), pretty)
+ JsonEmitter::stderr(None, Rc::new(CodeMap::new(file_path_mapping)),
+ pretty, false)
}
pub fn new(dst: Box<Write + Send>,
registry: Option<Registry>,
code_map: Rc<CodeMap>,
- pretty: bool) -> JsonEmitter {
+ pretty: bool,
+ approximate_suggestions: bool) -> JsonEmitter {
JsonEmitter {
dst,
registry,
cm: code_map,
pretty,
+ approximate_suggestions,
}
}
}
}
#[derive(RustcEncodable)]
+#[allow(unused_attributes)]
struct DiagnosticSpan {
file_name: String,
byte_start: u32,
/// If we are suggesting a replacement, this will contain text
/// that should be sliced in atop this span.
suggested_replacement: Option<String>,
+ /// If the suggestion is approximate
+ #[rustc_serialize_exclude_null]
+ suggestion_approximate: Option<bool>,
/// Macro invocations that created the code at this span, if any.
expansion: Option<Box<DiagnosticSpanMacroExpansion>>,
}
}
let buf = BufWriter::default();
let output = buf.clone();
- EmitterWriter::new(Box::new(buf), Some(je.cm.clone()), false).emit(db);
+ EmitterWriter::new(Box::new(buf), Some(je.cm.clone()), false, false).emit(db);
let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
let output = String::from_utf8(output).unwrap();
impl DiagnosticSpan {
fn from_span_label(span: SpanLabel,
- suggestion: Option<&String>,
+ suggestion: Option<(&String, bool)>,
je: &JsonEmitter)
-> DiagnosticSpan {
Self::from_span_etc(span.span,
fn from_span_etc(span: Span,
is_primary: bool,
label: Option<String>,
- suggestion: Option<&String>,
+ suggestion: Option<(&String, bool)>,
je: &JsonEmitter)
-> DiagnosticSpan {
// obtain the full backtrace from the `macro_backtrace`
fn from_span_full(span: Span,
is_primary: bool,
label: Option<String>,
- suggestion: Option<&String>,
+ suggestion: Option<(&String, bool)>,
mut backtrace: vec::IntoIter<MacroBacktrace>,
je: &JsonEmitter)
-> DiagnosticSpan {
def_site_span,
})
});
+
+ let suggestion_approximate = if je.approximate_suggestions {
+ suggestion.map(|x| x.1)
+ } else {
+ None
+ };
+
DiagnosticSpan {
file_name: start.file.name.to_string(),
byte_start: span.lo().0 - start.file.start_pos.0,
column_end: end.col.0 + 1,
is_primary,
text: DiagnosticSpanLine::from_span(span, je),
- suggested_replacement: suggestion.cloned(),
+ suggested_replacement: suggestion.map(|x| x.0.clone()),
+ suggestion_approximate,
expansion: backtrace_step,
label,
}
suggestion.substitutions
.iter()
.flat_map(|substitution| {
- substitution.parts.iter().map(move |suggestion| {
+ substitution.parts.iter().map(move |suggestion_inner| {
let span_label = SpanLabel {
- span: suggestion.span,
+ span: suggestion_inner.span,
is_primary: true,
label: None,
};
DiagnosticSpan::from_span_label(span_label,
- Some(&suggestion.snippet),
+ Some((&suggestion_inner.snippet,
+ suggestion.approximate)),
je)
})
})
#![feature(match_default_bindings)]
#![feature(i128_type)]
#![feature(const_atomic_usize_new)]
+#![feature(rustc_attrs)]
// See librustc_cratesio_shim/Cargo.toml for a comment explaining this.
#[allow(unused_extern_crates)]
self.err_span(self.mk_sp(from_pos, to_pos), m)
}
+ /// Pushes a character to a message string for error reporting
+ fn push_escaped_char_for_msg(m: &mut String, c: char) {
+ match c {
+ '\u{20}'...'\u{7e}' => {
+ // Don't escape \, ' or " for user-facing messages
+ m.push(c);
+ }
+ _ => {
+ for c in c.escape_default() {
+ m.push(c);
+ }
+ }
+ }
+ }
+
/// Report a lexical error spanning [`from_pos`, `to_pos`), appending an
/// escaped character to the error message
fn fatal_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) -> FatalError {
let mut m = m.to_string();
m.push_str(": ");
- for c in c.escape_default() {
- m.push(c)
- }
+ Self::push_escaped_char_for_msg(&mut m, c);
self.fatal_span_(from_pos, to_pos, &m[..])
}
fn struct_fatal_span_char(&self,
-> DiagnosticBuilder<'a> {
let mut m = m.to_string();
m.push_str(": ");
- for c in c.escape_default() {
- m.push(c)
- }
+ Self::push_escaped_char_for_msg(&mut m, c);
self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), &m[..])
}
fn err_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) {
let mut m = m.to_string();
m.push_str(": ");
- for c in c.escape_default() {
- m.push(c)
- }
+ Self::push_escaped_char_for_msg(&mut m, c);
self.err_span_(from_pos, to_pos, &m[..]);
}
fn struct_err_span_char(&self,
-> DiagnosticBuilder<'a> {
let mut m = m.to_string();
m.push_str(": ");
- for c in c.escape_default() {
- m.push(c)
- }
+ Self::push_escaped_char_for_msg(&mut m, c);
self.sess.span_diagnostic.struct_span_err(self.mk_sp(from_pos, to_pos), &m[..])
}
fn mk_sess(cm: Rc<CodeMap>) -> ParseSess {
let emitter = errors::emitter::EmitterWriter::new(Box::new(io::sink()),
Some(cm.clone()),
+ false,
false);
ParseSess {
span_diagnostic: errors::Handler::with_emitter(true, false, Box::new(emitter)),
expect.clone()
};
(format!("expected one of {}, found `{}`", expect, actual),
- (self.prev_span.next_point(), format!("expected one of {} here", short_expect)))
+ (self.sess.codemap().next_point(self.prev_span),
+ format!("expected one of {} here", short_expect)))
} else if expected.is_empty() {
(format!("unexpected token: `{}`", actual),
(self.prev_span, "unexpected token after this".to_string()))
} else {
(format!("expected {}, found `{}`", expect, actual),
- (self.prev_span.next_point(), format!("expected {} here", expect)))
+ (self.sess.codemap().next_point(self.prev_span),
+ format!("expected {} here", expect)))
};
let mut err = self.fatal(&msg_exp);
let sp = if self.token == token::Token::Eof {
self.expect_keyword(keywords::Fn)?;
let (inputs, variadic) = self.parse_fn_args(false, true)?;
- let ret_ty = self.parse_ret_ty()?;
+ let ret_ty = self.parse_ret_ty(false)?;
let decl = P(FnDecl {
inputs,
output: ret_ty,
}
/// Parse optional return type [ -> TY ] in function decl
- pub fn parse_ret_ty(&mut self) -> PResult<'a, FunctionRetTy> {
+ fn parse_ret_ty(&mut self, allow_plus: bool) -> PResult<'a, FunctionRetTy> {
if self.eat(&token::RArrow) {
- Ok(FunctionRetTy::Ty(self.parse_ty_no_plus()?))
+ Ok(FunctionRetTy::Ty(self.parse_ty_common(allow_plus, true)?))
} else {
Ok(FunctionRetTy::Default(self.span.with_hi(self.span.lo())))
}
maybe_whole!(self, NtTy, |x| x);
let lo = self.span;
+ let mut impl_dyn_multi = false;
let node = if self.eat(&token::OpenDelim(token::Paren)) {
// `(TYPE)` is a parenthesized type.
// `(TYPE,)` is a tuple with a single field of type TYPE.
self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
}
} else if self.eat_keyword(keywords::Impl) {
- // FIXME: figure out priority of `+` in `impl Trait1 + Trait2` (#34511).
- TyKind::ImplTrait(self.parse_ty_param_bounds()?)
+ // Always parse bounds greedily for better error recovery.
+ let bounds = self.parse_ty_param_bounds()?;
+ impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
+ TyKind::ImplTrait(bounds)
} else if self.check_keyword(keywords::Dyn) &&
self.look_ahead(1, |t| t.can_begin_bound() && !can_continue_type_after_ident(t)) {
- // FIXME: figure out priority of `+` in `dyn Trait1 + Trait2` (#34511).
self.bump(); // `dyn`
- TyKind::TraitObject(self.parse_ty_param_bounds()?, TraitObjectSyntax::Dyn)
+ // Always parse bounds greedily for better error recovery.
+ let bounds = self.parse_ty_param_bounds()?;
+ impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
+ TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
} else if self.check(&token::Question) ||
self.check_lifetime() && self.look_ahead(1, |t| t == &token::BinOp(token::Plus)) {
// Bound list (trait object type)
let ty = Ty { node, span, id: ast::DUMMY_NODE_ID };
// Try to recover from use of `+` with incorrect priority.
+ self.maybe_report_ambiguous_plus(allow_plus, impl_dyn_multi, &ty);
self.maybe_recover_from_bad_type_plus(allow_plus, &ty)?;
let ty = self.maybe_recover_from_bad_qpath(ty, allow_qpath_recovery)?;
Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
}
+ fn maybe_report_ambiguous_plus(&mut self, allow_plus: bool, impl_dyn_multi: bool, ty: &Ty) {
+ if !allow_plus && impl_dyn_multi {
+ let sum_with_parens = format!("({})", pprust::ty_to_string(&ty));
+ self.struct_span_err(ty.span, "ambiguous `+` in a type")
+ .span_suggestion(ty.span, "use parentheses to disambiguate", sum_with_parens)
+ .emit();
+ }
+ }
+
fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
// Do not add `+` to expected tokens.
if !allow_plus || self.token != token::BinOp(token::Plus) {
// return. This won't catch blocks with an explicit `return`, but that would be caught by
// the dead code lint.
if self.eat_keyword(keywords::Else) || !cond.returns() {
- let sp = lo.next_point();
+ let sp = self.sess.codemap().next_point(lo);
let mut err = self.diagnostic()
.struct_span_err(sp, "missing condition for `if` statemement");
err.span_label(sp, "expected if condition here");
let attrs = self.parse_outer_attributes()?;
// Allow a '|' before the pats (RFC 1925)
- let beginning_vert = if self.eat(&token::BinOp(token::Or)) {
- Some(self.prev_span)
- } else {
- None
- };
+ self.eat(&token::BinOp(token::Or));
let pats = self.parse_pats()?;
let guard = if self.eat_keyword(keywords::If) {
Some(self.parse_expr()?)
pats,
guard,
body: expr,
- beginning_vert,
})
}
pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<'a, P<FnDecl>> {
let (args, variadic) = self.parse_fn_args(true, allow_variadic)?;
- let ret_ty = self.parse_ret_ty()?;
+ let ret_ty = self.parse_ret_ty(true)?;
Ok(P(FnDecl {
inputs: args,
self.expect(&token::CloseDelim(token::Paren))?;
Ok(P(FnDecl {
inputs: fn_inputs,
- output: self.parse_ret_ty()?,
+ output: self.parse_ret_ty(true)?,
variadic: false
}))
}
args
}
};
- let output = self.parse_ret_ty()?;
+ let output = self.parse_ret_ty(true)?;
Ok(P(FnDecl {
inputs: inputs_captures,
self.expect(&token::Semi)?;
let prev_span = self.prev_span;
+
Ok(self.mk_item(lo.to(prev_span),
ident,
ItemKind::ExternCrate(maybe_path),
keywords::Unsafe.name(),
keywords::While.name(),
keywords::Yield.name(),
+ keywords::Static.name(),
].contains(&ident.name)
}
let emitter = EmitterWriter::new(Box::new(Shared { data: output.clone() }),
Some(code_map.clone()),
+ false,
false);
let handler = Handler::with_emitter(true, false, Box::new(emitter));
handler.span_err(msp, "foo");
}
}
+ // If there are no outputs, the inline assembly is executed just for its side effects,
+ // so ensure that it is volatile
+ if outputs.is_empty() {
+ volatile = true;
+ }
+
MacEager::expr(P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::InlineAsm(P(ast::InlineAsm {
Struct(_, ref fields) => {
let emit_struct_field = cx.ident_of("emit_struct_field");
let mut stmts = Vec::new();
- for (i, &FieldInfo { name, ref self_, span, .. }) in fields.iter().enumerate() {
+ for (i, &FieldInfo { name, ref self_, span, attrs, .. }) in fields.iter().enumerate() {
let name = match name {
Some(id) => id.name,
None => Symbol::intern(&format!("_field{}", i)),
} else {
cx.expr(span, ExprKind::Ret(Some(call)))
};
- stmts.push(cx.stmt_expr(call));
+
+ // This exists for https://github.com/rust-lang/rust/pull/47540
+ //
+ // If we decide to stabilize that flag this can be removed
+ let expr = if attrs.iter().any(|a| a.check_name("rustc_serialize_exclude_null")) {
+ let is_some = cx.ident_of("is_some");
+ let condition = cx.expr_method_call(span, self_.clone(), is_some, vec![]);
+ cx.expr_if(span, condition, call, None)
+ } else {
+ call
+ };
+ let stmt = cx.stmt_expr(expr);
+ stmts.push(stmt);
}
// unit structs have no fields and need to return Ok()
&catch_all_substructure);
// Final wrinkle: the self_args are expressions that deref
- // down to desired l-values, but we cannot actually deref
+ // down to desired places, but we cannot actually deref
// them when they are fed as r-values into a tuple
// expression; here add a layer of borrowing, turning
// `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`.
} else {
// Final wrinkle: the self_args are expressions that deref
- // down to desired l-values, but we cannot actually deref
+ // down to desired places, but we cannot actually deref
// them when they are fed as r-values into a tuple
// expression; here add a layer of borrowing, turning
// `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`.
self.data().with_ctxt(ctxt)
}
- /// Returns a new span representing just the end-point of this span
- pub fn end_point(self) -> Span {
- let span = self.data();
- let lo = cmp::max(span.hi.0 - 1, span.lo.0);
- span.with_lo(BytePos(lo))
- }
-
- /// Returns a new span representing the next character after the end-point of this span
- pub fn next_point(self) -> Span {
- let span = self.data();
- let lo = cmp::max(span.hi.0, span.lo.0 + 1);
- Span::new(BytePos(lo), BytePos(lo), span.ctxt)
- }
-
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SP) { other } else { self }
/// Return a `Span` that would enclose both `self` and `end`.
pub fn to(self, end: Span) -> Span {
- let span = self.data();
- let end = end.data();
+ let span_data = self.data();
+ let end_data = end.data();
+ // FIXME(jseyfried): self.ctxt should always equal end.ctxt here (c.f. issue #23480)
+ // Return the macro span on its own to avoid weird diagnostic output. It is preferable to
+ // have an incomplete span than a completely nonsensical one.
+ if span_data.ctxt != end_data.ctxt {
+ if span_data.ctxt == SyntaxContext::empty() {
+ return end;
+ } else if end_data.ctxt == SyntaxContext::empty() {
+ return self;
+ }
+ // both spans fall within a macro
+ // FIXME(estebank) check if it is the *same* macro
+ }
Span::new(
- cmp::min(span.lo, end.lo),
- cmp::max(span.hi, end.hi),
- // FIXME(jseyfried): self.ctxt should always equal end.ctxt here (c.f. issue #23480)
- if span.ctxt == SyntaxContext::empty() { end.ctxt } else { span.ctxt },
+ cmp::min(span_data.lo, end_data.lo),
+ cmp::max(span_data.hi, end_data.hi),
+ if span_data.ctxt == SyntaxContext::empty() { end_data.ctxt } else { span_data.ctxt },
)
}
let target = env::var("TARGET").expect("TARGET was not set");
if target.contains("linux") {
- if target.contains("musl") && !target.contains("mips") {
+ if target.contains("musl") {
// musl is handled in lib.rs
} else if !target.contains("android") {
println!("cargo:rustc-link-lib=gcc_s");
}
}
-#[cfg(all(target_env = "musl", not(target_arch = "mips")))]
+#[cfg(target_env = "musl")]
#[link(name = "unwind", kind = "static", cfg(target_feature = "crt-static"))]
#[link(name = "gcc_s", cfg(not(target_feature = "crt-static")))]
extern {}
--- /dev/null
+Subproject commit 2717444753318e461e0c3b30dacd03ffbac96903
name = "rustc"
path = "rustc.rs"
-# All optional dependencies so the features passed to this Cargo.toml select
-# what should actually be built.
[dependencies]
rustc_back = { path = "../librustc_back" }
rustc_driver = { path = "../librustc_driver" }
[features]
jemalloc = ["rustc_back/jemalloc"]
-llvm = ["rustc_driver/llvm"]
StringMap<FunctionImporter::ImportMapTy> ImportLists;
StringMap<FunctionImporter::ExportSetTy> ExportLists;
StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
+
+#if LLVM_VERSION_GE(7, 0)
+ LLVMRustThinLTOData() : Index(/* isPerformingAnalysis = */ false) {}
+#endif
};
// Just an argument to the `LLVMRustCreateThinLTOData` function below.
//
// This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
#if LLVM_VERSION_GE(5, 0)
+#if LLVM_VERSION_GE(7, 0)
+ auto deadIsPrevailing = [&](GlobalValue::GUID G) {
+ return PrevailingType::Unknown;
+ };
+ computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols, deadIsPrevailing);
+#else
computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols);
+#endif
ComputeCrossModuleImport(
Ret->Index,
Ret->ModuleToDefinedGVSummaries,
pub fn str(_: &[u8]) {
}
-// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1)
+// CHECK: @trait_borrow({}* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1)
// FIXME #25759 This should also have `nocapture`
#[no_mangle]
pub fn trait_borrow(_: &Drop) {
}
-// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly)
+// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly)
#[no_mangle]
pub fn trait_box(_: Box<Drop>) {
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -O
+
+// ignore-asmjs
+
+#![feature(asm)]
+#![crate_type = "lib"]
+
+// Check that inline assembly expressions without any outputs
+// are marked as having side effects / being volatile
+
+// CHECK-LABEL: @assembly
+#[no_mangle]
+pub fn assembly() {
+ unsafe { asm!("") }
+// CHECK: tail call void asm sideeffect "", {{.*}}
+}
// ignore-wasm
// ignore-emscripten
// ignore-windows
-// no-system-llvm
+// min-system-llvm-version 5.0
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
trait Trait {
fn bar<'a,'b:'a>(x: &'a str, y: &'b str);
+ //~^ NOTE lifetimes in impl do not match this method in trait
}
struct Foo;
impl Trait for Foo {
fn bar<'a,'b>(x: &'a str, y: &'b str) { //~ ERROR E0195
- //~^ lifetimes do not match trait
+ //~^ NOTE lifetimes do not match method in trait
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(use_nested_groups)]
#![allow(unused_imports)]
mod foo {}
}
struct Abc;
+
impl Foo for Abc {
const X: EFoo = EFoo::B;
}
pub fn test<A: Foo, B: Foo>(arg: EFoo) {
match arg {
- A::X => println!("A::X"), //~ error: statics cannot be referenced in patterns [E0158]
- B::X => println!("B::X"), //~ error: statics cannot be referenced in patterns [E0158]
+ A::X => println!("A::X"),
+ //~^ error: associated consts cannot be referenced in patterns [E0158]
+ B::X => println!("B::X"),
+ //~^ error: associated consts cannot be referenced in patterns [E0158]
_ => (),
}
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::any::TypeId;
+
+struct A;
+
+fn main() {
+ const A_ID: TypeId = TypeId::of::<A>();
+ //~^ ERROR `std::any::TypeId::of` is not yet stable as a const fn
+}
trait NoLifetime {
fn get<'p, T : Test<'p>>(&self) -> T;
+ //~^ NOTE lifetimes in impl do not match this method in trait
}
trait Test<'p> {
impl<'a> NoLifetime for Foo<'a> {
fn get<'p, T : Test<'a>>(&self) -> T {
-//~^ ERROR E0195
-//~| lifetimes do not match trait
+ //~^ ERROR E0195
+ //~| NOTE lifetimes do not match method in trait
return *self as T;
}
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static TAB: [&mut [u8]; 0] = [];
+
+pub unsafe fn test() {
+ TAB[0].iter_mut(); //~ ERROR cannot borrow data mutably in a `&` reference [E0389]
+}
+
+pub fn main() {}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+#![feature(core_intrinsics)]
+
+use std::intrinsics;
+
+struct Foo {
+ bytes: [u8; unsafe { intrinsics::size_of::<Foo>() }],
+ //~^ ERROR unsupported cyclic reference between types/traits detected
+ x: usize,
+}
+
+fn main() {}
fn main() {
write(&buf);
- buf[0]=2; //[mir]~ ERROR E0594
+ buf[0]=2; //[ast]~ ERROR E0389
+ //[mir]~^ ERROR E0594
}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the 'static bound from the Copy impl is respected. Regression test for #29149.
+
+#![feature(nll)]
+
+#[derive(Clone)] struct Foo<'a>(&'a u32);
+impl Copy for Foo<'static> {}
+
+fn main() {
+ let s = 2;
+ let a = Foo(&s); //~ ERROR `s` does not live long enough [E0597]
+ drop(a);
+ drop(a);
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// check that we link regions in mutable lvalue ops correctly - issue #41774
+// check that we link regions in mutable place ops correctly - issue #41774
struct Data(i32);
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Check that explicit region bounds are allowed on the various
-// nominal types (but not on other types) and that they are type
-// checked.
-
-struct Inv<'a> { // invariant w/r/t 'a
- x: &'a mut &'a isize
-}
-
-pub trait Foo<'a, 't> {
- fn no_bound<'b>(self, b: Inv<'b>);
- fn has_bound<'b:'a>(self, b: Inv<'b>);
- fn wrong_bound1<'b,'c,'d:'a+'b>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
- fn okay_bound<'b,'c,'d:'a+'b+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
- fn another_bound<'x: 'a>(self, x: Inv<'x>, y: Inv<'t>);
-}
-
-impl<'a, 't> Foo<'a, 't> for &'a isize {
- fn no_bound<'b:'a>(self, b: Inv<'b>) {
- //~^ ERROR lifetime parameters or bounds on method `no_bound` do not match
- }
-
- fn has_bound<'b>(self, b: Inv<'b>) {
- //~^ ERROR lifetime parameters or bounds on method `has_bound` do not match
- }
-
- fn wrong_bound1<'b,'c,'d:'a+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>) {
- //~^ ERROR method not compatible with trait
- //
- // Note: This is a terrible error message. It is caused
- // because, in the trait, 'b is early bound, and in the impl,
- // 'c is early bound, so -- after substitution -- the
- // lifetimes themselves look isomorphic. We fail because the
- // lifetimes that appear in the types are in the wrong
- // order. This should really be fixed by keeping more
- // information about the lifetime declarations in the trait so
- // that we can compare better to the impl, even in cross-crate
- // cases.
- }
-
- fn okay_bound<'b,'c,'e:'b+'c>(self, b: Inv<'b>, c: Inv<'c>, e: Inv<'e>) {
- }
-
- fn another_bound<'x: 't>(self, x: Inv<'x>, y: Inv<'t>) {
- //~^ ERROR E0276
- }
-}
-
-fn main() { }
-// Change l-value in assignment ------------------------------------------------
+// Change place in assignment --------------------------------------------------
#[cfg(cfail1)]
-pub fn lvalue() -> i32 {
+pub fn place() -> i32 {
let mut x = 10;
let mut y = 11;
x = 9;
#[cfg(not(cfail1))]
#[rustc_clean(except="HirBody,MirOptimized,MirValidated", cfg="cfail2")]
#[rustc_clean(cfg="cfail3")]
-pub fn lvalue() -> i32 {
+pub fn place() -> i32 {
let mut x = 10;
let mut y = 11;
y = 9;
// Validate(Acquire, [_1: &ReFree(DefId(0/1:9 ~ validate_5[317d]::main[0]::{{closure}}[0]), BrEnv) [closure@NodeId(46)], _2: &ReFree(DefId(0/1:9 ~ validate_5[317d]::main[0]::{{closure}}[0]), BrAnon(0)) mut i32]);
// StorageLive(_3);
// StorageLive(_4);
+// StorageLive(_5);
// Validate(Suspend(ReScope(Node(ItemLocalId(9)))), [(*_2): i32]);
-// _4 = &ReErased mut (*_2);
-// Validate(Acquire, [(*_4): i32/ReScope(Node(ItemLocalId(9)))]);
-// _3 = move _4 as *mut i32 (Misc);
+// _5 = &ReErased mut (*_2);
+// Validate(Acquire, [(*_5): i32/ReScope(Node(ItemLocalId(9)))]);
+// _4 = move _5 as *mut i32 (Misc);
+// _3 = move _4;
// EndRegion(ReScope(Node(ItemLocalId(9))));
// StorageDead(_4);
+// StorageDead(_5);
// Validate(Release, [_0: bool, _3: *mut i32]);
// _0 = const write_42(move _3) -> bb1;
// }
fn main() {
// these literals are just silly.
''';
- //~^ ERROR: character constant must be escaped: \'
+ //~^ ERROR: character constant must be escaped: '
// note that this is a literal "\n" byte
'
fn main() {
let x = || -> i32 22;
- //~^ ERROR expected one of `!`, `(`, `::`, `<`, or `{`, found `22`
+ //~^ ERROR expected one of `!`, `(`, `+`, `::`, `<`, or `{`, found `22`
}
// compile-flags: -Z parse-only
fn foo() -> Vec<usize>> {
- //~^ ERROR expected one of `!`, `::`, `where`, or `{`, found `>`
+ //~^ ERROR expected one of `!`, `+`, `::`, `where`, or `{`, found `>`
Vec::new()
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+\ //~ ERROR: unknown start of token: \
/// This is the entrypoint for a hot plugged rustc_trans
#[no_mangle]
-pub fn __rustc_codegen_backend(sess: &Session) -> Box<TransCrate> {
- Box::new(TheBackend(MetadataOnlyTransCrate::new(sess)))
+pub fn __rustc_codegen_backend() -> Box<TransCrate> {
+ Box::new(TheBackend(MetadataOnlyTransCrate::new()))
}
extern crate rustc_lint;
extern crate rustc_metadata;
extern crate rustc_errors;
-extern crate rustc_trans;
extern crate rustc_trans_utils;
extern crate syntax;
let descriptions = Registry::new(&rustc::DIAGNOSTICS);
let sess = build_session(opts, None, descriptions);
- let trans = rustc_trans::LlvmTransCrate::new(&sess);
+ let trans = rustc_driver::get_trans(&sess);
let cstore = Rc::new(CStore::new(trans.metadata_loader()));
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
(sess, cstore, trans)
-include ../tools.mk
+ifeq ($(UNAME),Darwin)
+PLUGIN_FLAGS := -C link-args=-Wl,-undefined,dynamic_lookup
+endif
+
ifeq ($(findstring stage1,$(RUST_BUILD_STAGE)),stage1)
# ignore stage1
all:
all:
else
all: $(call NATIVE_STATICLIB,llvm-function-pass) $(call NATIVE_STATICLIB,llvm-module-pass)
- $(RUSTC) plugin.rs -C prefer-dynamic
+ $(RUSTC) plugin.rs -C prefer-dynamic $(PLUGIN_FLAGS)
$(RUSTC) main.rs
$(TMPDIR)/libllvm-function-pass.o:
extern crate rustc;
extern crate rustc_plugin;
-extern crate rustc_trans;
#[link(name = "llvm-function-pass", kind = "static")]
#[link(name = "llvm-module-pass", kind = "static")]
static EXTERN_FOO: u8;
fn extern_foo(a: u8, b: i32) -> String;
}
+
+struct Rls699 {
+ f: u32,
+}
+
+fn new(f: u32) -> Rls699 {
+ Rls699 { fs }
+}
--- /dev/null
+Extra docs for this struct.
#![feature(box_syntax)]
#![feature(rustc_private)]
#![feature(associated_type_defaults)]
+#![feature(external_doc)]
extern crate graphviz;
// A simple rust project
trait Foo {
type Bar = FrameBuffer;
}
+
+#[doc(include="extra-docs.md")]
+struct StructWithDocs;
--- /dev/null
+-include ../tools.mk
+
+all:
+ cp non-utf8 $(TMPDIR)/non-utf.rs
+ cat $(TMPDIR)/non-utf.rs | $(RUSTC) - 2>&1 \
+ | $(CGREP) "error: couldn't read from stdin, as it did not contain valid UTF-8"
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-cross-compile
-
-#![feature(rustc_private)]
-
-extern crate tempdir;
-
use std::env;
use std::fs;
-use tempdir::TempDir;
+use std::path::PathBuf;
fn main() {
- let td = TempDir::new("create-dir-all-bare").unwrap();
- env::set_current_dir(td.path()).unwrap();
+ let path = PathBuf::from(env::var_os("RUST_TEST_TMPDIR").unwrap());
+ env::set_current_dir(&path).unwrap();
fs::create_dir_all("create-dir-all-bare").unwrap();
}
// no-prefer-dynamic
// ignore-cross-compile
-#![feature(rustc_private)]
-
-extern crate tempdir;
-
use std::env;
use std::fs;
use std::process;
use std::str;
-use tempdir::TempDir;
+use std::path::PathBuf;
fn main() {
// If we're the child, make sure we were invoked correctly
let my_path = env::current_exe().unwrap();
let my_dir = my_path.parent().unwrap();
- let child_dir = TempDir::new_in(&my_dir, "issue-15140-child").unwrap();
- let child_dir = child_dir.path();
+ let child_dir = PathBuf::from(env::var_os("RUST_TEST_TMPDIR").unwrap());
+ let child_dir = child_dir.join("issue-15140-child");
+ fs::create_dir_all(&child_dir).unwrap();
let child_path = child_dir.join(&format!("mytest{}",
env::consts::EXE_SUFFIX));
format!("child assertion failed\n child stdout:\n {}\n child stderr:\n {}",
str::from_utf8(&child_output.stdout).unwrap(),
str::from_utf8(&child_output.stderr).unwrap()));
-
- let res = fs::remove_dir_all(&child_dir);
- if res.is_err() {
- // On Windows deleting just executed mytest.exe can fail because it's still locked
- std::thread::sleep_ms(1000);
- fs::remove_dir_all(&child_dir).unwrap();
- }
}
// ignore-cross-compile
-#![feature(rustc_private)]
-
-extern crate tempdir;
-
+use std::env;
use std::ffi::CString;
use std::fs::{self, File};
-use tempdir::TempDir;
+use std::path::PathBuf;
fn rename_directory() {
- let tmpdir = TempDir::new("rename_directory").ok().expect("rename_directory failed");
- let tmpdir = tmpdir.path();
+ let tmpdir = PathBuf::from(env::var_os("RUST_TEST_TMPDIR").unwrap());
let old_path = tmpdir.join("foo/bar/baz");
fs::create_dir_all(&old_path).unwrap();
let test_file = &old_path.join("temp.txt");
// ignore-cross-compile
-#![feature(rustc_private)]
-
-extern crate tempdir;
-
use std::env;
use std::fs::File;
use std::io;
use std::io::{Read, Write};
use std::process::{Command, Stdio};
-
-use tempdir::TempDir;
+use std::path::PathBuf;
fn main() {
if env::args().len() > 1 {
}
fn parent() -> io::Result<()> {
- let td = TempDir::new("foo").unwrap();
- let input = td.path().join("input");
- let output = td.path().join("output");
+ let td = PathBuf::from(env::var_os("RUST_TEST_TMPDIR").unwrap());
+ let input = td.join("stdio-from-input");
+ let output = td.join("stdio-from-output");
File::create(&input)?.write_all(b"foo\n")?;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_private)]
-
-extern crate tempdir;
-
+use std::env;
use std::fs::File;
use std::io::{Read, Write};
-
-use tempdir::TempDir;
+use std::path::PathBuf;
#[cfg(unix)]
fn switch_stdout_to(file: File) {
}
fn main() {
- let td = TempDir::new("foo").unwrap();
- let path = td.path().join("bar");
+ let path = PathBuf::from(env::var_os("RUST_TEST_TMPDIR").unwrap());
+ let path = path.join("switch-stdout-output");
let f = File::create(&path).unwrap();
println!("foo");
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core_intrinsics)]
+#![feature(const_type_id)]
+
+use std::any::TypeId;
+
+struct A;
+
+static ID_ISIZE: TypeId = TypeId::of::<isize>();
+
+pub fn main() {
+ assert_eq!(ID_ISIZE, TypeId::of::<isize>());
+
+ // sanity test of TypeId
+ const T: (TypeId, TypeId, TypeId) = (TypeId::of::<usize>(),
+ TypeId::of::<&'static str>(),
+ TypeId::of::<A>());
+ let (d, e, f) = (TypeId::of::<usize>(), TypeId::of::<&'static str>(),
+ TypeId::of::<A>());
+
+ assert!(T.0 != T.1);
+ assert!(T.0 != T.2);
+ assert!(T.1 != T.2);
+
+ assert_eq!(T.0, d);
+ assert_eq!(T.1, e);
+ assert_eq!(T.2, f);
+
+ // Check fn pointer against collisions
+ const F: (TypeId, TypeId) = (TypeId::of::<fn(fn(A) -> A) -> A>(),
+ TypeId::of::<fn(fn() -> A, A) -> A>());
+
+ assert!(F.0 != F.1);
+}
if cfg!(target_os = "android") {
assert!(home_dir().is_none());
} else {
- assert!(home_dir().is_some());
+ // When HOME is not set, some platforms return `None`,
+ // but others return `Some` with a default.
+ // Just check that it is not "/home/MountainView".
+ assert_ne!(home_dir(), Some(PathBuf::from("/home/MountainView")));
}
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(generators)]
+
+fn main() {
+ unsafe {
+ static move || {
+ // Tests that the generator transformation finds out that `a` is not live
+ // during the yield expression. Type checking will also compute liveness
+ // and it should also find out that `a` is not live.
+ // The compiler will panic if the generator transformation finds that
+ // `a` is live and type checking finds it dead.
+ let a = {
+ yield ();
+ 4i32
+ };
+ &a;
+ };
+ }
+}
// Test that we don't ICE when translating a generic impl method from
// an extern crate that contains a match expression on a local
-// variable lvalue where one of the match case bodies contains an
+// variable place where one of the match case bodies contains an
// expression that autoderefs through an overloaded generic deref
// impl.
// This used to generate invalid IR in that even if we took the
// `false` branch we'd still try to free the Box from the other
// arm. This was due to treating `*Box::new(9)` as an rvalue datum
-// instead of as an lvalue.
+// instead of as a place.
fn test(foo: bool) -> u8 {
match foo {
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for issue #47139:
+//
+// Coherence was encountering an (unnecessary) overflow trying to
+// decide if the two impls of dummy overlap.
+//
+// The overflow went something like:
+//
+// - `&'a ?T: Insertable` ?
+// - let ?T = Option<?U> ?
+// - `Option<?U>: Insertable` ?
+// - `Option<&'a ?U>: Insertable` ?
+// - `&'a ?U: Insertable` ?
+//
+// While somewhere in the middle, a projection would occur, which
+// broke cycle detection.
+//
+// It turned out that this cycle was being kicked off due to some
+// extended diagnostic attempts in coherence, so removing those
+// sidestepped the issue for now.
+
+#![allow(dead_code)]
+
+pub trait Insertable {
+ type Values;
+
+ fn values(self) -> Self::Values;
+}
+
+impl<T> Insertable for Option<T>
+ where
+ T: Insertable,
+ T::Values: Default,
+{
+ type Values = T::Values;
+
+ fn values(self) -> Self::Values {
+ self.map(Insertable::values).unwrap_or_default()
+ }
+}
+
+impl<'a, T> Insertable for &'a Option<T>
+ where
+ Option<&'a T>: Insertable,
+{
+ type Values = <Option<&'a T> as Insertable>::Values;
+
+ fn values(self) -> Self::Values {
+ self.as_ref().values()
+ }
+}
+
+impl<'a, T> Insertable for &'a [T]
+{
+ type Values = Self;
+
+ fn values(self) -> Self::Values {
+ self
+ }
+}
+
+trait Unimplemented { }
+
+trait Dummy { }
+
+struct Foo<T> { t: T }
+
+impl<'a, U> Dummy for Foo<&'a U>
+ where &'a U: Insertable
+{
+}
+
+impl<T> Dummy for T
+ where T: Unimplemented
+{ }
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for issue #47139:
+//
+// Same as issue-47139-1.rs, but the impls of dummy are in the
+// opposite order. This influenced the way that coherence ran and in
+// some cases caused the overflow to occur when it wouldn't otherwise.
+// In an effort to make the regr test more robust, I am including both
+// orderings.
+
+#![allow(dead_code)]
+
+pub trait Insertable {
+ type Values;
+
+ fn values(self) -> Self::Values;
+}
+
+impl<T> Insertable for Option<T>
+ where
+ T: Insertable,
+ T::Values: Default,
+{
+ type Values = T::Values;
+
+ fn values(self) -> Self::Values {
+ self.map(Insertable::values).unwrap_or_default()
+ }
+}
+
+impl<'a, T> Insertable for &'a Option<T>
+ where
+ Option<&'a T>: Insertable,
+{
+ type Values = <Option<&'a T> as Insertable>::Values;
+
+ fn values(self) -> Self::Values {
+ self.as_ref().values()
+ }
+}
+
+impl<'a, T> Insertable for &'a [T]
+{
+ type Values = Self;
+
+ fn values(self) -> Self::Values {
+ self
+ }
+}
+
+trait Unimplemented { }
+
+trait Dummy { }
+
+struct Foo<T> { t: T }
+
+impl<T> Dummy for T
+ where T: Unimplemented
+{ }
+
+impl<'a, U> Dummy for Foo<&'a U>
+ where &'a U: Insertable
+{
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn id<'c, 'b>(f: &'c &'b Fn(&i32)) -> &'c &'b Fn(&'static i32) {
+ f
+}
+
+fn main() {
+ let f: &Fn(&i32) = &|x| {};
+ id(&f);
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(use_nested_groups)]
#![allow(unused_import)]
use {{}, {}};
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Tests that automatic coercions from &mut T to *mut T
+// allow borrows of T to expire immediately - essentially, that
+// they work identically to 'foo as *mut T'
+#![feature(nll)]
+
+struct SelfReference {
+ self_reference: *mut SelfReference,
+}
+
+impl SelfReference {
+ fn set_self_ref(&mut self) {
+ self.self_reference = self;
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+#![feature(nll)]
+
+static mut x: &'static u32 = &0;
+
+fn foo() {
+ unsafe { x = &1; }
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Foo {
+ A,
+ B,
+ C,
+ D,
+ E,
+}
+use Foo::*;
+
+fn main() {
+ for foo in &[A, B, C, D, E] {
+ match *foo {
+ | A => println!("A"),
+ | B | C if 1 < 2 => println!("BC!"),
+ | _ => {},
+ }
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Test that an `&mut self` method, when invoked on an lvalue whose
-// type is `&mut [u8]`, passes in a pointer to the lvalue and not a
+// Test that an `&mut self` method, when invoked on a place whose
+// type is `&mut [u8]`, passes in a pointer to the place and not a
// temporary. Issue #19147.
use std::slice;
// all borrows are extended - nothing has been dropped yet
assert_eq!(get(), vec![]);
}
- // in a let-statement, extended lvalues are dropped
+ // in a let-statement, extended places are dropped
// *after* the let result (tho they have the same scope
// as far as scope-based borrowck goes).
assert_eq!(get(), vec![0, 2, 3, 1]);
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(never_type)]
+#![allow(dead_code)]
+#![allow(path_statements)]
+#![allow(unreachable_patterns)]
+
+fn never_direct(x: !) {
+ x;
+}
+
+fn never_ref_pat(ref x: !) {
+ *x;
+}
+
+fn never_ref(x: &!) {
+ let &y = x;
+ y;
+}
+
+fn never_pointer(x: *const !) {
+ unsafe {
+ *x;
+ }
+}
+
+fn never_slice(x: &[!]) {
+ x[0];
+}
+
+fn never_match(x: Result<(), !>) {
+ match x {
+ Ok(_) => {},
+ Err(_) => {},
+ }
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(nll)]
+
+pub struct DescriptorSet<'a> {
+ pub slots: Vec<AttachInfo<'a, Resources>>
+}
+
+pub trait ResourcesTrait<'r>: Sized {
+ type DescriptorSet: 'r;
+}
+
+pub struct Resources;
+
+impl<'a> ResourcesTrait<'a> for Resources {
+ type DescriptorSet = DescriptorSet<'a>;
+}
+
+pub enum AttachInfo<'a, R: ResourcesTrait<'a>> {
+ NextDescriptorSet(Box<R::DescriptorSet>)
+}
+
+fn main() {
+ let _x = DescriptorSet {slots: Vec::new()};
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// min-llvm-version 4.0
+// no-system-llvm -- needs MCSubtargetInfo::getFeatureTable()
// ignore-cloudabi no std::env
#![feature(cfg_target_feature)]
// ignore-emscripten no processes
// ignore-musl FIXME #31506
// ignore-pretty
-// no-system-llvm
+// min-system-llvm-version 5.0
// compile-flags: -C lto
// no-prefer-dynamic
// ignore-cloudabi no processes
// ignore-emscripten no processes
// ignore-musl FIXME #31506
-// no-system-llvm
+// min-system-llvm-version 5.0
use std::mem;
use std::process::Command;
assert_eq!(b, 1: u16);
let mut v = Vec::new();
- v: Vec<u8> = vec![1, 2, 3]; // Lvalue type ascription
+ v: Vec<u8> = vec![1, 2, 3]; // Place expression type ascription
assert_eq!(v, [1u8, 2, 3]);
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+
+type Field1 = i32;
+type Field2 = f32;
+type Field3 = i64;
+
+union DummyUnion {
+ field1: Field1,
+ field2: Field2,
+ field3: Field3,
+}
+
+const FLOAT1_AS_I32: i32 = 1065353216;
+const UNION: DummyUnion = DummyUnion { field1: FLOAT1_AS_I32 };
+
+const fn read_field1() -> Field1 {
+ const FIELD1: Field1 = unsafe { UNION.field1 };
+ FIELD1
+}
+
+const fn read_field2() -> Field2 {
+ const FIELD2: Field2 = unsafe { UNION.field2 };
+ FIELD2
+}
+
+const fn read_field3() -> Field3 {
+ const FIELD3: Field3 = unsafe { UNION.field3 };
+ FIELD3
+}
+
+fn main() {
+ assert_eq!(read_field1(), FLOAT1_AS_I32);
+ assert_eq!(read_field2(), 1.0);
+ assert_eq!(read_field3(), unsafe { UNION.field3 });
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(use_nested_groups)]
-
mod a {
pub enum B {}
}
}
+// Test every possible part of the syntax
use a::{B, d::{self, *, g::H}};
+// Test a more common use case
+use std::sync::{Arc, atomic::{AtomicBool, Ordering}};
+
fn main() {
let _: B;
let _: E;
let _: F;
let _: H;
let _: d::g::I;
+
+ let _: Arc<AtomicBool>;
+ let _: Ordering;
}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Just check if we don't get an ICE for the _S type.
+
+#![feature(const_size_of)]
+
+use std::cell::Cell;
+use std::mem;
+
+pub struct S {
+ s: Cell<usize>
+}
+
+pub type _S = [usize; 0 - (mem::size_of::<S>() != 4) as usize];
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This should not ICE
+pub fn test() {
+ macro_rules! foo {
+ () => ()
+ }
+}
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z unstable-options --disable-commonmark
+
+#![crate_name = "foo"]
+
+//! hello [foo]
+//!
+//! [foo]: url 'title & <stuff> & "things"'
+
+// @has 'foo/index.html' 'title & <stuff> & "things"'
note: the lifetime 'a as defined on the impl at 17:1...
--> $DIR/associated-const-impl-wrong-lifetime.rs:17:1
|
-17 | / impl<'a> Foo for &'a () {
-18 | | const NAME: &'a str = "unit";
-19 | | //~^ ERROR mismatched types [E0308]
-20 | | }
- | |_^
+17 | impl<'a> Foo for &'a () {
+ | ^^^^^^^^^^^^^^^^^^^^^^^
= note: ...does not necessarily outlive the static lifetime
error: aborting due to previous error
= note: `foo` must be defined only once in the type namespace of this module
help: You can use `as` to change the binding name of the import
|
-13 | use foo::foo as Otherfoo;
- | ^^^^^^^^^^^^^^^^^^^^
+13 | use foo::foo as other_foo;
+ | ^^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that explicit region bounds are allowed on the various
+// nominal types (but not on other types) and that they are type
+// checked.
+
+struct Inv<'a> { // invariant w/r/t 'a
+ x: &'a mut &'a isize
+}
+
+pub trait Foo<'a, 't> {
+ fn no_bound<'b>(self, b: Inv<'b>);
+ fn has_bound<'b:'a>(self, b: Inv<'b>);
+ fn wrong_bound1<'b,'c,'d:'a+'b>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
+ fn okay_bound<'b,'c,'d:'a+'b+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
+ fn another_bound<'x: 'a>(self, x: Inv<'x>, y: Inv<'t>);
+}
+
+impl<'a, 't> Foo<'a, 't> for &'a isize {
+ fn no_bound<'b:'a>(self, b: Inv<'b>) {
+ //~^ ERROR lifetime parameters or bounds on method `no_bound` do not match
+ }
+
+ fn has_bound<'b>(self, b: Inv<'b>) {
+ //~^ ERROR lifetime parameters or bounds on method `has_bound` do not match
+ }
+
+ fn wrong_bound1<'b,'c,'d:'a+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>) {
+ //~^ ERROR method not compatible with trait
+ //
+ // Note: This is a terrible error message. It is caused
+ // because, in the trait, 'b is early bound, and in the impl,
+ // 'c is early bound, so -- after substitution -- the
+ // lifetimes themselves look isomorphic. We fail because the
+ // lifetimes that appear in the types are in the wrong
+ // order. This should really be fixed by keeping more
+ // information about the lifetime declarations in the trait so
+ // that we can compare better to the impl, even in cross-crate
+ // cases.
+ }
+
+ fn okay_bound<'b,'c,'e:'b+'c>(self, b: Inv<'b>, c: Inv<'c>, e: Inv<'e>) {
+ }
+
+ fn another_bound<'x: 't>(self, x: Inv<'x>, y: Inv<'t>) {
+ //~^ ERROR E0276
+ }
+}
+
+fn main() { }
--- /dev/null
+error[E0195]: lifetime parameters or bounds on method `no_bound` do not match the trait declaration
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:28:5
+ |
+20 | fn no_bound<'b>(self, b: Inv<'b>);
+ | ---------------------------------- lifetimes in impl do not match this method in trait
+...
+28 | fn no_bound<'b:'a>(self, b: Inv<'b>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match method in trait
+
+error[E0195]: lifetime parameters or bounds on method `has_bound` do not match the trait declaration
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:32:5
+ |
+21 | fn has_bound<'b:'a>(self, b: Inv<'b>);
+ | -------------------------------------- lifetimes in impl do not match this method in trait
+...
+32 | fn has_bound<'b>(self, b: Inv<'b>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match method in trait
+
+error[E0308]: method not compatible with trait
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:36:5
+ |
+36 | fn wrong_bound1<'b,'c,'d:'a+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetime mismatch
+ |
+ = note: expected type `fn(&'a isize, Inv<'c>, Inv<'c>, Inv<'d>)`
+ found type `fn(&'a isize, Inv<'_>, Inv<'c>, Inv<'d>)`
+note: the lifetime 'c as defined on the method body at 36:5...
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:36:5
+ |
+36 | fn wrong_bound1<'b,'c,'d:'a+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+note: ...does not necessarily outlive the lifetime 'c as defined on the method body at 36:5
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:36:5
+ |
+36 | fn wrong_bound1<'b,'c,'d:'a+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0276]: impl has stricter requirements than trait
+ --> $DIR/regions-bound-missing-bound-in-impl.rs:53:5
+ |
+24 | fn another_bound<'x: 'a>(self, x: Inv<'x>, y: Inv<'t>);
+ | ------------------------------------------------------- definition of `another_bound` from trait
+...
+53 | fn another_bound<'x: 't>(self, x: Inv<'x>, y: Inv<'t>) {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ impl has extra requirement `'x: 't`
+
+error: aborting due to 4 previous errors
+
note: ...does not necessarily outlive the lifetime 'x as defined on the function body at 42:1
--> $DIR/expect-region-supply-region.rs:42:1
|
-42 | / fn expect_bound_supply_named<'x>() {
-43 | | let mut f: Option<&u32> = None;
-44 | |
-45 | | // Here we give a type annotation that `x` should be free. We get
-... |
-54 | | });
-55 | | }
- | |_^
+42 | fn expect_bound_supply_named<'x>() {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0308]: mismatched types
--> $DIR/expect-region-supply-region.rs:47:33
note: the lifetime 'x as defined on the function body at 42:1...
--> $DIR/expect-region-supply-region.rs:42:1
|
-42 | / fn expect_bound_supply_named<'x>() {
-43 | | let mut f: Option<&u32> = None;
-44 | |
-45 | | // Here we give a type annotation that `x` should be free. We get
-... |
-54 | | });
-55 | | }
- | |_^
+42 | fn expect_bound_supply_named<'x>() {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: ...does not necessarily outlive the anonymous lifetime #2 defined on the body at 47:29
--> $DIR/expect-region-supply-region.rs:47:29
|
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[macro_use]
+mod underscore;
+
+fn main() {
+ underscore!();
+}
--- /dev/null
+error: expected expression, found `_`
+ --> $DIR/underscore.rs:18:9
+ |
+18 | _
+ | ^
+ |
+ ::: $DIR/main.rs:15:5
+ |
+15 | underscore!();
+ | -------------- in this macro invocation
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// We want this file only so we can test cross-file error
+// messages, but we don't want it in an external crate.
+// ignore-test
+#![crate_type = "lib"]
+
+macro_rules! underscore {
+ () => (
+ _
+ )
+}
= note: `foo` must be defined only once in the value namespace of this module
help: You can use `as` to change the binding name of the import
|
-23 | use sub2::foo as Otherfoo; //~ ERROR the name `foo` is defined multiple times
- | ^^^^^^^^^^^^^^^^^^^^^
+23 | use sub2::foo as other_foo; //~ ERROR the name `foo` is defined multiple times
+ | ^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[allow(dead_code)]
-enum Foo {
- A,
- B,
- C,
- D,
- E,
-}
-use Foo::*;
-
-fn main() {
- let x = Foo::A;
- match x {
- | A => println!("A"),
- //~^ ERROR: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- | B | C => println!("BC!"),
- //~^ ERROR: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- | _ => {},
- //~^ ERROR: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- };
- match x {
- A | B | C => println!("ABC!"),
- _ => {},
- };
-}
-
+++ /dev/null
-error[E0658]: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- --> $DIR/feature-gate-match_beginning_vert.rs:24:9
- |
-24 | | A => println!("A"),
- | ^
- |
- = help: add #![feature(match_beginning_vert)] to the crate attributes to enable
-
-error[E0658]: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- --> $DIR/feature-gate-match_beginning_vert.rs:26:9
- |
-26 | | B | C => println!("BC!"),
- | ^
- |
- = help: add #![feature(match_beginning_vert)] to the crate attributes to enable
-
-error[E0658]: Use of a '|' at the beginning of a match arm is experimental (see issue #44101)
- --> $DIR/feature-gate-match_beginning_vert.rs:28:9
- |
-28 | | _ => {},
- | ^
- |
- = help: add #![feature(match_beginning_vert)] to the crate attributes to enable
-
-error: aborting due to 3 previous errors
-
-error[E0658]: `#[thread_local]` is an experimental feature, and does not currently handle destructors. There is no corresponding `#[task_local]` mapping to the task model (see issue #29594)
+error[E0658]: `#[thread_local]` is an experimental feature, and does not currently handle destructors. (see issue #29594)
--> $DIR/feature-gate-thread_local.rs:18:1
|
18 | #[thread_local] //~ ERROR `#[thread_local]` is an experimental feature
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(unused_imports, dead_code)]
-
-mod a {
- pub enum B {}
- pub enum C {}
-
- pub mod d {
- pub enum E {}
- pub enum F {}
-
- pub mod g {
- pub enum H {}
- }
- }
-}
-
-use a::{B, d::{*, g::H}}; //~ ERROR glob imports in `use` groups are experimental
- //~^ ERROR nested groups in `use` are experimental
- //~^^ ERROR paths in `use` groups are experimental
-
-fn main() {}
+++ /dev/null
-error[E0658]: nested groups in `use` are experimental (see issue #44494)
- --> $DIR/feature-gate-use_nested_groups.rs:27:12
- |
-27 | use a::{B, d::{*, g::H}}; //~ ERROR glob imports in `use` groups are experimental
- | ^^^^^^^^^^^^
- |
- = help: add #![feature(use_nested_groups)] to the crate attributes to enable
-
-error[E0658]: glob imports in `use` groups are experimental (see issue #44494)
- --> $DIR/feature-gate-use_nested_groups.rs:27:16
- |
-27 | use a::{B, d::{*, g::H}}; //~ ERROR glob imports in `use` groups are experimental
- | ^
- |
- = help: add #![feature(use_nested_groups)] to the crate attributes to enable
-
-error[E0658]: paths in `use` groups are experimental (see issue #44494)
- --> $DIR/feature-gate-use_nested_groups.rs:27:19
- |
-27 | use a::{B, d::{*, g::H}}; //~ ERROR glob imports in `use` groups are experimental
- | ^^^^
- |
- = help: add #![feature(use_nested_groups)] to the crate attributes to enable
-
-error: aborting due to 3 previous errors
-
-error[E0626]: borrow may still be in use when generator yields (Mir)
- --> $DIR/generator-with-nll.rs:20:17
- |
-20 | let b = &mut true; //~ ERROR borrow may still be in use when generator yields (Ast)
- | ^^^^^^^^^
-21 | //~^ borrow may still be in use when generator yields (Mir)
-22 | yield ();
- | -------- possible yield occurs here
-
error[E0626]: borrow may still be in use when generator yields (Ast)
--> $DIR/generator-with-nll.rs:19:23
|
22 | yield ();
| -------- possible yield occurs here
+error[E0626]: borrow may still be in use when generator yields (Mir)
+ --> $DIR/generator-with-nll.rs:20:17
+ |
+20 | let b = &mut true; //~ ERROR borrow may still be in use when generator yields (Ast)
+ | ^^^^^^^^^
+21 | //~^ borrow may still be in use when generator yields (Mir)
+22 | yield ();
+ | -------- possible yield occurs here
+
error: aborting due to 3 previous errors
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(generators)]
+
+enum Test { A(i32), B, }
+
+fn main() { }
+
+fn fun(test: Test) {
+ move || {
+ if let Test::A(ref _a) = test { //~ ERROR borrow may still be in use when generator yields
+ yield ();
+ }
+ };
+}
--- /dev/null
+error[E0626]: borrow may still be in use when generator yields
+ --> $DIR/pattern-borrow.rs:19:24
+ |
+19 | if let Test::A(ref _a) = test { //~ ERROR borrow may still be in use when generator yields
+ | ^^^^^^
+20 | yield ();
+ | -------- possible yield occurs here
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(generators, generator_trait)]
+
+use std::ops::Generator;
+
+fn main() {
+ let s = String::from("foo");
+ let mut gen = move || { //~ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+ yield s[..];
+ };
+ gen.resume(); //~ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+}
--- /dev/null
+error[E0277]: the trait bound `str: std::marker::Sized` is not satisfied
+ --> $DIR/sized-yield.rs:17:26
+ |
+17 | let mut gen = move || { //~ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+ | __________________________^
+18 | | yield s[..];
+19 | | };
+ | |____^ `str` does not have a constant size known at compile-time
+ |
+ = help: the trait `std::marker::Sized` is not implemented for `str`
+ = note: the yield type of a generator must have a statically known size
+
+error[E0277]: the trait bound `str: std::marker::Sized` is not satisfied
+ --> $DIR/sized-yield.rs:20:8
+ |
+20 | gen.resume(); //~ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+ | ^^^^^^ `str` does not have a constant size known at compile-time
+ |
+ = help: the trait `std::marker::Sized` is not implemented for `str`
+
+error: aborting due to 2 previous errors
+
-error[E0626]: borrow may still be in use when generator yields (Mir)
- --> $DIR/yield-while-local-borrowed.rs:24:17
- |
-24 | let a = &mut 3;
- | ^^^^^^
-...
-27 | yield();
- | ------- possible yield occurs here
-
error[E0626]: borrow may still be in use when generator yields (Ast)
--> $DIR/yield-while-local-borrowed.rs:24:22
|
55 | yield();
| ------- possible yield occurs here
+error[E0626]: borrow may still be in use when generator yields (Mir)
+ --> $DIR/yield-while-local-borrowed.rs:24:17
+ |
+24 | let a = &mut 3;
+ | ^^^^^^
+...
+27 | yield();
+ | ------- possible yield occurs here
+
error[E0626]: borrow may still be in use when generator yields (Mir)
--> $DIR/yield-while-local-borrowed.rs:52:21
|
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only -Z continue-parse-after-error
+
+fn f() -> impl A + {} // OK
+fn f() -> impl A + B {} // OK
+fn f() -> dyn A + B {} // OK
+fn f() -> A + B {} // OK
+
+impl S {
+ fn f(self) -> impl A + { // OK
+ let _ = |a, b| -> impl A + {}; // OK
+ }
+ fn f(self) -> impl A + B { // OK
+ let _ = |a, b| -> impl A + B {}; // OK
+ }
+ fn f(self) -> dyn A + B { // OK
+ let _ = |a, b| -> dyn A + B {}; // OK
+ }
+ fn f(self) -> A + B { // OK
+ let _ = |a, b| -> A + B {}; // OK
+ }
+}
+
+type A = fn() -> impl A +;
+//~^ ERROR ambiguous `+` in a type
+type A = fn() -> impl A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = fn() -> dyn A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = fn() -> A + B;
+//~^ ERROR expected a path on the left-hand side of `+`, not `fn() -> A`
+
+type A = Fn() -> impl A +;
+//~^ ERROR ambiguous `+` in a type
+type A = Fn() -> impl A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = Fn() -> dyn A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = Fn() -> A + B; // OK, interpreted as `(Fn() -> A) + B` for compatibility
+
+type A = &impl A +;
+//~^ ERROR ambiguous `+` in a type
+type A = &impl A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = &dyn A + B;
+//~^ ERROR ambiguous `+` in a type
+type A = &A + B;
+//~^ ERROR expected a path on the left-hand side of `+`, not `&A`
+
+fn main() {}
--- /dev/null
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:33:18
+ |
+33 | type A = fn() -> impl A +;
+ | ^^^^^^^^ help: use parentheses to disambiguate: `(impl A)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:35:18
+ |
+35 | type A = fn() -> impl A + B;
+ | ^^^^^^^^^^ help: use parentheses to disambiguate: `(impl A + B)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:37:18
+ |
+37 | type A = fn() -> dyn A + B;
+ | ^^^^^^^^^ help: use parentheses to disambiguate: `(dyn A + B)`
+
+error[E0178]: expected a path on the left-hand side of `+`, not `fn() -> A`
+ --> $DIR/impl-trait-plus-priority.rs:39:10
+ |
+39 | type A = fn() -> A + B;
+ | ^^^^^^^^^^^^^ perhaps you forgot parentheses?
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:42:18
+ |
+42 | type A = Fn() -> impl A +;
+ | ^^^^^^^^ help: use parentheses to disambiguate: `(impl A)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:44:18
+ |
+44 | type A = Fn() -> impl A + B;
+ | ^^^^^^^^^^ help: use parentheses to disambiguate: `(impl A + B)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:46:18
+ |
+46 | type A = Fn() -> dyn A + B;
+ | ^^^^^^^^^ help: use parentheses to disambiguate: `(dyn A + B)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:50:11
+ |
+50 | type A = &impl A +;
+ | ^^^^^^^^ help: use parentheses to disambiguate: `(impl A)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:52:11
+ |
+52 | type A = &impl A + B;
+ | ^^^^^^^^^^ help: use parentheses to disambiguate: `(impl A + B)`
+
+error: ambiguous `+` in a type
+ --> $DIR/impl-trait-plus-priority.rs:54:11
+ |
+54 | type A = &dyn A + B;
+ | ^^^^^^^^^ help: use parentheses to disambiguate: `(dyn A + B)`
+
+error[E0178]: expected a path on the left-hand side of `+`, not `&A`
+ --> $DIR/impl-trait-plus-priority.rs:56:10
+ |
+56 | type A = &A + B;
+ | ^^^^^^ help: try adding parentheses: `&(A + B)`
+
+error: aborting due to 11 previous errors
+
--> $DIR/trait_type.rs:17:4
|
17 | fn fmt(&self, x: &str) -> () { }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ types differ in mutability
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ types differ in mutability
|
= note: expected type `fn(&MyType, &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error>`
found type `fn(&MyType, &str)`
--> $DIR/trait_type.rs:27:4
|
27 | fn fmt() -> () { }
- | ^^^^^^^^^^^^^^^^^^ expected `&self` in impl
+ | ^^^^^^^^^^^^^^ expected `&self` in impl
|
= note: `fmt` from trait: `fn(&Self, &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error>`
= note: `foo` must be defined only once in the value namespace of this module
help: You can use `as` to change the binding name of the import
|
-25 | use a::foo as Otherfoo; //~ ERROR the name `foo` is defined multiple times
- | ^^^^^^^^^^^^^^^^^^
+25 | use a::foo as other_foo; //~ ERROR the name `foo` is defined multiple times
+ | ^^^^^^^^^^^^^^^^^^^
error[E0659]: `foo` is ambiguous
--> $DIR/duplicate.rs:56:9
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(warnings)]
+#![allow(unused_variables, dead_code, unused, bad_style)]
+#![deny(elided_lifetime_in_path)]
+
+struct Foo<'a> { x: &'a u32 }
+fn foo(x: &Foo) {
+ //~^ ERROR: hidden lifetime parameters are deprecated, try `Foo<'_>`
+}
+
+fn main() {}
--- /dev/null
+error: hidden lifetime parameters are deprecated, try `Foo<'_>`
+ --> $DIR/ellided-lifetimes.rs:15:12
+ |
+15 | fn foo(x: &Foo) {
+ | ^^^
+ |
+note: lint level defined here
+ --> $DIR/ellided-lifetimes.rs:12:9
+ |
+12 | #![deny(elided_lifetime_in_path)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::ops::Deref;
+trait Trait {}
+
+struct Struct;
+
+impl Deref for Struct {
+ type Target = Trait;
+ fn deref(&self) -> &Trait {
+ unimplemented!();
+ }
+}
+//~^^^^ ERROR cannot infer an appropriate lifetime for lifetime parameter
--- /dev/null
+error[E0601]: main function not found
+
+error[E0495]: cannot infer an appropriate lifetime for lifetime parameter in generic type due to conflicting requirements
+ --> $DIR/mismatched_trait_impl-2.rs:18:5
+ |
+18 | fn deref(&self) -> &Trait {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: first, the lifetime cannot outlive the anonymous lifetime #1 defined on the method body at 18:5...
+ --> $DIR/mismatched_trait_impl-2.rs:18:5
+ |
+18 | / fn deref(&self) -> &Trait {
+19 | | unimplemented!();
+20 | | }
+ | |_____^
+ = note: ...but the lifetime must also be valid for the static lifetime...
+ = note: ...so that the method type is compatible with trait:
+ expected fn(&Struct) -> &Trait + 'static
+ found fn(&Struct) -> &Trait
+
+error: aborting due to 2 previous errors
+
error[E0495]: cannot infer an appropriate lifetime for lifetime parameter 'a in generic type due to conflicting requirements
--> $DIR/mismatched_trait_impl.rs:19:5
|
-19 | / fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
-20 | | x
-21 | | }
- | |_____^
+19 | fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
note: first, the lifetime cannot outlive the anonymous lifetime #2 defined on the method body at 19:5...
--> $DIR/mismatched_trait_impl.rs:19:5
20 | | x
21 | | }
| |_____^
-note: ...so that method type is compatible with trait (expected fn(&i32, &'a u32, &u32) -> &'a u32, found fn(&i32, &u32, &u32) -> &u32)
- --> $DIR/mismatched_trait_impl.rs:19:5
- |
-19 | / fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
-20 | | x
-21 | | }
- | |_____^
-note: but, the lifetime must be valid for the lifetime 'a as defined on the method body at 19:5...
+note: ...but the lifetime must also be valid for the lifetime 'a as defined on the method body at 19:5...
--> $DIR/mismatched_trait_impl.rs:19:5
|
-19 | / fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
-20 | | x
-21 | | }
- | |_____^
-note: ...so that method type is compatible with trait (expected fn(&i32, &'a u32, &u32) -> &'a u32, found fn(&i32, &u32, &u32) -> &u32)
- --> $DIR/mismatched_trait_impl.rs:19:5
- |
-19 | / fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
-20 | | x
-21 | | }
- | |_____^
+19 | fn foo(&self, x: &u32, y: &'a u32) -> &'a u32 { //~ ERROR cannot infer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: ...so that the method type is compatible with trait:
+ expected fn(&i32, &'a u32, &u32) -> &'a u32
+ found fn(&i32, &u32, &u32) -> &u32
error: aborting due to previous error
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-macro_rules! not_an_lvalue {
+macro_rules! not_a_place {
($thing:expr) => {
$thing = 42;
//~^ ERROR invalid left-hand side expression
}
fn main() {
- not_an_lvalue!(99);
+ not_a_place!(99);
}
13 | $thing = 42;
| ^^^^^^^^^^^ left-hand of expression not valid
...
-19 | not_an_lvalue!(99);
- | ------------------- in this macro invocation
+19 | not_a_place!(99);
+ | ----------------- in this macro invocation
error: aborting due to previous error
= note: `sync` must be defined only once in the type namespace of this module
help: You can use `as` to change the binding name of the import
|
-14 | use std::sync as Othersync; //~ ERROR the name `sync` is defined multiple times
- | ^^^^^^^^^^^^^^^^^^^^^^
+14 | use std::sync as other_sync; //~ ERROR the name `sync` is defined multiple times
+ | ^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to 2 previous errors
note: ...does not necessarily outlive the lifetime 'a as defined on the trait at 13:1
--> $DIR/issue-27942.rs:13:1
|
-13 | / pub trait Buffer<'a, R: Resources<'a>> {
-14 | |
-15 | | fn select(&self) -> BufferViewHandle<R>;
-16 | | //~^ ERROR mismatched types
-... |
-19 | | //~| lifetime mismatch
-20 | | }
- | |_^
+13 | pub trait Buffer<'a, R: Resources<'a>> {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0308]: mismatched types
--> $DIR/issue-27942.rs:15:5
note: the lifetime 'a as defined on the trait at 13:1...
--> $DIR/issue-27942.rs:13:1
|
-13 | / pub trait Buffer<'a, R: Resources<'a>> {
-14 | |
-15 | | fn select(&self) -> BufferViewHandle<R>;
-16 | | //~^ ERROR mismatched types
-... |
-19 | | //~| lifetime mismatch
-20 | | }
- | |_^
+13 | pub trait Buffer<'a, R: Resources<'a>> {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: ...does not necessarily outlive the anonymous lifetime #1 defined on the method body at 15:5
--> $DIR/issue-27942.rs:15:5
|
note: ...does not necessarily outlive the lifetime 'a as defined on the impl at 13:1
--> $DIR/issue-37884.rs:13:1
|
-13 | / impl<'a, T: 'a> Iterator for RepeatMut<'a, T> {
-14 | |
-15 | | type Item = &'a mut T;
-16 | | fn next(&'a mut self) -> Option<Self::Item>
-... |
-21 | | }
-22 | | }
- | |_^
+13 | impl<'a, T: 'a> Iterator for RepeatMut<'a, T> {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
16 | &mut z
| ^^^^^^ borrowed value does not live long enough
17 | };
- | - `z` dropped here while still borrowed
+ | - `z` dropped here while still borrowed
...
21 | }
| - borrowed value needs to live until here
| ^^ borrowed value does not live long enough
...
18 | }
- | - borrowed value only lives until here
+ | - borrowed value only lives until here
|
= note: borrowed value must be valid for the static lifetime...
note: borrowed value must be valid for the lifetime 'a as defined on the function body at 13:1...
--> $DIR/issue-46472.rs:13:1
|
-13 | / fn bar<'a>() -> &'a mut u32 {
-14 | | &mut 4
-15 | | //~^ ERROR borrowed value does not live long enough (Ast) [E0597]
-16 | | //~| ERROR borrowed value does not live long enough (Mir) [E0597]
-17 | | }
- | |_^
+13 | fn bar<'a>() -> &'a mut u32 {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
error[E0597]: borrowed value does not live long enough (Mir)
--> $DIR/issue-46472.rs:14:10
| ^ temporary value does not live long enough
...
17 | }
- | - temporary value only lives until here
+ | - temporary value only lives until here
|
note: borrowed value must be valid for the lifetime 'a as defined on the function body at 13:1...
--> $DIR/issue-46472.rs:13:1
|
-13 | / fn bar<'a>() -> &'a mut u32 {
-14 | | &mut 4
-15 | | //~^ ERROR borrowed value does not live long enough (Ast) [E0597]
-16 | | //~| ERROR borrowed value does not live long enough (Mir) [E0597]
-17 | | }
- | |_^
+13 | fn bar<'a>() -> &'a mut u32 {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to 2 previous errors
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait T {
+ fn f(&self, _: ()) {
+ None::<()>.map(Self::f);
+ }
+ //~^^ ERROR function is expected to take a single 0-tuple as argument
+}
--- /dev/null
+error[E0601]: main function not found
+
+error[E0593]: function is expected to take a single 0-tuple as argument, but it takes 2 distinct arguments
+ --> $DIR/issue-47706-trait.rs:13:20
+ |
+12 | fn f(&self, _: ()) {
+ | ------------------ takes 2 distinct arguments
+13 | None::<()>.map(Self::f);
+ | ^^^ expected function that takes a single 0-tuple as argument
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// must-compile-successfully
+
+#![warn(unused_parens)]
+
+macro_rules! the_worship_the_heart_lifts_above {
+ ( @as_expr, $e:expr) => { $e };
+ ( @generate_fn, $name:tt) => {
+ #[allow(dead_code)] fn the_moth_for_the_star<'a>() -> Option<&'a str> {
+ Some(the_worship_the_heart_lifts_above!( @as_expr, $name ))
+ }
+ };
+ ( $name:ident ) => { the_worship_the_heart_lifts_above!( @generate_fn, (stringify!($name))); }
+ // ↑ Notably, this does 𝘯𝘰𝘵 warn: we're declining to lint unused parens in
+ // function/method arguments inside of nested macros because of situations
+ // like those reported in Issue #47775
+}
+
+macro_rules! and_the_heavens_reject_not {
+ () => {
+ // ↓ But let's test that we still lint for unused parens around
+ // function args inside of simple, one-deep macros.
+ #[allow(dead_code)] fn the_night_for_the_morrow() -> Option<isize> { Some((2)) }
+ //~^ WARN unnecessary parentheses around function argument
+ }
+}
+
+the_worship_the_heart_lifts_above!(rah);
+and_the_heavens_reject_not!();
+
+fn main() {}
--- /dev/null
+warning: unnecessary parentheses around function argument
+ --> $DIR/issue-47775-nested-macro-unnecessary-parens-arg.rs:32:83
+ |
+32 | #[allow(dead_code)] fn the_night_for_the_morrow() -> Option<isize> { Some((2)) }
+ | ^^^ help: remove these parentheses
+...
+38 | and_the_heavens_reject_not!();
+ | ------------------------------ in this macro invocation
+ |
+note: lint level defined here
+ --> $DIR/issue-47775-nested-macro-unnecessary-parens-arg.rs:13:9
+ |
+13 | #![warn(unused_parens)]
+ | ^^^^^^^^^^^^^
+
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-tidy-tab
+
#![warn(unused_mut, unused_parens)] // UI tests pass `-A unused`—see Issue #43896
#![feature(no_debug)]
let mut a = (1); // should suggest no `mut`, no parens
//~^ WARN does not need to be mutable
//~| WARN unnecessary parentheses
+ // the line after `mut` has a `\t` at the beginning, this is on purpose
+ let mut
+ b = 1;
+ //~^^ WARN does not need to be mutable
let d = Equinox { warp_factor: 9.975 };
match d {
Equinox { warp_factor: warp_factor } => {} // should suggest shorthand
//~^ WARN this pattern is redundant
}
- println!("{}", a);
+ println!("{} {}", a, b);
}
}
warning: unnecessary parentheses around assigned value
- --> $DIR/suggestions.rs:46:21
+ --> $DIR/suggestions.rs:48:21
|
-46 | let mut a = (1); // should suggest no `mut`, no parens
+48 | let mut a = (1); // should suggest no `mut`, no parens
| ^^^ help: remove these parentheses
|
note: lint level defined here
- --> $DIR/suggestions.rs:11:21
+ --> $DIR/suggestions.rs:13:21
|
-11 | #![warn(unused_mut, unused_parens)] // UI tests pass `-A unused`—see Issue #43896
+13 | #![warn(unused_mut, unused_parens)] // UI tests pass `-A unused`—see Issue #43896
| ^^^^^^^^^^^^^
warning: use of deprecated attribute `no_debug`: the `#[no_debug]` attribute was an experimental feature that has been deprecated due to lack of demand. See https://github.com/rust-lang/rust/issues/29721
- --> $DIR/suggestions.rs:41:1
+ --> $DIR/suggestions.rs:43:1
|
-41 | #[no_debug] // should suggest removal of deprecated attribute
+43 | #[no_debug] // should suggest removal of deprecated attribute
| ^^^^^^^^^^^ help: remove this attribute
|
= note: #[warn(deprecated)] on by default
warning: variable does not need to be mutable
- --> $DIR/suggestions.rs:46:13
+ --> $DIR/suggestions.rs:48:13
|
-46 | let mut a = (1); // should suggest no `mut`, no parens
- | ---^^
+48 | let mut a = (1); // should suggest no `mut`, no parens
+ | ----^
| |
| help: remove this `mut`
|
note: lint level defined here
- --> $DIR/suggestions.rs:11:9
+ --> $DIR/suggestions.rs:13:9
|
-11 | #![warn(unused_mut, unused_parens)] // UI tests pass `-A unused`—see Issue #43896
+13 | #![warn(unused_mut, unused_parens)] // UI tests pass `-A unused`—see Issue #43896
| ^^^^^^^^^^
+warning: variable does not need to be mutable
+ --> $DIR/suggestions.rs:52:13
+ |
+52 | let mut
+ | _____________^
+ | |_____________|
+ | ||
+53 | || b = 1;
+ | ||____________-^
+ | |____________|
+ | help: remove this `mut`
+
warning: static is marked #[no_mangle], but not exported
- --> $DIR/suggestions.rs:14:14
+ --> $DIR/suggestions.rs:16:14
|
-14 | #[no_mangle] static SHENZHOU: usize = 1; // should suggest `pub`
+16 | #[no_mangle] static SHENZHOU: usize = 1; // should suggest `pub`
| -^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| help: try making it public: `pub`
= note: #[warn(private_no_mangle_statics)] on by default
error: const items should never be #[no_mangle]
- --> $DIR/suggestions.rs:16:14
+ --> $DIR/suggestions.rs:18:14
|
-16 | #[no_mangle] const DISCOVERY: usize = 1; // should suggest `pub static` rather than `const`
+18 | #[no_mangle] const DISCOVERY: usize = 1; // should suggest `pub static` rather than `const`
| -----^^^^^^^^^^^^^^^^^^^^^^
| |
| help: try a static value: `pub static`
= note: #[deny(no_mangle_const_items)] on by default
warning: functions generic over types must be mangled
- --> $DIR/suggestions.rs:20:1
+ --> $DIR/suggestions.rs:22:1
|
-19 | #[no_mangle] // should suggest removal (generics can't be no-mangle)
+21 | #[no_mangle] // should suggest removal (generics can't be no-mangle)
| ------------ help: remove this attribute
-20 | pub fn defiant<T>(_t: T) {}
+22 | pub fn defiant<T>(_t: T) {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: #[warn(no_mangle_generic_items)] on by default
warning: function is marked #[no_mangle], but not exported
- --> $DIR/suggestions.rs:24:1
+ --> $DIR/suggestions.rs:26:1
|
-24 | fn rio_grande() {} // should suggest `pub`
+26 | fn rio_grande() {} // should suggest `pub`
| -^^^^^^^^^^^^^^^^^
| |
| help: try making it public: `pub`
= note: #[warn(private_no_mangle_fns)] on by default
warning: static is marked #[no_mangle], but not exported
- --> $DIR/suggestions.rs:31:18
+ --> $DIR/suggestions.rs:33:18
|
-31 | #[no_mangle] pub static DAUNTLESS: bool = true;
+33 | #[no_mangle] pub static DAUNTLESS: bool = true;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
warning: function is marked #[no_mangle], but not exported
- --> $DIR/suggestions.rs:33:18
+ --> $DIR/suggestions.rs:35:18
|
-33 | #[no_mangle] pub fn val_jean() {}
+35 | #[no_mangle] pub fn val_jean() {}
| ^^^^^^^^^^^^^^^^^^^^
warning: denote infinite loops with `loop { ... }`
- --> $DIR/suggestions.rs:44:5
+ --> $DIR/suggestions.rs:46:5
|
-44 | while true { // should suggest `loop`
+46 | while true { // should suggest `loop`
| ^^^^^^^^^^ help: use `loop`
|
= note: #[warn(while_true)] on by default
warning: the `warp_factor:` in this pattern is redundant
- --> $DIR/suggestions.rs:51:23
+ --> $DIR/suggestions.rs:57:23
|
-51 | Equinox { warp_factor: warp_factor } => {} // should suggest shorthand
+57 | Equinox { warp_factor: warp_factor } => {} // should suggest shorthand
| ------------^^^^^^^^^^^^
| |
| help: remove this
|
22 | break 22 //~ ERROR `break` with value from a `for` loop
| ^^^^^^^^ can only break with a value inside `loop`
+help: instead, use `break` on its own without a value inside this `for` loop
+ |
+22 | break //~ ERROR `break` with value from a `for` loop
+ | ^^^^^
error: aborting due to previous error
27 | ping!();
| -------- in this macro invocation
|
- ::: <ping macros>
+ ::: <ping macros>:1:1
|
1 | ( ) => { pong ! ( ) ; }
| -------------------------
28 | deep!();
| -------- in this macro invocation (#1)
|
- ::: <deep macros>
+ ::: <deep macros>:1:1
|
1 | ( ) => { foo ! ( ) ; }
| ------------------------
| | in this macro invocation (#2)
| in this expansion of `deep!` (#1)
|
- ::: <foo macros>
+ ::: <foo macros>:1:1
|
1 | ( ) => { bar ! ( ) ; }
| ------------------------
| | in this macro invocation (#3)
| in this expansion of `foo!` (#2)
|
- ::: <bar macros>
+ ::: <bar macros>:1:1
|
1 | ( ) => { ping ! ( ) ; }
| -------------------------
| | in this macro invocation (#4)
| in this expansion of `bar!` (#3)
|
- ::: <ping macros>
+ ::: <ping macros>:1:1
|
1 | ( ) => { pong ! ( ) ; }
| -------------------------
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! bad {
+ ($s:ident whatever) => {
+ {
+ let $s = 0;
+ *&mut $s = 0;
+ //~^ ERROR cannot borrow immutable local variable `foo` as mutable [E0596]
+ }
+ }
+}
+
+fn main() {
+ bad!(foo whatever);
+}
--- /dev/null
+error[E0596]: cannot borrow immutable local variable `foo` as mutable
+ --> $DIR/span-covering-argument-1.rs:15:19
+ |
+14 | let $s = 0;
+ | -- consider changing this to `mut $s`
+15 | *&mut $s = 0;
+ | ^^ cannot borrow mutably
+...
+22 | bad!(foo whatever);
+ | ------------------- in this macro invocation
+
+error: aborting due to previous error
+
| ^^ borrowed value does not live long enough
...
38 | }
- | - borrowed value only lives until here
+ | - borrowed value only lives until here
39 |
40 | deref(p);
| - borrow later used here
| ^^ borrowed value does not live long enough
38 | //~^ ERROR `y` does not live long enough [E0597]
39 | }
- | - borrowed value only lives until here
+ | - borrowed value only lives until here
40 |
41 | deref(p);
| - borrow later used here
| |_________^ borrowed value does not live long enough
...
36 | }
- | - borrowed value only lives until here
+ | - borrowed value only lives until here
37 |
38 | deref(p);
| - borrow later used here
| ^^^^^^^^^ borrowed value does not live long enough
...
36 | }
- | - borrowed value only lives until here
+ | - borrowed value only lives until here
37 |
38 | deref(p);
| - borrow later used here
| ^^^^^^^^^ assignment to borrowed `v[..]` occurs here
...
35 | }
- | - borrow later used here, when `p` is dropped
+ | - borrow later used here, when `p` is dropped
error[E0506]: cannot assign to `v[..]` because it is borrowed
--> $DIR/drop-no-may-dangle.rs:34:5
34 | v[0] += 1; //~ ERROR cannot assign to `v[..]` because it is borrowed
| ^^^^^^^^^ assignment to borrowed `v[..]` occurs here
35 | }
- | - borrow later used here, when `p` is dropped
+ | - borrow later used here, when `p` is dropped
error: aborting due to 2 previous errors
| ^^^^^ assignment to borrowed `x` occurs here
33 | // FIXME ^ Should not error in the future with implicit dtors, only manually implemented ones
34 | }
- | - borrow later used here, when `foo` is dropped
+ | - borrow later used here, when `foo` is dropped
error: aborting due to previous error
31 | x = 1; //~ ERROR cannot assign to `x` because it is borrowed [E0506]
| ^^^^^ assignment to borrowed `x` occurs here
32 | }
- | - borrow later used here, when `foo` is dropped
+ | - borrow later used here, when `foo` is dropped
error: aborting due to previous error
| ^^^^^ assignment to borrowed `x` occurs here
33 | // FIXME ^ This currently errors and it should not.
34 | }
- | - borrow later used here, when `foo` is dropped
+ | - borrow later used here, when `foo` is dropped
error: aborting due to previous error
26 | x = 1; //~ ERROR cannot assign to `x` because it is borrowed [E0506]
| ^^^^^ assignment to borrowed `x` occurs here
27 | }
- | - borrow later used here, when `wrap` is dropped
+ | - borrow later used here, when `wrap` is dropped
error: aborting due to previous error
| ^^^^^^^ temporary value does not live long enough
18 | x
19 | }
- | - temporary value only lives until here
+ | - temporary value only lives until here
|
= note: borrowed value must be valid for lifetime '_#2r...
= note: `transmute` must be defined only once in the value namespace of this module
help: You can use `as` to change the binding name of the import
|
-11 | use std::mem::transmute as Othertransmute;
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+11 | use std::mem::transmute as other_transmute;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
warning: struct is never used: `S`
- --> $DIR/macro-span-replacement.rs:17:9
+ --> $DIR/macro-span-replacement.rs:17:14
|
17 | $b $a; //~ WARN struct is never used
- | ^^^^^^
+ | ^
...
22 | m!(S struct);
| ------------- in this macro invocation
--> $DIR/static-lifetime.rs:13:1
|
13 | impl<'a, A: Clone> Arbitrary for ::std::borrow::Cow<'a, A> {} //~ ERROR lifetime bound
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: but lifetime parameter must outlive the static lifetime
error: aborting due to previous error
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate std;
+fn main() {}
+//~^^ ERROR the name `std` is defined multiple times [E0259]
--- /dev/null
+error[E0259]: the name `std` is defined multiple times
+ --> $DIR/issue-45799-bad-extern-crate-rename-suggestion-formatting.rs:11:1
+ |
+11 | extern crate std;
+ | ^^^^^^^^^^^^^^^^^ `std` reimported here
+ |
+ = note: `std` must be defined only once in the type namespace of this module
+help: You can use `as` to change the binding name of the import
+ |
+11 | extern crate std as other_std;
+ |
+
+error: aborting due to previous error
+
= note: `bar` must be defined only once in the type namespace of this module
help: You can use `as` to change the binding name of the import
|
-15 | self as Otherbar
+15 | self as other_bar
|
error: aborting due to 3 previous errors
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod a {
+ pub mod b1 {
+ pub enum C2 {}
+ }
+
+ pub enum B2 {}
+}
+
+use a::{b1::{C1, C2}, B2};
+//~^ ERROR unresolved import `a::b1::C1`
+
+fn main() {
+ let _: C2;
+ let _: B2;
+}
--- /dev/null
+error[E0432]: unresolved import `a::b1::C1`
+ --> $DIR/use-nested-groups-error.rs:19:14
+ |
+19 | use a::{b1::{C1, C2}, B2};
+ | ^^ no `C1` in `a::b1`. Did you mean to use `C2`?
+
+error: aborting due to previous error
+
-Subproject commit 7d7fef1690218bbb406cf3bcadf7bb29dbb40cc5
+Subproject commit ce47e529d29f0bf19b31ae80b37b467e42fb97e2
.expect("Malformed llvm version directive");
// Ignore if using system LLVM and actual version
// is smaller the minimum required version
- !(config.system_llvm && &actual_version[..] < min_version)
+ config.system_llvm && &actual_version[..] < min_version
} else {
false
}
for pretty_printer_file in &pretty_printer_files {
inputs.push(mtime(&rust_src_dir.join(pretty_printer_file)));
}
- for lib in config.run_lib_path.read_dir().unwrap() {
- let lib = lib.unwrap();
- inputs.push(mtime(&lib.path()));
+ let mut entries = config.run_lib_path.read_dir().unwrap()
+ .collect::<Vec<_>>();
+ while let Some(entry) = entries.pop() {
+ let entry = entry.unwrap();
+ let path = entry.path();
+ if entry.metadata().unwrap().is_file() {
+ inputs.push(mtime(&path));
+ } else {
+ entries.extend(path.read_dir().unwrap());
+ }
}
if let Some(ref rustdoc_path) = config.rustdoc_path {
inputs.push(mtime(&rustdoc_path));
let mut results = Vec::new();
let mut mismatch = Mismatch::new(0);
- for result in diff::lines(actual, expected) {
+ for result in diff::lines(expected, actual) {
match result {
diff::Result::Left(str) => {
if lines_since_mismatch >= context_size && lines_since_mismatch > 0 {
mismatch.lines.push(DiffLine::Context(line.to_owned()));
}
- mismatch.lines.push(DiffLine::Resulting(str.to_owned()));
+ mismatch.lines.push(DiffLine::Expected(str.to_owned()));
+ line_number += 1;
lines_since_mismatch = 0;
}
diff::Result::Right(str) => {
mismatch.lines.push(DiffLine::Context(line.to_owned()));
}
- mismatch.lines.push(DiffLine::Expected(str.to_owned()));
- line_number += 1;
+ mismatch.lines.push(DiffLine::Resulting(str.to_owned()));
lines_since_mismatch = 0;
}
diff::Result::Both(str, _) => {
}
/// For each `aux-build: foo/bar` annotation, we check to find the
- /// file in a `aux` directory relative to the test itself.
+ /// file in a `auxiliary` directory relative to the test itself.
fn compute_aux_test_paths(&self, rel_ab: &str) -> TestPaths {
let test_ab = self.testpaths
.file
-Subproject commit 919604e1ead8294c8ca14f101be4380ea1ea370c
+Subproject commit 61833b9aeab8bf8f0c0c0e42b7c96b6eceb37d0d
-Subproject commit 511321ae1c2fa3f0e334885fecf406dd6c882836
+Subproject commit dee42bda8156a28ead609080e27b02173bb9c29e
clap = "2.25.0"
[dependencies.mdbook]
-version = "0.0.28"
+version = "0.1.2"
default-features = false
extern crate clap;
use std::env;
-use std::io::{self, Write};
use std::path::{Path, PathBuf};
use clap::{App, ArgMatches, SubCommand, AppSettings};
};
if let Err(e) = res {
- writeln!(&mut io::stderr(), "An error occured:\n{}", e).ok();
+ eprintln!("Error: {}", e);
+
+ for cause in e.iter().skip(1) {
+ eprintln!("\tCaused By: {}", cause);
+ }
+
::std::process::exit(101);
}
}
// Build command implementation
pub fn build(args: &ArgMatches) -> Result<()> {
let book_dir = get_book_dir(args);
- let mut book = MDBook::new(&book_dir).read_config()?;
+ let mut book = MDBook::load(&book_dir)?;
// Set this to allow us to catch bugs in advance.
book.config.build.create_missing = false;
-Subproject commit e0e3e22248cd14ebbe0253e9720261a0328bfc59
+Subproject commit 346238f49740d6c98102a6a59811b1625c73a9d7
"openssl", // BSD+advertising clause, cargo, mdbook
"pest", // MPL2, mdbook via handlebars
"thread-id", // Apache-2.0, mdbook
+ "toml-query", // MPL-2.0, mdbook
+ "is-match", // MPL-2.0, mdbook
"cssparser", // MPL-2.0, rustdoc
"smallvec", // MPL-2.0, rustdoc
"fuchsia-zircon-sys", // BSD-3-Clause, rustdoc, rustc, cargo
"src/dlmalloc",
"src/jemalloc",
"src/llvm",
+ "src/llvm-emscripten",
"src/libbacktrace",
"src/libcompiler_builtins",
"src/librustc_data_structures/owning_ref",