- checkout: self
fetchDepth: 2
+# Spawn a background process to collect CPU usage statistics which we'll upload
+# at the end of the build. See the comments in the script here for more
+# information.
+- bash: python src/ci/cpu-usage-over-time.py &> cpu-usage.csv &
+ displayName: "Collect CPU-usage statistics in the background"
+
- bash: printenv | sort
displayName: Show environment variables
# images, etc.
- bash: |
set -e
+ source src/ci/shared.sh
sudo apt-get install -y python3-setuptools
- pip3 install awscli --upgrade --user
+ retry pip3 install awscli --upgrade --user
echo "##vso[task.prependpath]$HOME/.local/bin"
displayName: Install awscli (Linux)
condition: and(succeeded(), eq(variables['Agent.OS'], 'Linux'))
AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY)
condition: and(succeeded(), or(eq(variables.DEPLOY, '1'), eq(variables.DEPLOY_ALT, '1')))
displayName: Upload artifacts
+
+# Upload CPU usage statistics that we've been gathering this whole time. Always
+# execute this step in case we want to inspect failed builds, but don't let
+# errors here ever fail the build since this is just informational.
+- bash: aws s3 cp --acl public-read cpu-usage.csv s3://$DEPLOY_BUCKET/rustc-builds/$BUILD_SOURCEVERSION/cpu-$SYSTEM_JOBNAME.csv
+ env:
+ AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY)
+ condition: contains(variables, 'AWS_SECRET_ACCESS_KEY')
+ continueOnError: true
+ displayName: Upload CPU usage statistics
Chris Vittal <christopher.vittal@gmail.com> Christopher Vittal <christopher.vittal@gmail.com>
Christian Poveda <christianpoveda@protonmail.com> <cn.poveda.ruiz@gmail.com>
Christian Poveda <christianpoveda@protonmail.com> <z1mvader@protonmail.com>
+Christian Poveda <christianpoveda@protonmail.com> <cpovedar@fnal.gov>
Clark Gaebel <cg.wowus.cg@gmail.com> <cgaebel@mozilla.com>
Clinton Ryan <clint.ryan3@gmail.com>
Corey Richardson <corey@octayn.net> Elaine "See More" Nemo <corey@octayn.net>
Keegan McAllister <mcallister.keegan@gmail.com> <kmcallister@mozilla.com>
Kevin Butler <haqkrs@gmail.com>
Kyeongwoon Lee <kyeongwoon.lee@samsung.com>
+Laurențiu Nicola <lnicola@dend.ro>
Lee Jeffery <leejeffery@gmail.com> Lee Jeffery <lee@leejeffery.co.uk>
Lee Wondong <wdlee91@gmail.com>
Lennart Kudling <github@kudling.de>
name = "alloc"
version = "0.0.0"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
"rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_xorshift 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"backtrace-sys 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
]
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
]
[[package]]
name = "compiler_builtins"
-version = "0.1.15"
+version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
]
[[package]]
name = "fmt_macros"
version = "0.0.0"
+dependencies = [
+ "syntax_pos 0.0.0",
+]
[[package]]
name = "fnv"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
]
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-alloc 1.0.0",
"rustc-std-workspace-core 1.0.0",
]
[[package]]
name = "jsonrpc-core"
-version = "10.0.1"
+version = "12.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "lsp-types"
-version = "0.57.0"
+version = "0.57.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
name = "panic_abort"
version = "0.0.0"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
]
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
"unwind 0.0.0",
version = "0.0.0"
dependencies = [
"cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
]
[[package]]
name = "rls"
-version = "1.36.0"
+version = "1.37.0"
dependencies = [
"cargo 0.38.0",
- "cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cargo_metadata 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy_lints 0.0.212",
"crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 10.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lsp-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lsp-types 0.57.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lsp-types 0.57.2 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ordslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"racer 2.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-vfs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-workspace-hack 1.0.0",
- "rustc_tools_util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_tools_util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustfmt-nightly 1.2.2",
"serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-std-workspace-core 1.0.0",
]
"alloc 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
]
"alloc 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
]
"alloc 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
]
[[package]]
name = "rustc_tools_util"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
+version = "0.2.0"
[[package]]
name = "rustc_tools_util"
version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc_traits"
"alloc 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
]
"alloc 0.0.0",
"backtrace 0.3.29 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
"dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.0.0"
dependencies = [
"cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"core 0.0.0",
"libc 0.2.54 (registry+https://github.com/rust-lang/crates.io-index)",
]
"checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc"
"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007"
"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2"
-"checksum compiler_builtins 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "e899b947d7e71c3d35c0b6194d64025b84946640510e215090c815b20828964e"
+"checksum compiler_builtins 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "e79ed19793c99771b386d76e08c3419409bb3d418b81a8b8afc73524247461cf"
"checksum compiletest_rs 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "f40ecc9332b68270998995c00f8051ee856121764a0d3230e64c9efd059d27b6"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2640d6d0bf22e82bed1b73c6aef8d5dd31e5abe6666c57e6d45e2649f4f887"
"checksum jemalloc-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7bef0d4ce37578dfd80b466e3d8324bd9de788e249f1accebb0c472ea4b52bdc"
"checksum jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "b3d51e24009d966c8285d524dbaf6d60926636b2a89caee9ce0bd612494ddc16"
"checksum json 0.11.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9ad0485404155f45cce53a40d4b2d6ac356418300daed05273d9e26f91c390be"
-"checksum jsonrpc-core 10.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a5152c3fda235dfd68341b3edf4121bc4428642c93acbd6de88c26bf95fc5d7"
+"checksum jsonrpc-core 12.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "288dca7f9713710a29e485076b9340156cb701edb46a881f5d0c31aa4f5b9143"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
"checksum log_settings 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19af41f0565d7c19b2058153ad0b42d4d5ce89ec4dbf06ed6741114a8b63e7cd"
"checksum lsp-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "169d737ad89cf8ddd82d1804d9122f54568c49377665157277cc90d747b1d31a"
-"checksum lsp-types 0.57.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d220de1fbbb12b60df17898272579c22329375fc4baa960402fbd17cf0cdd165"
+"checksum lsp-types 0.57.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b62b77309737b1e262b3bbf37ff8faa740562c633b14702afe9be85dbcb6f88a"
"checksum lzma-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d1eaa027402541975218bb0eec67d6b0412f6233af96e0d096d31dbdfd22e614"
"checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
"checksum macro-utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2c4deaccc2ead6a28c16c0ba82f07d52b6475397415ce40876e559b0b0ea510"
"checksum rustc-rayon-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "526e7b6d2707a5b9bec3927d424ad70fa3cfc68e0ac1b75e46cdbbc95adc5108"
"checksum rustc-rayon-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "79d38ca7cbc22fa59f09d8534ea4b27f67b0facf0cbe274433aceea227a02543"
"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-"checksum rustc_tools_util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c5a95edfa0c893236ae4778bb7c4752760e4c0d245e19b5eff33c5aa5eb9dc"
+"checksum rustc_tools_util 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b725dadae9fabc488df69a287f5a99c5eaf5d10853842a8a3dfac52476f544ee"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum rustfix 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "af7c21531a91512a4a51b490be6ba1c8eff34fdda0dc5bf87dc28d86748aac56"
"checksum rusty-fork 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9591f190d2852720b679c21f66ad929f9f1d7bb09d1193c26167586029d8489c"
MSVC builds of Rust additionally require an installation of Visual Studio 2017
(or later) so `rustc` can use its linker. The simplest way is to get the
-[Visual Studio Build Tools] and check the “C++ build tools” workload.
+[Visual Studio] and check the “C++ build tools” and “Windows 10 SDK” workloads.
-[Visual Studio Build Tools]: https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019
+[Visual Studio]: https://visualstudio.microsoft.com/downloads/
(If you're installing cmake yourself, be careful that “C++ CMake tools for
Windows” doesn't get included under “Individual components”.)
const DEFAULT: bool = false;
- /// Run this rule for all hosts without cross compiling.
+ /// If true, then this rule should be skipped if --target was specified, but --host was not
const ONLY_HOSTS: bool = false;
/// Primary function to execute this rule. Can call `builder.ensure()`
// Determine the targets participating in this rule.
let targets = if self.only_hosts {
- if !builder.config.run_host_only {
+ if builder.config.skip_only_host_steps {
return; // don't run anything
} else {
&builder.hosts
let mut config = Config::default_opts();
// don't save toolstates
config.save_toolstates = None;
- config.run_host_only = true;
+ config.skip_only_host_steps = false;
config.dry_run = true;
// try to avoid spurious failures in dist where we create/delete each others file
let dir = config.out.join("tmp-rustbuild-tests").join(
#[test]
fn dist_with_target_flag() {
let mut config = configure(&["B"], &["C"]);
- config.run_host_only = false; // as-if --target=C was passed
+ config.skip_only_host_steps = true; // as-if --target=C was passed
let build = Build::new(config);
let mut builder = Builder::new(&build);
builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]);
#[test]
fn build_with_target_flag() {
let mut config = configure(&["B"], &["C"]);
- config.run_host_only = false;
+ config.skip_only_host_steps = true;
let build = Build::new(config);
let mut builder = Builder::new(&build);
builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Build), &[]);
pub test_compare_mode: bool,
pub llvm_libunwind: bool,
- pub run_host_only: bool,
+ pub skip_only_host_steps: bool,
pub on_fail: Option<String>,
pub stage: Option<u32>,
}
// If --target was specified but --host wasn't specified, don't run any host-only tests.
- config.run_host_only = !(flags.host.is_empty() && !flags.target.is_empty());
+ let has_hosts = !flags.host.is_empty();
+ let has_targets = !flags.target.is_empty();
+ config.skip_only_host_steps = !has_hosts && has_targets;
let toml = file.map(|file| {
let contents = t!(fs::read_to_string(&file));
/// when this function is called.
pub fn cp_r(&self, src: &Path, dst: &Path) {
if self.config.dry_run { return; }
- for f in t!(fs::read_dir(src)) {
- let f = t!(f);
+ for f in self.read_dir(src) {
let path = f.path();
let name = path.file_name().unwrap();
let dst = dst.join(name);
--- /dev/null
+#!/usr/bin/env python
+# ignore-tidy-linelength
+
+# This is a small script that we use on CI to collect CPU usage statistics of
+# our builders. By seeing graphs of CPU usage over time we hope to correlate
+# that with possible improvements to Rust's own build system, ideally diagnosing
+# that either builders are always fully using their CPU resources or they're
+# idle for long stretches of time.
+#
+# This script is relatively simple, but it's platform specific. Each platform
+# (OSX/Windows/Linux) has a different way of calculating the current state of
+# CPU at a point in time. We then compare two captured states to determine the
+# percentage of time spent in one state versus another. The state capturing is
+# all platform-specific but the loop at the bottom is the cross platform part
+# that executes everywhere.
+#
+# # Viewing statistics
+#
+# All builders will upload their CPU statistics as CSV files to our S3 buckets.
+# These URLS look like:
+#
+# https://$bucket.s3.amazonaws.com/rustc-builds/$commit/cpu-$builder.csv
+#
+# for example
+#
+# https://rust-lang-ci2.s3.amazonaws.com/rustc-builds/68baada19cd5340f05f0db15a3e16d6671609bcc/cpu-x86_64-apple.csv
+#
+# Each CSV file has two columns. The first is the timestamp of the measurement
+# and the second column is the % of idle cpu time in that time slice. Ideally
+# the second column is always zero.
+#
+# Once you've downloaded a file there's various ways to plot it and visualize
+# it. For command line usage you can use a script like so:
+#
+# set timefmt '%Y-%m-%dT%H:%M:%S'
+# set xdata time
+# set ylabel "Idle CPU %"
+# set xlabel "Time"
+# set datafile sep ','
+# set term png
+# set output "printme.png"
+# set grid
+# builder = "i686-apple"
+# plot "cpu-".builder.".csv" using 1:2 with lines title builder
+#
+# Executed as `gnuplot < ./foo.plot` it will generate a graph called
+# `printme.png` which you can then open up. If you know how to improve this
+# script or the viewing process that would be much appreciated :) (or even if
+# you know how to automate it!)
+
+import datetime
+import sys
+import time
+
+if sys.platform == 'linux2':
+ class State:
+ def __init__(self):
+ with open('/proc/stat', 'r') as file:
+ data = file.readline().split()
+ if data[0] != 'cpu':
+ raise Exception('did not start with "cpu"')
+ self.user = int(data[1])
+ self.nice = int(data[2])
+ self.system = int(data[3])
+ self.idle = int(data[4])
+ self.iowait = int(data[5])
+ self.irq = int(data[6])
+ self.softirq = int(data[7])
+ self.steal = int(data[8])
+ self.guest = int(data[9])
+ self.guest_nice = int(data[10])
+
+ def idle_since(self, prev):
+ user = self.user - prev.user
+ nice = self.nice - prev.nice
+ system = self.system - prev.system
+ idle = self.idle - prev.idle
+ iowait = self.iowait - prev.iowait
+ irq = self.irq - prev.irq
+ softirq = self.softirq - prev.softirq
+ steal = self.steal - prev.steal
+ guest = self.guest - prev.guest
+ guest_nice = self.guest_nice - prev.guest_nice
+ total = user + nice + system + idle + iowait + irq + softirq + steal + guest + guest_nice
+ return float(idle) / float(total) * 100
+
+elif sys.platform == 'win32':
+ from ctypes.wintypes import DWORD
+ from ctypes import Structure, windll, WinError, GetLastError, byref
+
+ class FILETIME(Structure):
+ _fields_ = [
+ ("dwLowDateTime", DWORD),
+ ("dwHighDateTime", DWORD),
+ ]
+
+ class State:
+ def __init__(self):
+ idle, kernel, user = FILETIME(), FILETIME(), FILETIME()
+
+ success = windll.kernel32.GetSystemTimes(
+ byref(idle),
+ byref(kernel),
+ byref(user),
+ )
+
+ assert success, WinError(GetLastError())[1]
+
+ self.idle = (idle.dwHighDateTime << 32) | idle.dwLowDateTime
+ self.kernel = (kernel.dwHighDateTime << 32) | kernel.dwLowDateTime
+ self.user = (user.dwHighDateTime << 32) | user.dwLowDateTime
+
+ def idle_since(self, prev):
+ idle = self.idle - prev.idle
+ user = self.user - prev.user
+ kernel = self.kernel - prev.kernel
+ return float(idle) / float(user + kernel) * 100
+
+elif sys.platform == 'darwin':
+ from ctypes import *
+ libc = cdll.LoadLibrary('/usr/lib/libc.dylib')
+
+ PROESSOR_CPU_LOAD_INFO = c_int(2)
+ CPU_STATE_USER = 0
+ CPU_STATE_SYSTEM = 1
+ CPU_STATE_IDLE = 2
+ CPU_STATE_NICE = 3
+ c_int_p = POINTER(c_int)
+
+ class State:
+ def __init__(self):
+ num_cpus_u = c_uint(0)
+ cpu_info = c_int_p()
+ cpu_info_cnt = c_int(0)
+ err = libc.host_processor_info(
+ libc.mach_host_self(),
+ PROESSOR_CPU_LOAD_INFO,
+ byref(num_cpus_u),
+ byref(cpu_info),
+ byref(cpu_info_cnt),
+ )
+ assert err == 0
+ self.user = 0
+ self.system = 0
+ self.idle = 0
+ self.nice = 0
+ cur = 0
+ while cur < cpu_info_cnt.value:
+ self.user += cpu_info[cur + CPU_STATE_USER]
+ self.system += cpu_info[cur + CPU_STATE_SYSTEM]
+ self.idle += cpu_info[cur + CPU_STATE_IDLE]
+ self.nice += cpu_info[cur + CPU_STATE_NICE]
+ cur += num_cpus_u.value
+
+ def idle_since(self, prev):
+ user = self.user - prev.user
+ system = self.system - prev.system
+ idle = self.idle - prev.idle
+ nice = self.nice - prev.nice
+ return float(idle) / float(user + system + idle + nice) * 100.0
+
+else:
+ print('unknown platform', sys.platform)
+ sys.exit(1)
+
+cur_state = State();
+print("Time,Idle")
+while True:
+ time.sleep(1);
+ next_state = State();
+ now = datetime.datetime.utcnow().isoformat()
+ idle = next_state.idle_since(cur_state)
+ print("%s,%s" % (now, idle))
+ sys.stdout.flush()
+ cur_state = next_state
src/libstd \
src/liballoc \
src/libcore
+
+# Debug assertions in rustc are largely covered by other builders, and LLVM
+# assertions cause this builder to slow down by quite a large amount and don't
+# buy us a huge amount over other builders (not sure if we've ever seen an
+# asmjs-specific backend assertion trip), so disable assertions for these
+# tests.
+ENV NO_LLVM_ASSERTIONS=1
+ENV NO_DEBUG_ASSERTIONS=1
cat "$TOOLSTATE_FILE"
echo
+# This function checks if a particular tool is *not* in status "test-pass".
+check_tool_failed() {
+ grep -vq '"'"$1"'":"test-pass"' "$TOOLSTATE_FILE"
+}
+
# This function checks that if a tool's submodule changed, the tool's state must improve
verify_status() {
echo "Verifying status of $1..."
if echo "$CHANGED_FILES" | grep -q "^M[[:blank:]]$2$"; then
echo "This PR updated '$2', verifying if status is 'test-pass'..."
- if grep -vq '"'"$1"'":"test-pass"' "$TOOLSTATE_FILE"; then
+ if check_tool_failed "$1"; then
echo
echo "⚠️ We detected that this PR updated '$1', but its tests failed."
echo
fi
}
-# deduplicates the submodule check and the assertion that on beta some tools MUST be passing
+# deduplicates the submodule check and the assertion that on beta some tools MUST be passing.
+# $1 should be "submodule_changed" to only check tools that got changed by this PR,
+# or "beta_required" to check all tools that have $2 set to "beta".
check_dispatch() {
if [ "$1" = submodule_changed ]; then
# ignore $2 (branch id)
verify_status $3 $4
elif [ "$2" = beta ]; then
echo "Requiring test passing for $3..."
- if grep -q '"'"$3"'":"\(test\|build\)-fail"' "$TOOLSTATE_FILE"; then
+ if check_tool_failed "$3"; then
exit 4
fi
fi
-Subproject commit 29fe982990e43b9367be0ff47abc82fb2123fd03
+Subproject commit 62a8c6f25fbd981c80a046f3b04be9684749af3b
-Subproject commit 9858872bd1b7dbba5ec27dc30d34eba00acd7ef9
+Subproject commit f0c75b75f9c18537b78f5d17c1015247e9a49c86
-Subproject commit 862b669c395822bb0938781d74f860e5762ad4fb
+Subproject commit f8ae436d936f6f4891d3c1bbb1af5865eb8aeadb
-Subproject commit 811c697b232c611ed754d279ed20643a0c4096f6
+Subproject commit 18566f4dedc3ef5bf61f5f85685d5966db99cc11
-Subproject commit 3cb727b62b953d59b4360d39aa68b6dc8f157655
+Subproject commit 3ac9cfc9c9ab2e366feebf18718112737f572352
example code that triggers this lint:
```rust
-#![feature(custom_attribute)]
-
-#![mutable_doc]
+#![macro_export]
```
This will produce:
```text
warning: unused attribute
- --> src/main.rs:4:1
+ --> src/main.rs:1:1
|
-4 | #![mutable_doc]
- | ^^^^^^^^^^^^^^^
+1 | #![macro_export]
+ | ^^^^^^^^^^^^^^^^
|
```
extern crate rustc;
extern crate rustc_plugin;
-use syntax::parse::token;
+use syntax::parse::token::{self, Token};
use syntax::tokenstream::TokenTree;
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
use syntax::ext::build::AstBuilder; // A trait for expr_usize.
use rustc_plugin::Registry;
fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
- -> Box<MacResult + 'static> {
+ -> Box<dyn MacResult + 'static> {
static NUMERALS: &'static [(&'static str, usize)] = &[
("M", 1000), ("CM", 900), ("D", 500), ("CD", 400),
}
let text = match args[0] {
- TokenTree::Token(_, token::Ident(s)) => s.to_string(),
+ TokenTree::Token(Token { kind: token::Ident(s, _), .. }) => s.to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
+++ /dev/null
-# `repr_align_enum`
-
-The tracking issue for this feature is: [#57996]
-
-[#57996]: https://github.com/rust-lang/rust/issues/57996
-
-------------------------
-
-The `repr_align_enum` feature allows using the `#[repr(align(x))]` attribute
-on enums, similarly to structs.
-
-# Examples
-
-```rust
-#![feature(repr_align_enum)]
-
-#[repr(align(8))]
-enum Aligned {
- Foo,
- Bar { value: u32 },
-}
-
-fn main() {
- assert_eq!(std::mem::align_of::<Aligned>(), 8);
-}
-```
-
-This is equivalent to using an aligned wrapper struct everywhere:
-
-```rust
-#[repr(align(8))]
-struct Aligned(Unaligned);
-
-enum Unaligned {
- Foo,
- Bar { value: u32 },
-}
-
-fn main() {
- assert_eq!(std::mem::align_of::<Aligned>(), 8);
-}
-```
--- /dev/null
+# `transparent_enums`
+
+The tracking issue for this feature is [#60405]
+
+[#60405]: https://github.com/rust-lang/rust/issues/60405
+
+----
+
+The `transparent_enums` feature allows you to mark `enum`s as
+`#[repr(transparent)]`. An `enum` may be `#[repr(transparent)]` if it has
+exactly one variant, and that variant matches the same conditions which `struct`
+requires for transparency. Some concrete illustrations follow.
+
+```rust
+#![feature(transparent_enums)]
+
+// This enum has the same representation as `f32`.
+#[repr(transparent)]
+enum SingleFieldEnum {
+ Variant(f32)
+}
+
+// This enum has the same representation as `usize`.
+#[repr(transparent)]
+enum MultiFieldEnum {
+ Variant { field: usize, nothing: () },
+}
+```
+
+For consistency with transparent `struct`s, `enum`s must have exactly one
+non-zero-sized field. If all fields are zero-sized, the `enum` must not be
+`#[repr(transparent)]`:
+
+```rust
+#![feature(transparent_enums)]
+
+// This (non-transparent) enum is already valid in stable Rust:
+pub enum GoodEnum {
+ Nothing,
+}
+
+// Error: transparent enum needs exactly one non-zero-sized field, but has 0
+// #[repr(transparent)]
+// pub enum BadEnum {
+// Nothing(()),
+// }
+
+// Error: transparent enum needs exactly one non-zero-sized field, but has 0
+// #[repr(transparent)]
+// pub enum BadEmptyEnum {
+// Nothing,
+// }
+```
+
+The one exception is if the `enum` is generic over `T` and has a field of type
+`T`, it may be `#[repr(transparent)]` even if `T` is a zero-sized type:
+
+```rust
+#![feature(transparent_enums)]
+
+// This enum has the same representation as `T`.
+#[repr(transparent)]
+pub enum GenericEnum<T> {
+ Variant(T, ()),
+}
+
+// This is okay even though `()` is a zero-sized type.
+pub const THIS_IS_OKAY: GenericEnum<()> = GenericEnum::Variant((), ());
+```
+
+Transparent `enum`s require exactly one variant:
+
+```rust
+// Error: transparent enum needs exactly one variant, but has 0
+// #[repr(transparent)]
+// pub enum TooFewVariants {
+// }
+
+// Error: transparent enum needs exactly one variant, but has 2
+// #[repr(transparent)]
+// pub enum TooManyVariants {
+// First(usize),
+// Second,
+// }
+```
+
+Like transparent `struct`s, a transparent `enum` of type `E` has the same layout,
+size, and ABI as its single non-ZST field. If it is generic over a type `T`, and
+all its fields are ZSTs except for exactly one field of type `T`, then it has
+the same layout and ABI as `T` (even if `T` is a ZST when monomorphized).
+
+Like transparent `struct`s, transparent `enum`s are FFI-safe if and only if
+their underlying representation type is also FFI-safe.
--- /dev/null
+# `transparent_unions`
+
+The tracking issue for this feature is [#60405]
+
+[#60405]: https://github.com/rust-lang/rust/issues/60405
+
+----
+
+The `transparent_unions` feature allows you to mark `union`s as
+`#[repr(transparent)]`. A `union` may be `#[repr(transparent)]` in exactly the
+same conditions in which a `struct` may be `#[repr(transparent)]` (generally,
+this means the `union` must have exactly one non-zero-sized field). Some
+concrete illustrations follow.
+
+```rust
+#![feature(transparent_unions)]
+
+// This union has the same representation as `f32`.
+#[repr(transparent)]
+union SingleFieldUnion {
+ field: f32,
+}
+
+// This union has the same representation as `usize`.
+#[repr(transparent)]
+union MultiFieldUnion {
+ field: usize,
+ nothing: (),
+}
+```
+
+For consistency with transparent `struct`s, `union`s must have exactly one
+non-zero-sized field. If all fields are zero-sized, the `union` must not be
+`#[repr(transparent)]`:
+
+```rust
+#![feature(transparent_unions)]
+
+// This (non-transparent) union is already valid in stable Rust:
+pub union GoodUnion {
+ pub nothing: (),
+}
+
+// Error: transparent union needs exactly one non-zero-sized field, but has 0
+// #[repr(transparent)]
+// pub union BadUnion {
+// pub nothing: (),
+// }
+```
+
+The one exception is if the `union` is generic over `T` and has a field of type
+`T`, it may be `#[repr(transparent)]` even if `T` is a zero-sized type:
+
+```rust
+#![feature(transparent_unions)]
+
+// This union has the same representation as `T`.
+#[repr(transparent)]
+pub union GenericUnion<T: Copy> { // Unions with non-`Copy` fields are unstable.
+ pub field: T,
+ pub nothing: (),
+}
+
+// This is okay even though `()` is a zero-sized type.
+pub const THIS_IS_OKAY: GenericUnion<()> = GenericUnion { field: () };
+```
+
+Like transparent `struct`s, a transparent `union` of type `U` has the same
+layout, size, and ABI as its single non-ZST field. If it is generic over a type
+`T`, and all its fields are ZSTs except for exactly one field of type `T`, then
+it has the same layout and ABI as `T` (even if `T` is a ZST when monomorphized).
+
+Like transparent `struct`s, transparent `union`s are FFI-safe if and only if
+their underlying representation type is also FFI-safe.
+
+A `union` may not be eligible for the same nonnull-style optimizations that a
+`struct` or `enum` (with the same fields) are eligible for. Adding
+`#[repr(transparent)]` to `union` does not change this. To give a more concrete
+example, it is unspecified whether `size_of::<T>()` is equal to
+`size_of::<Option<T>>()`, where `T` is a `union` (regardless of whether or not
+it is transparent). The Rust compiler is free to perform this optimization if
+possible, but is not required to, and different compiler versions may differ in
+their application of these optimizations.
// them from the `#[global_allocator]` attribute if there is one, or uses the
// default implementations in libstd (`__rdl_alloc` etc in `src/libstd/alloc.rs`)
// otherwise.
- #[allocator]
+ #[cfg_attr(bootstrap, allocator)]
+ #[cfg_attr(not(bootstrap), rustc_allocator)]
#[rustc_allocator_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_allocator_nounwind]
#![feature(coerce_unsized)]
#![feature(dispatch_from_dyn)]
#![feature(core_intrinsics)]
-#![feature(custom_attribute)]
+#![cfg_attr(bootstrap, feature(custom_attribute))]
#![feature(dropck_eyepatch)]
#![feature(exact_size_is_empty)]
#![feature(fmt_internals)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
/// # Examples
///
/// ```
- /// #![feature(as_cell)]
/// use std::cell::Cell;
///
/// let slice: &mut [i32] = &mut [1, 2, 3];
/// assert_eq!(slice_cell.len(), 3);
/// ```
#[inline]
- #[unstable(feature = "as_cell", issue="43038")]
+ #[stable(feature = "as_cell", since = "1.37.0")]
pub fn from_mut(t: &mut T) -> &Cell<T> {
unsafe {
&*(t as *mut T as *const Cell<T>)
/// # Examples
///
/// ```
- /// #![feature(as_cell)]
/// use std::cell::Cell;
///
/// let slice: &mut [i32] = &mut [1, 2, 3];
///
/// assert_eq!(slice_cell.len(), 3);
/// ```
- #[unstable(feature = "as_cell", issue="43038")]
+ #[stable(feature = "as_cell", since = "1.37.0")]
pub fn as_slice_of_cells(&self) -> &[Cell<T>] {
unsafe {
&*(self as *const Cell<[T]> as *const [Cell<T>])
/// Returns the absolute value of an `f64`.
pub fn fabsf64(x: f64) -> f64;
+ /// Returns the minimum of two `f32` values.
+ #[cfg(not(bootstrap))]
+ pub fn minnumf32(x: f32, y: f32) -> f32;
+ /// Returns the minimum of two `f64` values.
+ #[cfg(not(bootstrap))]
+ pub fn minnumf64(x: f64, y: f64) -> f64;
+ /// Returns the maximum of two `f32` values.
+ #[cfg(not(bootstrap))]
+ pub fn maxnumf32(x: f32, y: f32) -> f32;
+ /// Returns the maximum of two `f64` values.
+ #[cfg(not(bootstrap))]
+ pub fn maxnumf64(x: f64, y: f64) -> f64;
+
/// Copies the sign from `y` to `x` for `f32` values.
pub fn copysignf32(x: f32, y: f32) -> f32;
/// Copies the sign from `y` to `x` for `f64` values.
pub fn nontemporal_store<T>(ptr: *mut T, val: T);
}
-mod real_intrinsics {
- extern "rust-intrinsic" {
- /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
- /// and destination must *not* overlap.
- /// For the full docs, see the stabilized wrapper [`copy_nonoverlapping`].
- ///
- /// [`copy_nonoverlapping`]: ../../std/ptr/fn.copy_nonoverlapping.html
- pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
-
- /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
- /// and destination may overlap.
- /// For the full docs, see the stabilized wrapper [`copy`].
- ///
- /// [`copy`]: ../../std/ptr/fn.copy.html
- pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
-
- /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
- /// `val`.
- /// For the full docs, see the stabilized wrapper [`write_bytes`].
- ///
- /// [`write_bytes`]: ../../std/ptr/fn.write_bytes.html
- pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
- }
-}
+// Some functions are defined here because they accidentally got made
+// available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
+// (`transmute` also falls into this category, but it cannot be wrapped due to the
+// check that `T` and `U` have the same size.)
/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
/// and destination must *not* overlap.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
- real_intrinsics::copy_nonoverlapping(src, dst, count);
+ extern "rust-intrinsic" {
+ fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ }
+ copy_nonoverlapping(src, dst, count);
}
/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
- real_intrinsics::copy(src, dst, count)
+ extern "rust-intrinsic" {
+ fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ }
+ copy(src, dst, count)
}
/// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
- real_intrinsics::write_bytes(dst, val, count)
+ extern "rust-intrinsic" {
+ fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+ write_bytes(dst, val, count)
+}
+
+// Simple bootstrap implementations of minnum/maxnum for stage0 compilation.
+
+/// Returns the minimum of two `f32` values.
+#[cfg(bootstrap)]
+pub fn minnumf32(x: f32, y: f32) -> f32 {
+ // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
+ // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+ // is either x or y, canonicalized (this means results might differ among implementations).
+ // When either x or y is a signaling NaN, then the result is according to 6.2.
+ //
+ // Since we do not support sNaN in Rust yet, we do not need to handle them.
+ // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+ // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+ (if x < y || y != y { x } else { y }) * 1.0
+}
+
+/// Returns the minimum of two `f64` values.
+#[cfg(bootstrap)]
+pub fn minnumf64(x: f64, y: f64) -> f64 {
+ // Identical to the `f32` case.
+ (if x < y || y != y { x } else { y }) * 1.0
+}
+
+/// Returns the maximum of two `f32` values.
+#[cfg(bootstrap)]
+pub fn maxnumf32(x: f32, y: f32) -> f32 {
+ // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
+ // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+ // is either x or y, canonicalized (this means results might differ among implementations).
+ // When either x or y is a signaling NaN, then the result is according to 6.2.
+ //
+ // Since we do not support sNaN in Rust yet, we do not need to handle them.
+ // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+ // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+ (if x < y || x != x { y } else { x }) * 1.0
+}
+
+/// Returns the maximum of two `f64` values.
+#[cfg(bootstrap)]
+pub fn maxnumf64(x: f64, y: f64) -> f64 {
+ // Identical to the `f32` case.
+ (if x < y || x != x { y } else { x }) * 1.0
}
None
}
}
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ if let Some(minus_n) = self.end.sub_usize(n) {
+ if minus_n > self.start {
+ self.end = minus_n.sub_one();
+ return Some(self.end.clone())
+ }
+ }
+
+ self.end = self.start.clone();
+ None
+ }
}
#[stable(feature = "fused", since = "1.26.0")]
})
}
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ self.compute_is_empty();
+ if self.is_empty.unwrap_or_default() {
+ return None;
+ }
+
+ if let Some(minus_n) = self.end.sub_usize(n) {
+ use crate::cmp::Ordering::*;
+
+ match minus_n.partial_cmp(&self.start) {
+ Some(Greater) => {
+ self.is_empty = Some(false);
+ self.end = minus_n.sub_one();
+ return Some(minus_n);
+ }
+ Some(Equal) => {
+ self.is_empty = Some(true);
+ return Some(minus_n);
+ }
+ _ => {}
+ }
+ }
+
+ self.is_empty = Some(true);
+ None
+ }
+
#[inline]
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R where
Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
let mut left: B = Default::default();
let mut right: B = Default::default();
- for x in self {
+ self.for_each(|x| {
if f(&x) {
left.extend(Some(x))
} else {
right.extend(Some(x))
}
- }
+ });
(left, right)
}
#![feature(arbitrary_self_types)]
#![feature(asm)]
#![feature(associated_type_defaults)]
+#![feature(bound_cloned)]
#![feature(cfg_target_has_atomic)]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(const_fn_union)]
-#![feature(custom_attribute)]
#![feature(doc_cfg)]
#![feature(doc_spotlight)]
#![feature(extern_types)]
}
}
+/// Replace `dest` with the default value of `T`, and return the previous `dest` value.
+///
+/// # Examples
+///
+/// A simple example:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v: Vec<i32> = vec![1, 2];
+///
+/// let old_v = mem::take(&mut v);
+/// assert_eq!(vec![1, 2], old_v);
+/// assert!(v.is_empty());
+/// ```
+///
+/// `take` allows taking ownership of a struct field by replacing it with an "empty" value.
+/// Without `take` you can run into issues like these:
+///
+/// ```compile_fail,E0507
+/// struct Buffer<T> { buf: Vec<T> }
+///
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// // error: cannot move out of dereference of `&mut`-pointer
+/// let buf = self.buf;
+/// self.buf = Vec::new();
+/// buf
+/// }
+/// }
+/// ```
+///
+/// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset
+/// `self.buf`. But `take` can be used to disassociate the original value of `self.buf` from
+/// `self`, allowing it to be returned:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// use std::mem;
+///
+/// # struct Buffer<T> { buf: Vec<T> }
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// mem::take(&mut self.buf)
+/// }
+/// }
+/// ```
+///
+/// [`Clone`]: ../../std/clone/trait.Clone.html
+#[inline]
+#[unstable(feature = "mem_take", issue = "61129")]
+pub fn take<T: Default>(dest: &mut T) -> T {
+ replace(dest, T::default())
+}
+
/// Moves `src` into the referenced `dest`, returning the previous `dest` value.
///
/// Neither value is dropped.
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(not(test))]
+use crate::intrinsics;
+
use crate::mem;
use crate::num::FpCategory;
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn max(self, other: f32) -> f32 {
- // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
- // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
- // is either x or y, canonicalized (this means results might differ among implementations).
- // When either x or y is a signalingNaN, then the result is according to 6.2.
- //
- // Since we do not support sNaN in Rust yet, we do not need to handle them.
- // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
- // multiplying by 1.0. Should switch to the `canonicalize` when it works.
- (if self.is_nan() || self < other { other } else { self }) * 1.0
+ intrinsics::maxnumf32(self, other)
}
/// Returns the minimum of the two numbers.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn min(self, other: f32) -> f32 {
- // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
- // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
- // is either x or y, canonicalized (this means results might differ among implementations).
- // When either x or y is a signalingNaN, then the result is according to 6.2.
- //
- // Since we do not support sNaN in Rust yet, we do not need to handle them.
- // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
- // multiplying by 1.0. Should switch to the `canonicalize` when it works.
- (if other.is_nan() || self < other { self } else { other }) * 1.0
+ intrinsics::minnumf32(self, other)
}
/// Raw transmutation to `u32`.
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(not(test))]
+use crate::intrinsics;
+
use crate::mem;
use crate::num::FpCategory;
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn max(self, other: f64) -> f64 {
- // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
- // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
- // is either x or y, canonicalized (this means results might differ among implementations).
- // When either x or y is a signalingNaN, then the result is according to 6.2.
- //
- // Since we do not support sNaN in Rust yet, we do not need to handle them.
- // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
- // multiplying by 1.0. Should switch to the `canonicalize` when it works.
- (if self.is_nan() || self < other { other } else { self }) * 1.0
+ intrinsics::maxnumf64(self, other)
}
/// Returns the minimum of the two numbers.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn min(self, other: f64) -> f64 {
- // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
- // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
- // is either x or y, canonicalized (this means results might differ among implementations).
- // When either x or y is a signalingNaN, then the result is according to 6.2.
- //
- // Since we do not support sNaN in Rust yet, we do not need to handle them.
- // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
- // multiplying by 1.0. Should switch to the `canonicalize` when it works.
- (if other.is_nan() || self < other { self } else { other }) * 1.0
+ intrinsics::minnumf64(self, other)
}
/// Raw transmutation to `u64`.
$EndFeature, "
```"),
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_int_sign")]
#[inline]
- pub fn signum(self) -> Self {
- match self {
- n if n > 0 => 1,
- 0 => 0,
- _ => -1,
- }
+ pub const fn signum(self) -> Self {
+ (self > 0) as Self - (self < 0) as Self
}
}
Unbounded,
}
+impl<T: Clone> Bound<&T> {
+ /// Map a `Bound<&T>` to a `Bound<T>` by cloning the contents of the bound.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(bound_cloned)]
+ /// use std::ops::Bound::*;
+ /// use std::ops::RangeBounds;
+ ///
+ /// assert_eq!((1..12).start_bound(), Included(&1));
+ /// assert_eq!((1..12).start_bound().cloned(), Included(1));
+ /// ```
+ #[unstable(feature = "bound_cloned", issue = "61356")]
+ pub fn cloned(self) -> Bound<T> {
+ match self {
+ Bound::Unbounded => Bound::Unbounded,
+ Bound::Included(x) => Bound::Included(x.clone()),
+ Bound::Excluded(x) => Bound::Excluded(x.clone()),
+ }
+ }
+}
+
#[stable(feature = "collections_range", since = "1.28.0")]
/// `RangeBounds` is implemented by Rust's built-in range types, produced
/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
// which basically means it must be `Option`.
/// The `Option` type. See [the module level documentation](index.html) for more.
-#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
/// No value
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for Option<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ match self {
+ Some(x) => Some(x.clone()),
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Some(to), Some(from)) => to.clone_from(from),
+ (to, from) => *to = from.clone(),
+ }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Option<T> {
/// Returns [`None`][Option::None].
//! Types that pin data to its location in memory.
//!
-//! It is sometimes useful to have objects that are guaranteed to not move,
+//! It is sometimes useful to have objects that are guaranteed not to move,
//! in the sense that their placement in memory does not change, and can thus be relied upon.
//! A prime example of such a scenario would be building self-referential structs,
-//! since moving an object with pointers to itself will invalidate them,
-//! which could cause undefined behavior.
+//! as moving an object with pointers to itself will invalidate them, which could cause undefined
+//! behavior.
//!
//! A [`Pin<P>`] ensures that the pointee of any pointer type `P` has a stable location in memory,
//! meaning it cannot be moved elsewhere and its memory cannot be deallocated
//! moving the values they contain: you can move out of a `Box<T>`, or you can use [`mem::swap`].
//! [`Pin<P>`] wraps a pointer type `P`, so `Pin<Box<T>>` functions much like a regular `Box<T>`:
//! when a `Pin<Box<T>>` gets dropped, so do its contents, and the memory gets deallocated.
-//! Similarily, `Pin<&mut T>` is a lot like `&mut T`. However, [`Pin<P>`] does not let clients
+//! Similarly, `Pin<&mut T>` is a lot like `&mut T`. However, [`Pin<P>`] does not let clients
//! actually obtain a `Box<T>` or `&mut T` to pinned data, which implies that you cannot use
//! operations such as [`mem::swap`]:
+//!
//! ```
//! use std::pin::Pin;
//! fn swap_pins<T>(x: Pin<&mut T>, y: Pin<&mut T>) {
//! as a "`P`-style pointer" to a pinned `P::Target` -- so, a `Pin<Box<T>>` is
//! an owned pointer to a pinned `T`, and a `Pin<Rc<T>>` is a reference-counted
//! pointer to a pinned `T`.
-//! For correctness, [`Pin<P>`] relies on the [`Deref`] and [`DerefMut`] implementations
-//! to not move out of their `self` parameter, and to only ever return a pointer
-//! to pinned data when they are called on a pinned pointer.
+//! For correctness, [`Pin<P>`] relies on the implementations of [`Deref`] and
+//! [`DerefMut`] not to move out of their `self` parameter, and only ever to
+//! return a pointer to pinned data when they are called on a pinned pointer.
//!
//! # `Unpin`
//!
-//! However, these restrictions are usually not necessary. Many types are always freely
-//! movable, even when pinned, because they do not rely on having a stable address.
-//! This includes all the basic types (like `bool`, `i32`, references)
-//! as well as types consisting solely of these types.
-//! Types that do not care about pinning implement the [`Unpin`] auto-trait, which
-//! cancels the effect of [`Pin<P>`]. For `T: Unpin`, `Pin<Box<T>>` and `Box<T>` function
-//! identically, as do `Pin<&mut T>` and `&mut T`.
+//! Many types are always freely movable, even when pinned, because they do not
+//! rely on having a stable address. This includes all the basic types (like
+//! `bool`, `i32`, and references) as well as types consisting solely of these
+//! types. Types that do not care about pinning implement the [`Unpin`]
+//! auto-trait, which cancels the effect of [`Pin<P>`]. For `T: Unpin`,
+//! `Pin<Box<T>>` and `Box<T>` function identically, as do `Pin<&mut T>` and
+//! `&mut T`.
//!
//! Note that pinning and `Unpin` only affect the pointed-to type `P::Target`, not the pointer
//! type `P` itself that got wrapped in `Pin<P>`. For example, whether or not `Box<T>` is
//! use std::marker::PhantomPinned;
//! use std::ptr::NonNull;
//!
-//! // This is a self-referential struct since the slice field points to the data field.
+//! // This is a self-referential struct because the slice field points to the data field.
//! // We cannot inform the compiler about that with a normal reference,
-//! // since this pattern cannot be described with the usual borrowing rules.
-//! // Instead we use a raw pointer, though one which is known to not be null,
-//! // since we know it's pointing at the string.
+//! // as this pattern cannot be described with the usual borrowing rules.
+//! // Instead we use a raw pointer, though one which is known not to be null,
+//! // as we know it's pointing at the string.
//! struct Unmovable {
//! data: String,
//! slice: NonNull<String>,
//! section needs to function correctly.
//!
//! Notice that this guarantee does *not* mean that memory does not leak! It is still
-//! completely okay not to ever call `drop` on a pinned element (e.g., you can still
+//! completely okay not ever to call `drop` on a pinned element (e.g., you can still
//! call [`mem::forget`] on a `Pin<Box<T>>`). In the example of the doubly-linked
//! list, that element would just stay in the list. However you may not free or reuse the storage
//! *without calling `drop`*.
//! `Unpin`. This is the default, but `Unpin` is a safe trait, so as the author of
//! the wrapper it is your responsibility *not* to add something like
//! `impl<T> Unpin for Wrapper<T>`. (Notice that adding a projection operation
-//! requires unsafe code, so the fact that `Unpin` is a safe trait does not break
+//! requires unsafe code, so the fact that `Unpin` is a safe trait does not break
//! the principle that you only have to worry about any of this if you use `unsafe`.)
//! 2. The destructor of the wrapper must not move structural fields out of its argument. This
//! is the exact point that was raised in the [previous section][drop-impl]: `drop` takes
///
/// [`Ok`]: enum.Result.html#variant.Ok
/// [`Err`]: enum.Result.html#variant.Err
-#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[must_use = "this `Result` may be an `Err` variant, which should be handled"]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Result<T, E> {
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, E: Clone> Clone for Result<T, E> {
+ #[inline]
+ fn clone(&self) -> Self {
+ match self {
+ Ok(x) => Ok(x.clone()),
+ Err(x) => Err(x.clone()),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Ok(to), Ok(from)) => to.clone_from(from),
+ (Err(to), Err(from)) => to.clone_from(from),
+ (to, from) => *to = from.clone(),
+ }
+ }
+}
+
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, E> IntoIterator for Result<T, E> {
type Item = T;
}
impl_marker_for!(BytewiseEquality,
- u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool);
+ u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
/// [`to_ascii_uppercase`].
///
/// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("Grüße, Jürgen ❤");
+ ///
+ /// s.make_ascii_uppercase();
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s);
+ /// ```
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
pub fn make_ascii_uppercase(&mut self) {
let me = unsafe { self.as_bytes_mut() };
/// [`to_ascii_lowercase`].
///
/// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("GRÜßE, JÜRGEN ❤");
+ ///
+ /// s.make_ascii_lowercase();
+ ///
+ /// assert_eq!("grÜße, jÜrgen ❤", s);
+ /// ```
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
pub fn make_ascii_lowercase(&mut self) {
let me = unsafe { self.as_bytes_mut() };
assert_eq!(r, 20..20);
}
+#[test]
+fn test_range_nth_back() {
+ assert_eq!((10..15).nth_back(0), Some(14));
+ assert_eq!((10..15).nth_back(1), Some(13));
+ assert_eq!((10..15).nth_back(4), Some(10));
+ assert_eq!((10..15).nth_back(5), None);
+ assert_eq!((-120..80_i8).nth_back(199), Some(-120));
+
+ let mut r = 10..20;
+ assert_eq!(r.nth_back(2), Some(17));
+ assert_eq!(r, 10..17);
+ assert_eq!(r.nth_back(2), Some(14));
+ assert_eq!(r, 10..14);
+ assert_eq!(r.nth_back(10), None);
+ assert_eq!(r, 10..10);
+}
+
#[test]
fn test_range_from_nth() {
assert_eq!((10..).nth(0), Some(10));
assert_eq!(ExactSizeIterator::is_empty(&r), true);
}
+#[test]
+fn test_range_inclusive_nth_back() {
+ assert_eq!((10..=15).nth_back(0), Some(15));
+ assert_eq!((10..=15).nth_back(1), Some(14));
+ assert_eq!((10..=15).nth_back(5), Some(10));
+ assert_eq!((10..=15).nth_back(6), None);
+ assert_eq!((-120..=80_i8).nth_back(200), Some(-120));
+
+ let mut r = 10_u8..=20;
+ assert_eq!(r.nth_back(2), Some(18));
+ assert_eq!(r, 10..=17);
+ assert_eq!(r.nth_back(2), Some(15));
+ assert_eq!(r, 10..=14);
+ assert_eq!(r.is_empty(), false);
+ assert_eq!(ExactSizeIterator::is_empty(&r), false);
+ assert_eq!(r.nth_back(10), None);
+ assert_eq!(r.is_empty(), true);
+ assert_eq!(ExactSizeIterator::is_empty(&r), true);
+}
+
#[test]
fn test_range_step() {
#![allow(deprecated)]
+#![feature(bound_cloned)]
#![feature(box_syntax)]
#![feature(cell_update)]
#![feature(core_private_bignum)]
test_literal!(0.1);
test_literal!(12345.);
test_literal!(0.9999999);
+ #[cfg(not(miri))] // Miri is too slow
test_literal!(2.2250738585072014e-308);
}
test_literal!(0.0);
test_literal!(1e-325);
test_literal!(1e-326);
+ #[cfg(not(miri))] // Miri is too slow
test_literal!(1e-500);
}
-use core::ops::{Range, RangeFull, RangeFrom, RangeTo, RangeInclusive};
+use core::ops::{Bound, Range, RangeFull, RangeFrom, RangeTo, RangeInclusive};
// Test the Range structs without the syntactic sugar.
assert!( (NAN ..= EPSILON).is_empty());
assert!( (NAN ..= NAN).is_empty());
}
+
+#[test]
+fn test_bound_cloned_unbounded() {
+ assert_eq!(Bound::<&u32>::Unbounded.cloned(), Bound::Unbounded);
+}
+
+#[test]
+fn test_bound_cloned_included() {
+ assert_eq!(Bound::Included(&3).cloned(), Bound::Included(3));
+}
+
+#[test]
+fn test_bound_cloned_excluded() {
+ assert_eq!(Bound::Excluded(&3).cloned(), Bound::Excluded(3));
+}
name = "fmt_macros"
path = "lib.rs"
crate-type = ["dylib"]
+
+[dependencies]
+syntax_pos = { path = "../libsyntax_pos" }
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(nll)]
#![feature(rustc_private)]
use std::string;
use std::iter;
+use syntax_pos::{InnerSpan, Symbol};
+
+#[derive(Copy, Clone)]
+struct InnerOffset(usize);
+
+impl InnerOffset {
+ fn to(self, end: InnerOffset) -> InnerSpan {
+ InnerSpan::new(self.0, end.0)
+ }
+}
+
/// A piece is a portion of the format string which represents the next part
/// to emit. These are emitted as a stream by the `Parser` class.
#[derive(Copy, Clone, PartialEq)]
#[derive(Copy, Clone, PartialEq)]
pub struct Argument<'a> {
/// Where to find this argument
- pub position: Position<'a>,
+ pub position: Position,
/// How to format the argument
pub format: FormatSpec<'a>,
}
/// Packed version of various flags provided
pub flags: u32,
/// The integer precision to use
- pub precision: Count<'a>,
+ pub precision: Count,
/// The string width requested for the resulting format
- pub width: Count<'a>,
+ pub width: Count,
/// The descriptor string representing the name of the format desired for
/// this argument, this can be empty or any number of characters, although
/// it is required to be one word.
/// Enum describing where an argument for a format can be located.
#[derive(Copy, Clone, PartialEq)]
-pub enum Position<'a> {
+pub enum Position {
/// The argument is implied to be located at an index
ArgumentImplicitlyIs(usize),
/// The argument is located at a specific index given in the format
ArgumentIs(usize),
/// The argument has a name.
- ArgumentNamed(&'a str),
+ ArgumentNamed(Symbol),
}
-impl Position<'_> {
+impl Position {
pub fn index(&self) -> Option<usize> {
match self {
ArgumentIs(i) | ArgumentImplicitlyIs(i) => Some(*i),
/// A count is used for the precision and width parameters of an integer, and
/// can reference either an argument or a literal integer.
#[derive(Copy, Clone, PartialEq)]
-pub enum Count<'a> {
+pub enum Count {
/// The count is specified explicitly.
CountIs(usize),
/// The count is specified by the argument with the given name.
- CountIsName(&'a str),
+ CountIsName(Symbol),
/// The count is specified by the argument at the given index.
CountIsParam(usize),
/// The count is implied and cannot be explicitly specified.
pub description: string::String,
pub note: Option<string::String>,
pub label: string::String,
- pub start: SpanIndex,
- pub end: SpanIndex,
- pub secondary_label: Option<(string::String, SpanIndex, SpanIndex)>,
+ pub span: InnerSpan,
+ pub secondary_label: Option<(string::String, InnerSpan)>,
}
/// The parser structure for interpreting the input format string. This is
/// `Some(raw count)` when the string is "raw", used to position spans correctly
style: Option<usize>,
/// Start and end byte offset of every successfully parsed argument
- pub arg_places: Vec<(SpanIndex, SpanIndex)>,
+ pub arg_places: Vec<InnerSpan>,
/// Characters that need to be shifted
skips: Vec<usize>,
- /// Span offset of the last opening brace seen, used for error reporting
- last_opening_brace_pos: Option<SpanIndex>,
+ /// Span of the last opening brace seen, used for error reporting
+ last_opening_brace: Option<InnerSpan>,
/// Wether the source string is comes from `println!` as opposed to `format!` or `print!`
append_newline: bool,
}
-#[derive(Clone, Copy, Debug)]
-pub struct SpanIndex(pub usize);
-
-impl SpanIndex {
- pub fn unwrap(self) -> usize {
- self.0
- }
-}
-
impl<'a> Iterator for Parser<'a> {
type Item = Piece<'a>;
if let Some(&(pos, c)) = self.cur.peek() {
match c {
'{' => {
- let curr_last_brace = self.last_opening_brace_pos;
- self.last_opening_brace_pos = Some(self.to_span_index(pos));
+ let curr_last_brace = self.last_opening_brace;
+ let byte_pos = self.to_span_index(pos);
+ self.last_opening_brace = Some(byte_pos.to(InnerOffset(byte_pos.0 + 1)));
self.cur.next();
if self.consume('{') {
- self.last_opening_brace_pos = curr_last_brace;
+ self.last_opening_brace = curr_last_brace;
Some(String(self.string(pos + 1)))
} else {
let arg = self.argument();
- if let Some(arg_pos) = self.must_consume('}').map(|end| {
- (self.to_span_index(pos), self.to_span_index(end + 1))
- }) {
- self.arg_places.push(arg_pos);
+ if let Some(end) = self.must_consume('}') {
+ let start = self.to_span_index(pos);
+ let end = self.to_span_index(end + 1);
+ self.arg_places.push(start.to(end));
}
Some(NextArgument(arg))
}
"unmatched `}` found",
"unmatched `}`",
"if you intended to print `}`, you can escape it using `}}`",
- err_pos,
- err_pos,
+ err_pos.to(err_pos),
);
None
}
style,
arg_places: vec![],
skips,
- last_opening_brace_pos: None,
+ last_opening_brace: None,
append_newline,
}
}
&mut self,
description: S1,
label: S2,
- start: SpanIndex,
- end: SpanIndex,
+ span: InnerSpan,
) {
self.errors.push(ParseError {
description: description.into(),
note: None,
label: label.into(),
- start,
- end,
+ span,
secondary_label: None,
});
}
description: S1,
label: S2,
note: S3,
- start: SpanIndex,
- end: SpanIndex,
+ span: InnerSpan,
) {
self.errors.push(ParseError {
description: description.into(),
note: Some(note.into()),
label: label.into(),
- start,
- end,
+ span,
secondary_label: None,
});
}
}
}
- fn raw(&self) -> usize {
- self.style.map(|raw| raw + 1).unwrap_or(0)
- }
-
- fn to_span_index(&self, pos: usize) -> SpanIndex {
+ fn to_span_index(&self, pos: usize) -> InnerOffset {
let mut pos = pos;
+ // This handles the raw string case: the raw argument is the number of #
+ // in r###"..."### (we need to add one because of the `r`).
+ let raw = self.style.map(|raw| raw + 1).unwrap_or(0);
for skip in &self.skips {
if pos > *skip {
pos += 1;
- } else if pos == *skip && self.raw() == 0 {
+ } else if pos == *skip && raw == 0 {
pos += 1;
} else {
break;
}
}
- SpanIndex(self.raw() + pos + 1)
+ InnerOffset(raw + pos + 1)
}
/// Forces consumption of the specified character. If the character is not
let label = "expected `}`".to_owned();
let (note, secondary_label) = if c == '}' {
(Some("if you intended to print `{`, you can escape it using `{{`".to_owned()),
- self.last_opening_brace_pos.map(|pos| {
- ("because of this opening brace".to_owned(), pos, pos)
+ self.last_opening_brace.map(|sp| {
+ ("because of this opening brace".to_owned(), sp)
}))
} else {
(None, None)
description,
note,
label,
- start: pos,
- end: pos,
+ span: pos.to(pos),
secondary_label,
});
None
let label = format!("expected `{:?}`", c);
let (note, secondary_label) = if c == '}' {
(Some("if you intended to print `{`, you can escape it using `{{`".to_owned()),
- self.last_opening_brace_pos.map(|pos| {
- ("because of this opening brace".to_owned(), pos, pos)
+ self.last_opening_brace.map(|sp| {
+ ("because of this opening brace".to_owned(), sp)
}))
} else {
(None, None)
description,
note,
label,
- start: pos,
- end: pos,
+ span: pos.to(pos),
secondary_label,
});
} else {
- self.err(description, format!("expected `{:?}`", c), pos, pos);
+ self.err(description, format!("expected `{:?}`", c), pos.to(pos));
}
None
}
/// integer index of an argument, a named argument, or a blank string.
/// Returns `Some(parsed_position)` if the position is not implicitly
/// consuming a macro argument, `None` if it's the case.
- fn position(&mut self) -> Option<Position<'a>> {
+ fn position(&mut self) -> Option<Position> {
if let Some(i) = self.integer() {
Some(ArgumentIs(i))
} else {
match self.cur.peek() {
- Some(&(_, c)) if c.is_alphabetic() => Some(ArgumentNamed(self.word())),
+ Some(&(_, c)) if c.is_alphabetic() => {
+ Some(ArgumentNamed(Symbol::intern(self.word())))
+ }
Some(&(pos, c)) if c == '_' => {
let invalid_name = self.string(pos);
self.err_with_note(format!("invalid argument name `{}`", invalid_name),
"invalid argument name",
"argument names cannot start with an underscore",
- self.to_span_index(pos),
- self.to_span_index(pos + invalid_name.len()));
- Some(ArgumentNamed(invalid_name))
+ self.to_span_index(pos).to(
+ self.to_span_index(pos + invalid_name.len())
+ ),
+ );
+ Some(ArgumentNamed(Symbol::intern(invalid_name)))
},
// This is an `ArgumentNext`.
/// Parses a Count parameter at the current position. This does not check
/// for 'CountIsNextParam' because that is only used in precision, not
/// width.
- fn count(&mut self) -> Count<'a> {
+ fn count(&mut self) -> Count {
if let Some(i) = self.integer() {
if self.consume('$') {
CountIsParam(i)
self.cur = tmp;
CountImplied
} else if self.consume('$') {
- CountIsName(word)
+ CountIsName(Symbol::intern(word))
} else {
self.cur = tmp;
CountImplied
}
#[test]
fn format_counts() {
+ use syntax_pos::{GLOBALS, Globals, edition};
+ GLOBALS.set(&Globals::new(edition::DEFAULT_EDITION), || {
same("{:10s}",
&[NextArgument(Argument {
position: ArgumentImplicitlyIs(0),
fill: None,
align: AlignUnknown,
flags: 0,
- precision: CountIsName("b"),
- width: CountIsName("a"),
+ precision: CountIsName(Symbol::intern("b")),
+ width: CountIsName(Symbol::intern("a")),
ty: "s",
},
})]);
+ });
}
#[test]
fn format_flags() {
let (article, allowed_targets) = match hint.name_or_empty() {
name @ sym::C | name @ sym::align => {
is_c |= name == sym::C;
- if target != Target::Struct &&
- target != Target::Union &&
- target != Target::Enum {
- ("a", "struct, enum or union")
- } else {
- continue
+ match target {
+ Target::Struct | Target::Union | Target::Enum => continue,
+ _ => ("a", "struct, enum, or union"),
}
}
sym::packed => {
}
sym::transparent => {
is_transparent = true;
- if target != Target::Struct {
- ("a", "struct")
- } else {
- continue
+ match target {
+ Target::Struct | Target::Union | Target::Enum => continue,
+ _ => ("a", "struct, enum, or union"),
}
}
sym::i8 | sym::u8 | sym::i16 | sym::u16 |
if is_transparent && hints.len() > 1 {
let hint_spans: Vec<_> = hint_spans.clone().collect();
span_err!(self.tcx.sess, hint_spans, E0692,
- "transparent struct cannot have other repr hints");
+ "transparent {} cannot have other repr hints", target);
}
// Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8)
if (int_reprs > 1)
attr.span,
stmt.span,
"attribute should not be applied to a statement",
- "not a struct, enum or union",
+ "not a struct, enum, or union",
);
}
}
attr.span,
expr.span,
"attribute should not be applied to an expression",
- "not defining a struct, enum or union",
+ "not defining a struct, enum, or union",
);
}
}
ExprKind::AddrOf(_, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
visitor.visit_expr(subexpression)
}
- ExprKind::Lit(_) => {}
ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
visitor.visit_expr(subexpression);
visitor.visit_ty(typ)
ExprKind::Yield(ref subexpression) => {
visitor.visit_expr(subexpression);
}
- ExprKind::Err => {}
+ ExprKind::Lit(_) | ExprKind::Err => {}
}
}
use syntax::std_inject;
use syntax::symbol::{kw, sym, Symbol};
use syntax::tokenstream::{TokenStream, TokenTree};
-use syntax::parse::token::Token;
+use syntax::parse::token::{self, Token};
use syntax::visit::{self, Visitor};
use syntax_pos::{DUMMY_SP, edition, Span};
fn lower_token_tree(&mut self, tree: TokenTree) -> TokenStream {
match tree {
- TokenTree::Token(span, token) => self.lower_token(token, span),
+ TokenTree::Token(token) => self.lower_token(token),
TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited(
span,
delim,
}
}
- fn lower_token(&mut self, token: Token, span: Span) -> TokenStream {
- match token {
- Token::Interpolated(nt) => {
- let tts = nt.to_tokenstream(&self.sess.parse_sess, span);
+ fn lower_token(&mut self, token: Token) -> TokenStream {
+ match token.kind {
+ token::Interpolated(nt) => {
+ let tts = nt.to_tokenstream(&self.sess.parse_sess, token.span);
self.lower_token_stream(tts)
}
- other => TokenTree::Token(span, other).into(),
+ _ => TokenTree::Token(token).into(),
}
}
itctx: ImplTraitContext<'_>,
explicit_owner: Option<NodeId>,
) -> hir::PathSegment {
- let (mut generic_args, infer_types) = if let Some(ref generic_args) = segment.args {
+ let (mut generic_args, infer_args) = if let Some(ref generic_args) = segment.args {
let msg = "parenthesized type parameters may only be used with a `Fn` trait";
match **generic_args {
GenericArgs::AngleBracketed(ref data) => {
.collect();
if expected_lifetimes > 0 && param_mode == ParamMode::Explicit {
let anon_lt_suggestion = vec!["'_"; expected_lifetimes].join(", ");
- let no_ty_args = generic_args.args.len() == expected_lifetimes;
+ let no_non_lt_args = generic_args.args.len() == expected_lifetimes;
let no_bindings = generic_args.bindings.is_empty();
- let (incl_angl_brckt, insertion_span, suggestion) = if no_ty_args && no_bindings {
+ let (incl_angl_brckt, insertion_sp, suggestion) = if no_non_lt_args && no_bindings {
// If there are no (non-implicit) generic args or associated type
// bindings, our suggestion includes the angle brackets.
(true, path_span.shrink_to_hi(), format!("<{}>", anon_lt_suggestion))
// Otherwise (sorry, this is kind of gross) we need to infer the
// place to splice in the `'_, ` from the generics that do exist.
let first_generic_span = first_generic_span
- .expect("already checked that type args or bindings exist");
+ .expect("already checked that non-lifetime args or bindings exist");
(false, first_generic_span.shrink_to_lo(), format!("{}, ", anon_lt_suggestion))
};
match self.anonymous_lifetime_mode {
expected_lifetimes,
path_span,
incl_angl_brckt,
- insertion_span,
+ insertion_sp,
suggestion,
);
err.emit();
expected_lifetimes,
path_span,
incl_angl_brckt,
- insertion_span,
+ insertion_sp,
suggestion,
)
);
Some(id),
Some(self.lower_res(res)),
generic_args,
- infer_types,
+ infer_args,
)
}
mut itctx: ImplTraitContext<'_>,
) -> (hir::GenericArgs, bool) {
let &AngleBracketedArgs { ref args, ref constraints, .. } = data;
- let has_types = args.iter().any(|arg| match arg {
+ let has_non_lt_args = args.iter().any(|arg| match arg {
+ ast::GenericArg::Lifetime(_) => false,
ast::GenericArg::Type(_) => true,
- _ => false,
+ ast::GenericArg::Const(_) => true,
});
(
hir::GenericArgs {
.collect(),
parenthesized: false,
},
- !has_types && param_mode == ParamMode::Optional
+ !has_non_lt_args && param_mode == ParamMode::Optional
)
}
hash_bodies: bool,
}
-impl<'a, 'hir, T> HashStable<StableHashingContext<'hir>> for HirItemLike<T>
- where T: HashStable<StableHashingContext<'hir>>
+impl<'hir, T> HashStable<StableHashingContext<'hir>> for HirItemLike<T>
+where
+ T: HashStable<StableHashingContext<'hir>>,
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'hir>,
}
fn visit_token(&mut self, t: Token) {
- if let Token::Interpolated(nt) = t {
+ if let token::Interpolated(nt) = t.kind {
if let token::NtExpr(ref expr) = *nt {
if let ExprKind::Mac(..) = expr.node {
self.visit_macro_invoc(expr.id);
}
}
+/// Evaluates to the number of tokens passed to it.
+///
+/// Logarithmic counting: every one or two recursive expansions, the number of
+/// tokens to count is divided by two, instead of being reduced by one.
+/// Therefore, the recursion depth is the binary logarithm of the number of
+/// tokens to count, and the expanded tree is likewise very small.
macro_rules! count {
- () => (0usize);
- ( $x:tt $($xs:tt)* ) => (1usize + count!($($xs)*));
+ () => (0usize);
+ ($one:tt) => (1usize);
+ ($($pairs:tt $_p:tt)*) => (count!($($pairs)*) << 1usize);
+ ($odd:tt $($rest:tt)*) => (count!($($rest)*) | 1usize);
}
// We define the GlobalMetaDataKind enum with this macro because we want to
/// This only applies to expression and pattern paths, and
/// out of those only the segments with no type parameters
/// to begin with, e.g., `Vec::new` is `<Vec<..>>::new::<..>`.
- pub infer_types: bool,
+ pub infer_args: bool,
}
impl PathSegment {
ident,
hir_id: None,
res: None,
- infer_types: true,
+ infer_args: true,
args: None,
}
}
hir_id: Option<HirId>,
res: Option<Res>,
args: GenericArgs,
- infer_types: bool,
+ infer_args: bool,
) -> Self {
PathSegment {
ident,
hir_id,
res,
- infer_types,
+ infer_args,
args: if args.is_empty() {
None
} else {
/// `#[cold]`: a hint to LLVM that this function, when called, is never on
/// the hot path.
const COLD = 1 << 0;
- /// `#[allocator]`: a hint to LLVM that the pointer returned from this
+ /// `#[rustc_allocator]`: a hint to LLVM that the pointer returned from this
/// function is never null.
const ALLOCATOR = 1 << 1;
/// `#[unwind]`: an indicator that this function may unwind despite what
segment.with_generic_args(|generic_args| {
if !generic_args.args.is_empty() || !generic_args.bindings.is_empty() {
- return self.print_generic_args(&generic_args, segment.infer_types, true);
+ return self.print_generic_args(&generic_args, segment.infer_args, true);
}
Ok(())
})?;
if segment.ident.name != kw::PathRoot {
self.print_ident(segment.ident)?;
segment.with_generic_args(|generic_args| {
- self.print_generic_args(generic_args, segment.infer_types,
+ self.print_generic_args(generic_args, segment.infer_args,
colons_before_params)
})?;
}
if segment.ident.name != kw::PathRoot {
self.print_ident(segment.ident)?;
segment.with_generic_args(|generic_args| {
- self.print_generic_args(generic_args, segment.infer_types, false)
+ self.print_generic_args(generic_args, segment.infer_args, false)
})?;
}
Ok(())
self.print_ident(segment.ident)?;
segment.with_generic_args(|generic_args| {
self.print_generic_args(generic_args,
- segment.infer_types,
+ segment.infer_args,
colons_before_params)
})?;
}
self.print_ident(item_segment.ident)?;
item_segment.with_generic_args(|generic_args| {
self.print_generic_args(generic_args,
- item_segment.infer_types,
+ item_segment.infer_args,
colons_before_params)
})
}
self.print_ident(item_segment.ident)?;
item_segment.with_generic_args(|generic_args| {
self.print_generic_args(generic_args,
- item_segment.infer_types,
+ item_segment.infer_args,
colons_before_params)
})
}
fn print_generic_args(&mut self,
generic_args: &hir::GenericArgs,
- infer_types: bool,
+ infer_args: bool,
colons_before_params: bool)
-> io::Result<()> {
if generic_args.parenthesized {
// FIXME(eddyb): this would leak into error messages (e.g.,
// "non-exhaustive patterns: `Some::<..>(_)` not covered").
- if infer_types && false {
+ if infer_args && false {
start_or_comma(self)?;
self.s.word("..")?;
}
}
}
-pub fn hash_stable_trait_impls<'a, 'gcx, W>(
+pub fn hash_stable_trait_impls<'a, W>(
hcx: &mut StableHashingContext<'a>,
hasher: &mut StableHasher<W>,
blanket_impls: &[DefId],
- non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>)
- where W: StableHasherResult
+ non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
+) where
+ W: StableHasherResult,
{
{
let mut blanket_impls: SmallVec<[_; 8]> = blanket_impls
impl_stable_hash_for!(enum ::syntax::ast::LitKind {
Str(value, style),
- Err(value),
ByteStr(value),
Byte(value),
Char(value),
Int(value, lit_int_type),
Float(value, float_ty),
FloatUnsuffixed(value),
- Bool(value)
+ Bool(value),
+ Err(value)
});
impl_stable_hash_for_spanned!(::syntax::ast::LitKind);
hasher: &mut StableHasher<W>) {
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
- tokenstream::TokenTree::Token(span, ref token) => {
- span.hash_stable(hcx, hasher);
- hash_token(token, hcx, hasher);
+ tokenstream::TokenTree::Token(ref token) => {
+ token.hash_stable(hcx, hasher);
}
tokenstream::TokenTree::Delimited(span, delim, ref tts) => {
span.hash_stable(hcx, hasher);
suffix
});
-fn hash_token<'a, 'gcx, W: StableHasherResult>(
- token: &token::Token,
- hcx: &mut StableHashingContext<'a>,
- hasher: &mut StableHasher<W>,
-) {
- mem::discriminant(token).hash_stable(hcx, hasher);
- match *token {
- token::Token::Eq |
- token::Token::Lt |
- token::Token::Le |
- token::Token::EqEq |
- token::Token::Ne |
- token::Token::Ge |
- token::Token::Gt |
- token::Token::AndAnd |
- token::Token::OrOr |
- token::Token::Not |
- token::Token::Tilde |
- token::Token::At |
- token::Token::Dot |
- token::Token::DotDot |
- token::Token::DotDotDot |
- token::Token::DotDotEq |
- token::Token::Comma |
- token::Token::Semi |
- token::Token::Colon |
- token::Token::ModSep |
- token::Token::RArrow |
- token::Token::LArrow |
- token::Token::FatArrow |
- token::Token::Pound |
- token::Token::Dollar |
- token::Token::Question |
- token::Token::SingleQuote |
- token::Token::Whitespace |
- token::Token::Comment |
- token::Token::Eof => {}
-
- token::Token::BinOp(bin_op_token) |
- token::Token::BinOpEq(bin_op_token) => {
- std_hash::Hash::hash(&bin_op_token, hasher);
- }
+impl<'a> HashStable<StableHashingContext<'a>> for token::TokenKind {
+ fn hash_stable<W: StableHasherResult>(&self,
+ hcx: &mut StableHashingContext<'a>,
+ hasher: &mut StableHasher<W>) {
+ mem::discriminant(self).hash_stable(hcx, hasher);
+ match *self {
+ token::Eq |
+ token::Lt |
+ token::Le |
+ token::EqEq |
+ token::Ne |
+ token::Ge |
+ token::Gt |
+ token::AndAnd |
+ token::OrOr |
+ token::Not |
+ token::Tilde |
+ token::At |
+ token::Dot |
+ token::DotDot |
+ token::DotDotDot |
+ token::DotDotEq |
+ token::Comma |
+ token::Semi |
+ token::Colon |
+ token::ModSep |
+ token::RArrow |
+ token::LArrow |
+ token::FatArrow |
+ token::Pound |
+ token::Dollar |
+ token::Question |
+ token::SingleQuote |
+ token::Whitespace |
+ token::Comment |
+ token::Eof => {}
+
+ token::BinOp(bin_op_token) |
+ token::BinOpEq(bin_op_token) => {
+ std_hash::Hash::hash(&bin_op_token, hasher);
+ }
- token::Token::OpenDelim(delim_token) |
- token::Token::CloseDelim(delim_token) => {
- std_hash::Hash::hash(&delim_token, hasher);
- }
- token::Token::Literal(lit) => lit.hash_stable(hcx, hasher),
+ token::OpenDelim(delim_token) |
+ token::CloseDelim(delim_token) => {
+ std_hash::Hash::hash(&delim_token, hasher);
+ }
+ token::Literal(lit) => lit.hash_stable(hcx, hasher),
- token::Token::Ident(ident, is_raw) => {
- ident.name.hash_stable(hcx, hasher);
- is_raw.hash_stable(hcx, hasher);
- }
- token::Token::Lifetime(ident) => ident.name.hash_stable(hcx, hasher),
+ token::Ident(name, is_raw) => {
+ name.hash_stable(hcx, hasher);
+ is_raw.hash_stable(hcx, hasher);
+ }
+ token::Lifetime(name) => name.hash_stable(hcx, hasher),
- token::Token::Interpolated(_) => {
- bug!("interpolated tokens should not be present in the HIR")
- }
+ token::Interpolated(_) => {
+ bug!("interpolated tokens should not be present in the HIR")
+ }
- token::Token::DocComment(val) |
- token::Token::Shebang(val) => val.hash_stable(hcx, hasher),
+ token::DocComment(val) |
+ token::Shebang(val) => val.hash_stable(hcx, hasher),
+ }
}
}
+impl_stable_hash_for!(struct token::Token {
+ kind,
+ span
+});
+
impl_stable_hash_for!(enum ::syntax::ast::NestedMetaItem {
MetaItem(meta_item),
Literal(lit)
}
}
-impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>> for ty::Binder<T>
- where T: HashStable<StableHashingContext<'a>>
+impl<'a, T> HashStable<StableHashingContext<'a>> for ty::Binder<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
}
}
-impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
-for ty::TyVid
-{
+impl<'a> HashStable<StableHashingContext<'a>> for ty::TyVid {
fn hash_stable<W: StableHasherResult>(&self,
_hcx: &mut StableHashingContext<'a>,
_hasher: &mut StableHasher<W>) {
}
}
-impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
-for ty::IntVid
-{
+impl<'a> HashStable<StableHashingContext<'a>> for ty::IntVid {
fn hash_stable<W: StableHasherResult>(&self,
_hcx: &mut StableHashingContext<'a>,
_hasher: &mut StableHasher<W>) {
}
}
-impl<'a, 'gcx> HashStable<StableHashingContext<'a>>
-for ty::FloatVid
-{
+impl<'a> HashStable<StableHashingContext<'a>> for ty::FloatVid {
fn hash_stable<W: StableHasherResult>(&self,
_hcx: &mut StableHashingContext<'a>,
_hasher: &mut StableHasher<W>) {
}
}
-impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>>
-for ty::steal::Steal<T>
- where T: HashStable<StableHashingContext<'a>>
+impl<'a, T> HashStable<StableHashingContext<'a>> for ty::steal::Steal<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
}
}
-impl<'a, 'gcx, 'tcx> VerifyBound<'tcx> {
+impl<'tcx> VerifyBound<'tcx> {
pub fn must_hold(&self) -> bool {
match self {
VerifyBound::IfEq(..) => false,
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(explicit_outlives_requirements)]
#![feature(arbitrary_self_types)]
impl LintPassObject for LateLintPassObject {}
-pub trait LintContext<'tcx>: Sized {
+pub trait LintContext: Sized {
type PassObject: LintPassObject;
fn sess(&self) -> &Session;
}
}
-impl<'a, 'tcx> LintContext<'tcx> for LateContext<'a, 'tcx> {
+impl LintContext for LateContext<'_, '_> {
type PassObject = LateLintPassObject;
/// Gets the overall compiler `Session` object.
}
}
-impl<'a> LintContext<'a> for EarlyContext<'a> {
+impl LintContext for EarlyContext<'_> {
type PassObject = EarlyLintPassObject;
/// Gets the overall compiler `Session` object.
// We want to use the enum name both in the `impl ... for $enum_name` as well as for
// importing all the variants. Unfortunately it seems we have to take the name
// twice for this purpose
- (impl<$($lt:lifetime $(: $lt_bound:lifetime)? ),* $(,)? $($T:ident),* $(,)?>
+ (impl<$($T:ident),* $(,)?>
for enum $enum_name:path
[ $enum_path:path ]
{
$( { $($named_field:ident $(-> $named_delegate:tt)?),* } )?
),* $(,)?
}) => {
- impl<'a, $($lt $(: $lt_bound)?,)* $($T,)*>
+ impl<$($T,)*>
::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>
for $enum_name
where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),*
// Structs
(struct $struct_name:path { $($field:ident $(-> $delegate:tt)?),* $(,)? }) => {
impl_stable_hash_for!(
- impl<'tcx> for struct $struct_name { $($field $(-> $delegate)?),* }
+ impl<> for struct $struct_name { $($field $(-> $delegate)?),* }
);
};
- (impl<$($lt:lifetime $(: $lt_bound:lifetime)? ),* $(,)? $($T:ident),* $(,)?> for struct $struct_name:path {
+ (impl<$($T:ident),* $(,)?> for struct $struct_name:path {
$($field:ident $(-> $delegate:tt)?),* $(,)?
}) => {
- impl<'a, $($lt $(: $lt_bound)?,)* $($T,)*>
+ impl<$($T,)*>
::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name
where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),*
{
// We cannot use normal parentheses here, the parser won't allow it
(tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)?),* $(,)? }) => {
impl_stable_hash_for!(
- impl<'tcx> for tuple_struct $struct_name { $($field $(-> $delegate)?),* }
+ impl<> for tuple_struct $struct_name { $($field $(-> $delegate)?),* }
);
};
- (impl<$($lt:lifetime $(: $lt_bound:lifetime)? ),* $(,)? $($T:ident),* $(,)?>
+ (impl<$($T:ident),* $(,)?>
for tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)?),* $(,)? }) => {
- impl<'a, $($lt $(: $lt_bound)?,)* $($T,)*>
+ impl<$($T,)*>
::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name
where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),*
{
macro_rules! impl_stable_hash_for_spanned {
($T:path) => (
- impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ::syntax::source_map::Spanned<$T>
+ impl HashStable<StableHashingContext<'a>> for ::syntax::source_map::Spanned<$T>
{
#[inline]
fn hash_stable<W: StableHasherResult>(&self,
pub fn predecessors(
&self,
- mir: &Body<'_>
+ body: &Body<'_>
) -> MappedReadGuard<'_, IndexVec<BasicBlock, Vec<BasicBlock>>> {
if self.predecessors.borrow().is_none() {
- *self.predecessors.borrow_mut() = Some(calculate_predecessors(mir));
+ *self.predecessors.borrow_mut() = Some(calculate_predecessors(body));
}
ReadGuard::map(self.predecessors.borrow(), |p| p.as_ref().unwrap())
}
}
-fn calculate_predecessors(mir: &Body<'_>) -> IndexVec<BasicBlock, Vec<BasicBlock>> {
- let mut result = IndexVec::from_elem(vec![], mir.basic_blocks());
- for (bb, data) in mir.basic_blocks().iter_enumerated() {
+fn calculate_predecessors(body: &Body<'_>) -> IndexVec<BasicBlock, Vec<BasicBlock>> {
+ let mut result = IndexVec::from_elem(vec![], body.basic_blocks());
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
if let Some(ref term) = data.terminator {
for &tgt in term.successors() {
result[tgt].push(bb);
//! The virtual memory representation of the MIR interpreter.
use super::{
- Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
+ Pointer, InterpResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
};
use crate::ty::layout::{Size, Align};
_alloc: &Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
_alloc: &mut Allocation<Tag, Self>,
_ptr: Pointer<Tag>,
_size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
}
&self,
ptr: Pointer<Tag>,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let allocation_size = self.bytes.len() as u64;
ptr.check_in_alloc(Size::from_bytes(allocation_size), msg)
}
ptr: Pointer<Tag>,
size: Size,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
self.check_bounds_ptr(ptr.offset(size, cx)?, msg)
}
size: Size,
check_defined_and_ptr: bool,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.check_bounds(cx, ptr, size, msg)?;
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, true, CheckInAllocMsg::MemoryAccessTest)
}
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
self.get_bytes_internal(cx, ptr, size, false, CheckInAllocMsg::MemoryAccessTest)
}
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx, &mut [u8]>
+ ) -> InterpResult<'tcx, &mut [u8]>
{
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_bounds(cx, ptr, size, CheckInAllocMsg::MemoryAccessTest)?;
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
- ) -> EvalResult<'tcx, &[u8]>
+ ) -> InterpResult<'tcx, &[u8]>
{
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
ptr: Pointer<Tag>,
size: Size,
allow_ptr_and_undef: bool,
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
// Check bounds and relocations on the edges
self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
src: &[u8],
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?;
bytes.clone_from_slice(src);
ptr: Pointer<Tag>,
val: u8,
count: Size
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let bytes = self.get_bytes_mut(cx, ptr, count)?;
for b in bytes {
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size
- ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>>
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
{
// get_bytes_unchecked tests relocation edges
let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
&self,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
- ) -> EvalResult<'tcx, ScalarMaybeUndef<Tag>>
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
{
self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
}
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>,
type_size: Size,
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
val: ScalarMaybeUndef<Tag>
- ) -> EvalResult<'tcx>
+ ) -> InterpResult<'tcx>
{
let ptr_size = cx.data_layout().pointer_size;
self.write_scalar(cx, ptr.into(), val, ptr_size)
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if self.relocations(cx, ptr, size).is_empty() {
Ok(())
} else {
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Find the start and end of the given range and its outermost relocations.
let (first, last) = {
// Find all relocations overlapping the given range.
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
size: Size,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.check_relocations(cx, ptr, Size::ZERO)?;
self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
Ok(())
/// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
/// error which will report the first byte which is undefined.
#[inline]
- fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> EvalResult<'tcx> {
+ fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.undef_mask.is_range_defined(
ptr.offset,
ptr.offset + size,
ptr: Pointer<Tag>,
size: Size,
new_state: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if size.bytes() == 0 {
return Ok(());
}
struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
}
+/// Packages the kind of error we got from the const code interpreter
+/// up with a Rust-level backtrace of where the error occurred.
+/// These should always be constructed by calling `.into()` on
+/// an `InterpError`. In `librustc_mir::interpret`, we have the `err!`
+/// macro for this.
#[derive(Debug, Clone)]
-pub struct EvalError<'tcx> {
+pub struct InterpErrorInfo<'tcx> {
pub kind: InterpError<'tcx, u64>,
- pub backtrace: Option<Box<Backtrace>>,
+ backtrace: Option<Box<Backtrace>>,
}
-impl<'tcx> EvalError<'tcx> {
+impl<'tcx> InterpErrorInfo<'tcx> {
pub fn print_backtrace(&mut self) {
if let Some(ref mut backtrace) = self.backtrace {
print_backtrace(&mut *backtrace);
eprintln!("\n\nAn error occurred in miri:\n{:?}", backtrace);
}
-impl<'tcx> From<InterpError<'tcx, u64>> for EvalError<'tcx> {
+impl<'tcx> From<InterpError<'tcx, u64>> for InterpErrorInfo<'tcx> {
fn from(kind: InterpError<'tcx, u64>) -> Self {
let backtrace = match env::var("RUST_CTFE_BACKTRACE") {
// Matching `RUST_BACKTRACE` -- we treat "0" the same as "not present".
},
_ => None,
};
- EvalError {
+ InterpErrorInfo {
kind,
backtrace,
}
InfiniteLoop,
}
-pub type EvalResult<'tcx, T = ()> = Result<T, EvalError<'tcx>>;
+pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
impl<'tcx, O> InterpError<'tcx, O> {
pub fn description(&self) -> &str {
}
}
-impl<'tcx> fmt::Display for EvalError<'tcx> {
+impl<'tcx> fmt::Display for InterpErrorInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.kind)
}
mod pointer;
pub use self::error::{
- EvalError, EvalResult, InterpError, AssertMessage, ConstEvalErr, struct_error,
+ InterpErrorInfo, InterpResult, InterpError, AssertMessage, ConstEvalErr, struct_error,
FrameInfo, ConstEvalRawResult, ConstEvalResult, ErrorHandled,
};
use rustc_macros::HashStable;
use super::{
- AllocId, EvalResult, CheckInAllocMsg
+ AllocId, InterpResult, CheckInAllocMsg
};
////////////////////////////////////////////////////////////////////////////////
}
#[inline]
- fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+ fn offset<'tcx>(&self, val: u64, i: u64) -> InterpResult<'tcx, u64> {
let (res, over) = self.overflowing_offset(val, i);
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
#[inline]
- fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
}
}
}
-impl<'tcx> Pointer<()> {
+impl Pointer<()> {
#[inline(always)]
pub fn new(alloc_id: AllocId, offset: Size) -> Self {
Pointer { alloc_id, offset, tag: () }
}
#[inline]
- pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
}
#[inline]
- pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag(
self.alloc_id,
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
self,
allocation_size: Size,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, ()> {
+ ) -> InterpResult<'tcx, ()> {
if self.offset > allocation_size {
err!(PointerOutOfBounds {
ptr: self.erase_tag(),
use std::fmt;
use rustc_macros::HashStable;
+use rustc_apfloat::{Float, ieee::{Double, Single}};
use crate::ty::{Ty, InferConst, ParamConst, layout::{HasDataLayout, Size}, subst::SubstsRef};
use crate::ty::PlaceholderConst;
use crate::hir::def_id::DefId;
-use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
+use super::{InterpResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
/// Represents the result of a raw const operation, pre-validation.
#[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash, HashStable)]
}
}
-impl<'tcx> Scalar<()> {
+impl<Tag> From<Single> for Scalar<Tag> {
+ #[inline(always)]
+ fn from(f: Single) -> Self {
+ Scalar::from_f32(f)
+ }
+}
+
+impl<Tag> From<Double> for Scalar<Tag> {
+ #[inline(always)]
+ fn from(f: Double) -> Self {
+ Scalar::from_f64(f)
+ }
+}
+
+impl Scalar<()> {
#[inline(always)]
fn check_data(data: u128, size: u8) {
debug_assert_eq!(truncate(data, Size::from_bytes(size as u64)), data,
}
#[inline]
- pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
}
#[inline]
- pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> {
+ pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
Scalar::Raw { data: i, size: size.bytes() as u8 }
}
+ #[inline]
+ pub fn from_u8(i: u8) -> Self {
+ Scalar::Raw { data: i as u128, size: 1 }
+ }
+
+ #[inline]
+ pub fn from_u16(i: u16) -> Self {
+ Scalar::Raw { data: i as u128, size: 2 }
+ }
+
+ #[inline]
+ pub fn from_u32(i: u32) -> Self {
+ Scalar::Raw { data: i as u128, size: 4 }
+ }
+
+ #[inline]
+ pub fn from_u64(i: u64) -> Self {
+ Scalar::Raw { data: i as u128, size: 8 }
+ }
+
#[inline]
pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
let i = i.into();
}
#[inline]
- pub fn from_f32(f: f32) -> Self {
- Scalar::Raw { data: f.to_bits() as u128, size: 4 }
+ pub fn from_f32(f: Single) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Scalar::Raw { data: f.to_bits(), size: 4 }
}
#[inline]
- pub fn from_f64(f: f64) -> Self {
- Scalar::Raw { data: f.to_bits() as u128, size: 8 }
+ pub fn from_f64(f: Double) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Scalar::Raw { data: f.to_bits(), size: 8 }
}
#[inline]
}
#[inline]
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
match self {
Scalar::Raw { data, size } => {
assert_eq!(target_size.bytes(), size as u64);
}
#[inline]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
match self {
Scalar::Raw { data: 0, .. } => err!(InvalidNullPointerUsage),
Scalar::Raw { .. } => err!(ReadBytesAsPointer),
}
}
- pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
match self {
Scalar::Raw { data: 0, size: 1 } => Ok(false),
Scalar::Raw { data: 1, size: 1 } => Ok(true),
}
}
- pub fn to_char(self) -> EvalResult<'tcx, char> {
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
let val = self.to_u32()?;
match ::std::char::from_u32(val) {
Some(c) => Ok(c),
}
}
- pub fn to_u8(self) -> EvalResult<'static, u8> {
+ pub fn to_u8(self) -> InterpResult<'static, u8> {
let sz = Size::from_bits(8);
let b = self.to_bits(sz)?;
Ok(b as u8)
}
- pub fn to_u32(self) -> EvalResult<'static, u32> {
+ pub fn to_u32(self) -> InterpResult<'static, u32> {
let sz = Size::from_bits(32);
let b = self.to_bits(sz)?;
Ok(b as u32)
}
- pub fn to_u64(self) -> EvalResult<'static, u64> {
+ pub fn to_u64(self) -> InterpResult<'static, u64> {
let sz = Size::from_bits(64);
let b = self.to_bits(sz)?;
Ok(b as u64)
}
- pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
Ok(b as u64)
}
- pub fn to_i8(self) -> EvalResult<'static, i8> {
+ pub fn to_i8(self) -> InterpResult<'static, i8> {
let sz = Size::from_bits(8);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i8)
}
- pub fn to_i32(self) -> EvalResult<'static, i32> {
+ pub fn to_i32(self) -> InterpResult<'static, i32> {
let sz = Size::from_bits(32);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i32)
}
- pub fn to_i64(self) -> EvalResult<'static, i64> {
+ pub fn to_i64(self) -> InterpResult<'static, i64> {
let sz = Size::from_bits(64);
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
Ok(b as i64)
}
- pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
let sz = cx.data_layout().pointer_size;
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
}
#[inline]
- pub fn to_f32(self) -> EvalResult<'static, f32> {
- Ok(f32::from_bits(self.to_u32()?))
+ pub fn to_f32(self) -> InterpResult<'static, Single> {
+ // Going through `u32` to check size and truncation.
+ Ok(Single::from_bits(self.to_u32()? as u128))
}
#[inline]
- pub fn to_f64(self) -> EvalResult<'static, f64> {
- Ok(f64::from_bits(self.to_u64()?))
+ pub fn to_f64(self) -> InterpResult<'static, Double> {
+ // Going through `u64` to check size and truncation.
+ Ok(Double::from_bits(self.to_u64()? as u128))
}
}
}
#[inline]
- pub fn not_undef(self) -> EvalResult<'static, Scalar<Tag>> {
+ pub fn not_undef(self) -> InterpResult<'static, Scalar<Tag>> {
match self {
ScalarMaybeUndef::Scalar(scalar) => Ok(scalar),
ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))),
}
#[inline(always)]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
self.not_undef()?.to_ptr()
}
#[inline(always)]
- pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
self.not_undef()?.to_bits(target_size)
}
#[inline(always)]
- pub fn to_bool(self) -> EvalResult<'tcx, bool> {
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
self.not_undef()?.to_bool()
}
#[inline(always)]
- pub fn to_char(self) -> EvalResult<'tcx, char> {
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
self.not_undef()?.to_char()
}
#[inline(always)]
- pub fn to_f32(self) -> EvalResult<'tcx, f32> {
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
self.not_undef()?.to_f32()
}
#[inline(always)]
- pub fn to_f64(self) -> EvalResult<'tcx, f64> {
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
self.not_undef()?.to_f64()
}
#[inline(always)]
- pub fn to_u8(self) -> EvalResult<'tcx, u8> {
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
self.not_undef()?.to_u8()
}
#[inline(always)]
- pub fn to_u32(self) -> EvalResult<'tcx, u32> {
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
self.not_undef()?.to_u32()
}
#[inline(always)]
- pub fn to_u64(self) -> EvalResult<'tcx, u64> {
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
self.not_undef()?.to_u64()
}
#[inline(always)]
- pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub fn to_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
self.not_undef()?.to_usize(cx)
}
#[inline(always)]
- pub fn to_i8(self) -> EvalResult<'tcx, i8> {
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
self.not_undef()?.to_i8()
}
#[inline(always)]
- pub fn to_i32(self) -> EvalResult<'tcx, i32> {
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
self.not_undef()?.to_i32()
}
#[inline(always)]
- pub fn to_i64(self) -> EvalResult<'tcx, i64> {
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
self.not_undef()?.to_i64()
}
#[inline(always)]
- pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> {
+ pub fn to_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
self.not_undef()?.to_isize(cx)
}
}
use crate::hir::{self, InlineAsm as HirInlineAsm};
use crate::mir::interpret::{ConstValue, InterpError, Scalar};
use crate::mir::visit::MirVisitable;
+use rustc_data_structures::bit_set::BitMatrix;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors};
}
/// Returns `true` if `other` is earlier in the control flow graph than `self`.
- pub fn is_predecessor_of<'tcx>(&self, other: Location, mir: &Body<'tcx>) -> bool {
+ pub fn is_predecessor_of<'tcx>(&self, other: Location, body: &Body<'tcx>) -> bool {
// If we are in the same block as the other location and are an earlier statement
// then we are a predecessor of `other`.
if self.block == other.block && self.statement_index < other.statement_index {
}
// If we're in another block, then we want to check that block is a predecessor of `other`.
- let mut queue: Vec<BasicBlock> = mir.predecessors_for(other.block).clone();
+ let mut queue: Vec<BasicBlock> = body.predecessors_for(other.block).clone();
let mut visited = FxHashSet::default();
while let Some(block) = queue.pop() {
// If we haven't visited this block before, then make sure we visit its predecessors.
if visited.insert(block) {
- queue.append(&mut mir.predecessors_for(block).clone());
+ queue.append(&mut body.predecessors_for(block).clone());
} else {
continue;
}
/// be stored in multiple variants.
pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
+ /// Which saved locals are storage-live at the same time. Locals that do not
+ /// have conflicts with each other are allowed to overlap in the computed
+ /// layout.
+ pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+
/// Names and scopes of all the stored generator locals.
/// NOTE(tmandry) This is *strictly* a temporary hack for codegen
/// debuginfo generation, and will be removed at some point.
impl<'tcx> TypeFoldable<'tcx> for GeneratorLayout<'tcx> {
field_tys,
variant_fields,
+ storage_conflicts,
__local_debuginfo_codegen_only_do_not_use,
}
}
}
}
+impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self {
+ self.clone()
+ }
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> bool {
+ false
+ }
+}
+
impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
Constant {
/// A preorder traversal of this graph is either `A B D C` or `A C D B`
#[derive(Clone)]
pub struct Preorder<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
visited: BitSet<BasicBlock>,
worklist: Vec<BasicBlock>,
root_is_start_block: bool,
}
impl<'a, 'tcx> Preorder<'a, 'tcx> {
- pub fn new(mir: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
let worklist = vec![root];
Preorder {
- mir,
- visited: BitSet::new_empty(mir.basic_blocks().len()),
+ body,
+ visited: BitSet::new_empty(body.basic_blocks().len()),
worklist,
root_is_start_block: root == START_BLOCK,
}
}
}
-pub fn preorder<'a, 'tcx>(mir: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
- Preorder::new(mir, START_BLOCK)
+pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
+ Preorder::new(body, START_BLOCK)
}
impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
continue;
}
- let data = &self.mir[idx];
+ let data = &self.body[idx];
if let Some(ref term) = data.terminator {
self.worklist.extend(term.successors());
fn size_hint(&self) -> (usize, Option<usize>) {
// All the blocks, minus the number of blocks we've visited.
- let upper = self.mir.basic_blocks().len() - self.visited.count();
+ let upper = self.body.basic_blocks().len() - self.visited.count();
let lower = if self.root_is_start_block {
// We will visit all remaining blocks exactly once.
///
/// A Postorder traversal of this graph is `D B C A` or `D C B A`
pub struct Postorder<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
visited: BitSet<BasicBlock>,
visit_stack: Vec<(BasicBlock, Successors<'a>)>,
root_is_start_block: bool,
}
impl<'a, 'tcx> Postorder<'a, 'tcx> {
- pub fn new(mir: &'a Body<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> {
let mut po = Postorder {
- mir,
- visited: BitSet::new_empty(mir.basic_blocks().len()),
+ body,
+ visited: BitSet::new_empty(body.basic_blocks().len()),
visit_stack: Vec::new(),
root_is_start_block: root == START_BLOCK,
};
- let data = &po.mir[root];
+ let data = &po.body[root];
if let Some(ref term) = data.terminator {
po.visited.insert(root);
};
if self.visited.insert(bb) {
- if let Some(term) = &self.mir[bb].terminator {
+ if let Some(term) = &self.body[bb].terminator {
self.visit_stack.push((bb, term.successors()));
}
}
}
}
-pub fn postorder<'a, 'tcx>(mir: &'a Body<'tcx>) -> Postorder<'a, 'tcx> {
- Postorder::new(mir, START_BLOCK)
+pub fn postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Postorder<'a, 'tcx> {
+ Postorder::new(body, START_BLOCK)
}
impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
self.traverse_successor();
}
- next.map(|(bb, _)| (bb, &self.mir[bb]))
+ next.map(|(bb, _)| (bb, &self.body[bb]))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// All the blocks, minus the number of blocks we've visited.
- let upper = self.mir.basic_blocks().len() - self.visited.count();
+ let upper = self.body.basic_blocks().len() - self.visited.count();
let lower = if self.root_is_start_block {
// We will visit all remaining blocks exactly once.
/// to re-use the traversal
#[derive(Clone)]
pub struct ReversePostorder<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
blocks: Vec<BasicBlock>,
idx: usize
}
impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
- pub fn new(mir: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
- let blocks : Vec<_> = Postorder::new(mir, root).map(|(bb, _)| bb).collect();
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
+ let blocks : Vec<_> = Postorder::new(body, root).map(|(bb, _)| bb).collect();
let len = blocks.len();
ReversePostorder {
- mir,
+ body,
blocks,
idx: len
}
}
-pub fn reverse_postorder<'a, 'tcx>(mir: &'a Body<'tcx>) -> ReversePostorder<'a, 'tcx> {
- ReversePostorder::new(mir, START_BLOCK)
+pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorder<'a, 'tcx> {
+ ReversePostorder::new(body, START_BLOCK)
}
impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
if self.idx == 0 { return None; }
self.idx -= 1;
- self.blocks.get(self.idx).map(|&bb| (bb, &self.mir[bb]))
+ self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Override these, and call `self.super_xxx` to revert back to the
// default behavior.
- fn visit_body(&mut self, mir: & $($mutability)? Body<'tcx>) {
- self.super_body(mir);
+ fn visit_body(&mut self, body: & $($mutability)? Body<'tcx>) {
+ self.super_body(body);
}
fn visit_basic_block_data(&mut self,
self.super_place(place, context, location);
}
+ fn visit_place_base(&mut self,
+ place_base: & $($mutability)? PlaceBase<'tcx>,
+ context: PlaceContext,
+ location: Location) {
+ self.super_place_base(place_base, context, location);
+ }
+
fn visit_projection(&mut self,
place: & $($mutability)? Projection<'tcx>,
context: PlaceContext,
self.super_projection(place, context, location);
}
- fn visit_projection_elem(&mut self,
- place: & $($mutability)? PlaceElem<'tcx>,
- location: Location) {
- self.super_projection_elem(place, location);
- }
-
fn visit_constant(&mut self,
constant: & $($mutability)? Constant<'tcx>,
location: Location) {
// not meant to be overridden.
fn super_body(&mut self,
- mir: & $($mutability)? Body<'tcx>) {
- if let Some(yield_ty) = &$($mutability)? mir.yield_ty {
+ body: & $($mutability)? Body<'tcx>) {
+ if let Some(yield_ty) = &$($mutability)? body.yield_ty {
self.visit_ty(yield_ty, TyContext::YieldTy(SourceInfo {
- span: mir.span,
+ span: body.span,
scope: OUTERMOST_SOURCE_SCOPE,
}));
}
// for best performance, we want to use an iterator rather
- // than a for-loop, to avoid calling `mir::Body::invalidate` for
+ // than a for-loop, to avoid calling `mir::Body::invalidate` for
// each basic block.
macro_rules! basic_blocks {
- (mut) => (mir.basic_blocks_mut().iter_enumerated_mut());
- () => (mir.basic_blocks().iter_enumerated());
+ (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
+ () => (body.basic_blocks().iter_enumerated());
};
for (bb, data) in basic_blocks!($($mutability)?) {
self.visit_basic_block_data(bb, data);
}
- for scope in &$($mutability)? mir.source_scopes {
+ for scope in &$($mutability)? body.source_scopes {
self.visit_source_scope_data(scope);
}
- self.visit_ty(&$($mutability)? mir.return_ty(), TyContext::ReturnTy(SourceInfo {
- span: mir.span,
+ self.visit_ty(&$($mutability)? body.return_ty(), TyContext::ReturnTy(SourceInfo {
+ span: body.span,
scope: OUTERMOST_SOURCE_SCOPE,
}));
- for local in mir.local_decls.indices() {
- self.visit_local_decl(local, & $($mutability)? mir.local_decls[local]);
+ for local in body.local_decls.indices() {
+ self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
}
macro_rules! type_annotations {
- (mut) => (mir.user_type_annotations.iter_enumerated_mut());
- () => (mir.user_type_annotations.iter_enumerated());
+ (mut) => (body.user_type_annotations.iter_enumerated_mut());
+ () => (body.user_type_annotations.iter_enumerated());
};
for (index, annotation) in type_annotations!($($mutability)?) {
);
}
- self.visit_span(&$($mutability)? mir.span);
+ self.visit_span(&$($mutability)? body.span);
}
fn super_basic_block_data(&mut self,
context: PlaceContext,
location: Location) {
match place {
- Place::Base(PlaceBase::Local(local)) => {
- self.visit_local(local, context, location);
- }
- Place::Base(PlaceBase::Static(box Static { kind: _, ty })) => {
- self.visit_ty(& $($mutability)? *ty, TyContext::Location(location));
+ Place::Base(place_base) => {
+ self.visit_place_base(place_base, context, location);
}
Place::Projection(proj) => {
+ let context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+
self.visit_projection(proj, context, location);
}
}
}
- fn super_projection(&mut self,
- proj: & $($mutability)? Projection<'tcx>,
+ fn super_place_base(&mut self,
+ place_base: & $($mutability)? PlaceBase<'tcx>,
context: PlaceContext,
location: Location) {
- let Projection { base, elem } = proj;
- let context = if context.is_mutating_use() {
- PlaceContext::MutatingUse(MutatingUseContext::Projection)
- } else {
- PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
- };
- self.visit_place(base, context, location);
- self.visit_projection_elem(elem, location);
+ match place_base {
+ PlaceBase::Local(local) => {
+ self.visit_local(local, context, location);
+ }
+ PlaceBase::Static(box Static { kind: _, ty }) => {
+ self.visit_ty(& $($mutability)? *ty, TyContext::Location(location));
+ }
+ }
}
- fn super_projection_elem(&mut self,
- proj: & $($mutability)? PlaceElem<'tcx>,
- location: Location) {
- match proj {
+ fn super_projection(&mut self,
+ proj: & $($mutability)? Projection<'tcx>,
+ context: PlaceContext,
+ location: Location) {
+ // this is calling `super_place` in preparation for changing `Place` to be
+ // a struct with a base and a slice of projections. `visit_place` should only ever
+ // be called for the outermost place now.
+ self.super_place(& $($mutability)? proj.base, context, location);
+ match & $($mutability)? proj.elem {
ProjectionElem::Deref => {
}
ProjectionElem::Subslice { from: _, to: _ } => {
// Convenience methods
- fn visit_location(&mut self, mir: & $($mutability)? Body<'tcx>, location: Location) {
- let basic_block = & $($mutability)? mir[location.block];
+ fn visit_location(&mut self, body: & $($mutability)? Body<'tcx>, location: Location) {
+ let basic_block = & $($mutability)? body[location.block];
if basic_block.statements.len() == location.statement_index {
if let Some(ref $($mutability)? terminator) = basic_block.terminator {
self.visit_terminator(terminator, location)
NonUse(NonUseContext),
}
-impl<'tcx> PlaceContext {
+impl PlaceContext {
/// Returns `true` if this place context represents a drop.
pub fn is_drop(&self) -> bool {
match *self {
}
}
+/// The type of diagnostics output to generate.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ErrorOutputType {
+ /// Output meant for the consumption of humans.
HumanReadable(HumanReadableErrorType),
+ /// Output that's consumed by other tools such as `rustfix` or the `RLS`.
Json {
- /// Render the json in a human readable way (with indents and newlines)
+ /// Render the JSON in a human readable way (with indents and newlines).
pretty: bool,
- /// The way the `rendered` field is created
+ /// The JSON output includes a `rendered` field that includes the rendered
+ /// human output.
json_rendered: HumanReadableErrorType,
},
}
return op;
}
- impl<'a> dep_tracking::DepTrackingHash for $struct_name {
+ impl dep_tracking::DepTrackingHash for $struct_name {
fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) {
let mut sub_hashes = BTreeMap::new();
$({
// the final synthesized generics: we don't want our generated docs page to contain something
// like 'T: Copy + Clone', as that's redundant. Therefore, we keep track of a separate
// 'user_env', which only holds the predicates that will actually be displayed to the user.
- fn evaluate_predicates<'b, 'gcx, 'c>(
+ fn evaluate_predicates<'b, 'c>(
&self,
infcx: &InferCtxt<'b, 'tcx, 'c>,
trait_did: DefId,
_ => {
// this is a "direct", user-specified, rather than derived,
// obligation.
- flags.push(("direct".to_owned(), None));
+ flags.push((sym::direct, None));
}
}
// Currently I'm leaving it for what I need for `try`.
if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) {
let method = self.tcx.item_name(item);
- flags.push(("from_method".to_owned(), None));
- flags.push(("from_method".to_owned(), Some(method.to_string())));
+ flags.push((sym::from_method, None));
+ flags.push((sym::from_method, Some(method.to_string())));
}
}
if let Some(t) = self.get_parent_trait_ref(&obligation.cause.code) {
- flags.push(("parent_trait".to_owned(), Some(t)));
+ flags.push((sym::parent_trait, Some(t)));
}
if let Some(k) = obligation.cause.span.compiler_desugaring_kind() {
- flags.push(("from_desugaring".to_owned(), None));
- flags.push(("from_desugaring".to_owned(), Some(k.name().to_string())));
+ flags.push((sym::from_desugaring, None));
+ flags.push((sym::from_desugaring, Some(k.name().to_string())));
}
let generics = self.tcx.generics_of(def_id);
let self_ty = trait_ref.self_ty();
// This is also included through the generics list as `Self`,
// but the parser won't allow you to use it
- flags.push(("_Self".to_owned(), Some(self_ty.to_string())));
+ flags.push((sym::_Self, Some(self_ty.to_string())));
if let Some(def) = self_ty.ty_adt_def() {
// We also want to be able to select self's original
// signature with no type arguments resolved
- flags.push(("_Self".to_owned(), Some(self.tcx.type_of(def.did).to_string())));
+ flags.push((sym::_Self, Some(self.tcx.type_of(def.did).to_string())));
}
for param in generics.params.iter() {
},
GenericParamDefKind::Lifetime => continue,
};
- let name = param.name.to_string();
+ let name = param.name.as_symbol();
flags.push((name, Some(value)));
}
if let Some(true) = self_ty.ty_adt_def().map(|def| def.did.is_local()) {
- flags.push(("crate_local".to_owned(), None));
+ flags.push((sym::crate_local, None));
}
// Allow targeting all integers using `{integral}`, even if the exact type was resolved
if self_ty.is_integral() {
- flags.push(("_Self".to_owned(), Some("{integral}".to_owned())));
+ flags.push((sym::_Self, Some("{integral}".to_owned())));
}
if let ty::Array(aty, len) = self_ty.sty {
- flags.push(("_Self".to_owned(), Some("[]".to_owned())));
- flags.push(("_Self".to_owned(), Some(format!("[{}]", aty))));
+ flags.push((sym::_Self, Some("[]".to_owned())));
+ flags.push((sym::_Self, Some(format!("[{}]", aty))));
if let Some(def) = aty.ty_adt_def() {
// We also want to be able to select the array's type's original
// signature with no type arguments resolved
flags.push((
- "_Self".to_owned(),
+ sym::_Self,
Some(format!("[{}]", self.tcx.type_of(def.did).to_string())),
));
let tcx = self.tcx;
if let Some(len) = len.assert_usize(tcx) {
flags.push((
- "_Self".to_owned(),
+ sym::_Self,
Some(format!("[{}; {}]", self.tcx.type_of(def.did).to_string(), len)),
));
} else {
flags.push((
- "_Self".to_owned(),
+ sym::_Self,
Some(format!("[{}; _]", self.tcx.type_of(def.did).to_string())),
));
}
folder: &mut F,
) -> chalk_engine::ExClause<Self>;
- fn visit_ex_clause_with<'gcx: 'tcx, V: TypeVisitor<'tcx>>(
+ fn visit_ex_clause_with<V: TypeVisitor<'tcx>>(
ex_clause: &chalk_engine::ExClause<Self>,
visitor: &mut V,
) -> bool;
use syntax::ast::{MetaItem, NestedMetaItem};
use syntax::attr;
-use syntax::symbol::sym;
+use syntax::symbol::{Symbol, kw, sym};
use syntax_pos::Span;
use syntax_pos::symbol::LocalInternedString;
}
impl<'a, 'gcx, 'tcx> OnUnimplementedDirective {
- pub fn parse(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ fn parse(tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_def_id: DefId,
items: &[NestedMetaItem],
span: Span,
pub fn evaluate(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: ty::TraitRef<'tcx>,
- options: &[(String, Option<String>)])
+ options: &[(Symbol, Option<String>)])
-> OnUnimplementedNote
{
let mut message = None;
if !attr::eval_condition(condition, &tcx.sess.parse_sess, &mut |c| {
c.ident().map_or(false, |ident| {
options.contains(&(
- ident.to_string(),
+ ident.name,
c.value_str().map(|s| s.as_str().to_string())
))
})
}
}
- let options: FxHashMap<String, String> = options.into_iter()
- .filter_map(|(k, v)| v.as_ref().map(|v| (k.to_owned(), v.to_owned())))
+ let options: FxHashMap<Symbol, String> = options.into_iter()
+ .filter_map(|(k, v)| v.as_ref().map(|v| (*k, v.to_owned())))
.collect();
OnUnimplementedNote {
label: label.map(|l| l.format(tcx, trait_ref, &options)),
}
impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString {
- pub fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_def_id: DefId,
from: LocalInternedString,
err_sp: Span)
Piece::String(_) => (), // Normal string, no need to check it
Piece::NextArgument(a) => match a.position {
// `{Self}` is allowed
- Position::ArgumentNamed(s) if s == "Self" => (),
+ Position::ArgumentNamed(s) if s == kw::SelfUpper => (),
// `{ThisTraitsName}` is allowed
- Position::ArgumentNamed(s) if s == name.as_str() => (),
+ Position::ArgumentNamed(s) if s == name => (),
// `{from_method}` is allowed
- Position::ArgumentNamed(s) if s == "from_method" => (),
+ Position::ArgumentNamed(s) if s == sym::from_method => (),
// `{from_desugaring}` is allowed
- Position::ArgumentNamed(s) if s == "from_desugaring" => (),
+ Position::ArgumentNamed(s) if s == sym::from_desugaring => (),
// So is `{A}` if A is a type parameter
Position::ArgumentNamed(s) => match generics.params.iter().find(|param| {
- param.name.as_str() == s
+ param.name.as_symbol() == s
}) {
Some(_) => (),
None => {
&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: ty::TraitRef<'tcx>,
- options: &FxHashMap<String, String>,
+ options: &FxHashMap<Symbol, String>,
) -> String {
let name = tcx.item_name(trait_ref.def_id);
let trait_str = tcx.def_path_str(trait_ref.def_id);
},
GenericParamDefKind::Lifetime => return None
};
- let name = param.name.to_string();
+ let name = param.name.as_symbol();
Some((name, value))
- }).collect::<FxHashMap<String, String>>();
+ }).collect::<FxHashMap<Symbol, String>>();
let empty_string = String::new();
let parser = Parser::new(&self.0, None, vec![], false);
match p {
Piece::String(s) => s,
Piece::NextArgument(a) => match a.position {
- Position::ArgumentNamed(s) => match generic_map.get(s) {
+ Position::ArgumentNamed(s) => match generic_map.get(&s) {
Some(val) => val,
- None if s == name.as_str() => {
+ None if s == name => {
&trait_str
}
None => {
- if let Some(val) = options.get(s) {
+ if let Some(val) = options.get(&s) {
val
- } else if s == "from_desugaring" || s == "from_method" {
+ } else if s == sym::from_desugaring || s == sym::from_method {
// don't break messages using these two arguments incorrectly
&empty_string
} else {
}
impl_stable_hash_for! {
- impl<'tcx, T> for struct Normalize<T> {
+ impl<T> for struct Normalize<T> {
value
}
}
impl IntercrateAmbiguityCause {
/// Emits notes when the overlap is caused by complex intercrate ambiguities.
/// See #23980 for details.
- pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(
- &self,
- err: &mut errors::DiagnosticBuilder<'_>,
- ) {
+ pub fn add_intercrate_ambiguity_hint(&self, err: &mut errors::DiagnosticBuilder<'_>) {
err.note(&self.intercrate_ambiguity_hint());
}
/// candidates and prefer where-clause candidates.
///
/// See the comment for "SelectionCandidate" for more details.
- fn candidate_should_be_dropped_in_favor_of<'o>(
+ fn candidate_should_be_dropped_in_favor_of(
&mut self,
victim: &EvaluatedCandidate<'tcx>,
other: &EvaluatedCandidate<'tcx>,
// These cover the traits that are built-in to the language
// itself: `Copy`, `Clone` and `Sized`.
- fn assemble_builtin_bound_candidates<'o>(
+ fn assemble_builtin_bound_candidates(
&mut self,
conditions: BuiltinImplConditions<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
}
}
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData<N> {
+impl<N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "VtableBuiltinData(nested={:?})", self.nested)
}
}
-impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableAutoImplData<N> {
+impl<N: fmt::Debug> fmt::Debug for traits::VtableAutoImplData<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
use crate::hir::def_id::DefId;
use crate::hir;
use crate::ty::TyCtxt;
-use syntax_pos::symbol::Symbol;
+use syntax_pos::symbol::{sym, Symbol};
use crate::hir::map::blocks::FnLikeNode;
use syntax::attr;
/// Whether the `def_id` counts as const fn in your current crate, considering all active
/// feature gates
pub fn is_const_fn(self, def_id: DefId) -> bool {
- self.is_const_fn_raw(def_id) && match self.lookup_stability(def_id) {
- Some(stab) => match stab.const_stability {
+ self.is_const_fn_raw(def_id) && match self.is_unstable_const_fn(def_id) {
+ Some(feature_name) => {
// has a `rustc_const_unstable` attribute, check whether the user enabled the
- // corresponding feature gate
- Some(feature_name) => self.features()
+ // corresponding feature gate; `const_constructor` is not a lib feature, so it
+ // has to be checked separately.
+ self.features()
.declared_lib_features
.iter()
- .any(|&(sym, _)| sym == feature_name),
- // the function has no stability attribute, it is stable as const fn or the user
- // needs to use feature gates to use the function at all
- None => true,
+ .any(|&(sym, _)| sym == feature_name)
+ || (feature_name == sym::const_constructor
+ && self.features().const_constructor)
},
- // functions without stability are either stable user written const fn or the user is
- // using feature gates and we thus don't care what they do
+ // functions without const stability are either stable user written
+ // const fn or the user is using feature gates and we thus don't
+ // care what they do
None => true,
}
}
/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
pub fn is_unstable_const_fn(self, def_id: DefId) -> Option<Symbol> {
- if self.is_const_fn_raw(def_id) {
+ if self.is_constructor(def_id) {
+ Some(sym::const_constructor)
+ } else if self.is_const_fn_raw(def_id) {
self.lookup_stability(def_id)?.const_stability
} else {
None
let hir_id = tcx.hir().as_local_hir_id(def_id)
.expect("Non-local call to local provider is_const_fn");
- if let Some(fn_like) = FnLikeNode::from_node(tcx.hir().get_by_hir_id(hir_id)) {
+ let node = tcx.hir().get_by_hir_id(hir_id);
+ if let Some(fn_like) = FnLikeNode::from_node(node) {
fn_like.constness() == hir::Constness::Const
+ } else if let hir::Node::Ctor(_) = node {
+ true
} else {
false
}
}
}
-impl<'a, 'gcx, D> HashStable<StableHashingContext<'a>> for SimplifiedTypeGen<D>
- where D: Copy + Debug + Ord + Eq + Hash +
- HashStable<StableHashingContext<'a>>,
+impl<'a, D> HashStable<StableHashingContext<'a>> for SimplifiedTypeGen<D>
+where
+ D: Copy + Debug + Ord + Eq + Hash + HashStable<StableHashingContext<'a>>,
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a>,
}
}
-fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind,
- trait_closure_kind: ty::ClosureKind)
- -> Result<bool, ()>
-{
+fn needs_fn_once_adapter_shim(
+ actual_closure_kind: ty::ClosureKind,
+ trait_closure_kind: ty::ClosureKind,
+) -> Result<bool, ()> {
match (actual_closure_kind, trait_closure_kind) {
(ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
(ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
use crate::hir;
use crate::ich::StableHashingContext;
+use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
+use crate::ty::GeneratorSubsts;
+use crate::ty::subst::Subst;
+use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHasherResult};
pub param_env: ty::ParamEnv<'tcx>,
}
+#[derive(Copy, Clone, Debug)]
+enum StructKind {
+ /// A tuple, closure, or univariant which cannot be coerced to unsized.
+ AlwaysSized,
+ /// A univariant, the last field of which may be coerced to unsized.
+ MaybeUnsized,
+ /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+ Prefixed(Size, Align),
+}
+
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
- fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
- let tcx = self.tcx;
- let param_env = self.param_env;
+ fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
let dl = self.data_layout();
- let scalar_unit = |value: Primitive| {
- let bits = value.size(dl).bits();
- assert!(bits <= 128);
- Scalar {
- value,
- valid_range: 0..=(!0 >> (128 - bits))
- }
- };
- let scalar = |value: Primitive| {
- tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
- };
- let scalar_pair = |a: Scalar, b: Scalar| {
- let b_align = b.value.align(dl);
- let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.value.size(dl).align_to(b_align.abi);
- let size = (b_offset + b.value.size(dl)).align_to(align.abi);
- LayoutDetails {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldPlacement::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1]
- },
- abi: Abi::ScalarPair(a, b),
- align,
- size
- }
- };
-
- #[derive(Copy, Clone, Debug)]
- enum StructKind {
- /// A tuple, closure, or univariant which cannot be coerced to unsized.
- AlwaysSized,
- /// A univariant, the last field of which may be coerced to unsized.
- MaybeUnsized,
- /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
- Prefixed(Size, Align),
+ let b_align = b.value.align(dl);
+ let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.value.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.value.size(dl)).align_to(align.abi);
+ LayoutDetails {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldPlacement::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1]
+ },
+ abi: Abi::ScalarPair(a, b),
+ align,
+ size
}
+ }
- let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
- let packed = repr.packed();
- if packed && repr.align > 0 {
- bug!("struct cannot be packed and aligned");
- }
+ fn univariant_uninterned(&self,
+ ty: Ty<'tcx>,
+ fields: &[TyLayout<'_>],
+ repr: &ReprOptions,
+ kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
+ let dl = self.data_layout();
+ let packed = repr.packed();
+ if packed && repr.align > 0 {
+ bug!("struct cannot be packed and aligned");
+ }
- let pack = Align::from_bytes(repr.pack as u64).unwrap();
+ let pack = Align::from_bytes(repr.pack as u64).unwrap();
- let mut align = if packed {
- dl.i8_align
- } else {
- dl.aggregate_align
- };
+ let mut align = if packed {
+ dl.i8_align
+ } else {
+ dl.aggregate_align
+ };
- let mut sized = true;
- let mut offsets = vec![Size::ZERO; fields.len()];
- let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
- let mut optimize = !repr.inhibit_struct_field_reordering_opt();
- if let StructKind::Prefixed(_, align) = kind {
- optimize &= align.bytes() == 1;
- }
+ let mut optimize = !repr.inhibit_struct_field_reordering_opt();
+ if let StructKind::Prefixed(_, align) = kind {
+ optimize &= align.bytes() == 1;
+ }
- if optimize {
- let end = if let StructKind::MaybeUnsized = kind {
- fields.len() - 1
- } else {
- fields.len()
- };
- let optimizing = &mut inverse_memory_index[..end];
- let field_align = |f: &TyLayout<'_>| {
- if packed { f.align.abi.min(pack) } else { f.align.abi }
- };
- match kind {
- StructKind::AlwaysSized |
- StructKind::MaybeUnsized => {
- optimizing.sort_by_key(|&x| {
- // Place ZSTs first to avoid "interesting offsets",
- // especially with only one or two non-ZST fields.
- let f = &fields[x as usize];
- (!f.is_zst(), cmp::Reverse(field_align(f)))
- });
- }
- StructKind::Prefixed(..) => {
- optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
- }
+ if optimize {
+ let end = if let StructKind::MaybeUnsized = kind {
+ fields.len() - 1
+ } else {
+ fields.len()
+ };
+ let optimizing = &mut inverse_memory_index[..end];
+ let field_align = |f: &TyLayout<'_>| {
+ if packed { f.align.abi.min(pack) } else { f.align.abi }
+ };
+ match kind {
+ StructKind::AlwaysSized |
+ StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
+ let f = &fields[x as usize];
+ (!f.is_zst(), cmp::Reverse(field_align(f)))
+ });
+ }
+ StructKind::Prefixed(..) => {
+ optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
}
}
+ }
- // inverse_memory_index holds field indices by increasing memory offset.
- // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
- // We now write field offsets to the corresponding offset slot;
- // field 5 with offset 0 puts 0 in offsets[5].
- // At the bottom of this function, we use inverse_memory_index to produce memory_index.
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we use inverse_memory_index to produce memory_index.
- let mut offset = Size::ZERO;
+ let mut offset = Size::ZERO;
- if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
- let prefix_align = if packed {
- prefix_align.min(pack)
- } else {
- prefix_align
- };
- align = align.max(AbiAndPrefAlign::new(prefix_align));
- offset = prefix_size.align_to(prefix_align);
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align = if packed {
+ prefix_align.min(pack)
+ } else {
+ prefix_align
+ };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+
+ for &i in &inverse_memory_index {
+ let field = fields[i as usize];
+ if !sized {
+ bug!("univariant: field #{} of `{}` comes after unsized field",
+ offsets.len(), ty);
}
- for &i in &inverse_memory_index {
- let field = fields[i as usize];
- if !sized {
- bug!("univariant: field #{} of `{}` comes after unsized field",
- offsets.len(), ty);
- }
+ if field.is_unsized() {
+ sized = false;
+ }
- if field.is_unsized() {
- sized = false;
- }
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if packed {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
- // Invariant: offset < dl.obj_size_bound() <= 1<<61
- let field_align = if packed {
- field.align.min(AbiAndPrefAlign::new(pack))
- } else {
- field.align
- };
- offset = offset.align_to(field_align.abi);
- align = align.max(field_align);
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
- debug!("univariant offset: {:?} field: {:#?}", offset, field);
- offsets[i as usize] = offset;
+ offset = offset.checked_add(field.size, dl)
+ .ok_or(LayoutError::SizeOverflow(ty))?;
+ }
- offset = offset.checked_add(field.size, dl)
- .ok_or(LayoutError::SizeOverflow(ty))?;
- }
+ if repr.align > 0 {
+ let repr_align = repr.align as u64;
+ align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
+ debug!("univariant repr_align: {:?}", repr_align);
+ }
- if repr.align > 0 {
- let repr_align = repr.align as u64;
- align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
- debug!("univariant repr_align: {:?}", repr_align);
- }
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
- debug!("univariant min_size: {:?}", offset);
- let min_size = offset;
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+ // Field 5 would be the first element, so memory_index is i:
+ // Note: if we didn't optimize, it's already right.
- // As stated above, inverse_memory_index holds field indices by increasing offset.
- // This makes it an already-sorted view of the offsets vec.
- // To invert it, consider:
- // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
- // Field 5 would be the first element, so memory_index is i:
- // Note: if we didn't optimize, it's already right.
+ let mut memory_index;
+ if optimize {
+ memory_index = vec![0; inverse_memory_index.len()];
- let mut memory_index;
- if optimize {
- memory_index = vec![0; inverse_memory_index.len()];
+ for i in 0..inverse_memory_index.len() {
+ memory_index[inverse_memory_index[i] as usize] = i as u32;
+ }
+ } else {
+ memory_index = inverse_memory_index;
+ }
- for i in 0..inverse_memory_index.len() {
- memory_index[inverse_memory_index[i] as usize] = i as u32;
- }
- } else {
- memory_index = inverse_memory_index;
- }
-
- let size = min_size.align_to(align.abi);
- let mut abi = Abi::Aggregate { sized };
-
- // Unpack newtype ABIs and find scalar pairs.
- if sized && size.bytes() > 0 {
- // All other fields must be ZSTs, and we need them to all start at 0.
- let mut zst_offsets =
- offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
- if zst_offsets.all(|(_, o)| o.bytes() == 0) {
- let mut non_zst_fields =
- fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
- match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
- // We have exactly one non-ZST field.
- (Some((i, field)), None, None) => {
- // Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 &&
- align.abi == field.align.abi &&
- size == field.size {
- match field.abi {
- // For plain scalars, or vectors of them, we can't unpack
- // newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi.clone();
- }
- // But scalar pairs are Rust-specific and get
- // treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi.clone();
- }
- _ => {}
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+
+ // Unpack newtype ABIs and find scalar pairs.
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs, and we need them to all start at 0.
+ let mut zst_offsets =
+ offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
+ if zst_offsets.all(|(_, o)| o.bytes() == 0) {
+ let mut non_zst_fields =
+ fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 &&
+ align.abi == field.align.abi &&
+ size == field.size {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi.clone();
}
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi.clone();
+ }
+ _ => {}
}
}
+ }
- // Two non-ZST fields, and they're both scalars.
- (Some((i, &TyLayout {
- details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
- })), Some((j, &TyLayout {
- details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
- })), None) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = scalar_pair(a.clone(), b.clone());
- let pair_offsets = match pair.fields {
- FieldPlacement::Arbitrary {
- ref offsets,
- ref memory_index
- } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!()
- };
- if offsets[i] == pair_offsets[0] &&
- offsets[j] == pair_offsets[1] &&
- align == pair.align &&
- size == pair.size {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, &TyLayout {
+ details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
+ })), Some((j, &TyLayout {
+ details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
+ })), None) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair(a.clone(), b.clone());
+ let pair_offsets = match pair.fields {
+ FieldPlacement::Arbitrary {
+ ref offsets,
+ ref memory_index
+ } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
}
+ _ => bug!()
+ };
+ if offsets[i] == pair_offsets[0] &&
+ offsets[j] == pair_offsets[1] &&
+ align == pair.align &&
+ size == pair.size {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
}
-
- _ => {}
}
+
+ _ => {}
}
}
+ }
- if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
+ if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
- Ok(LayoutDetails {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldPlacement::Arbitrary {
- offsets,
- memory_index
- },
- abi,
- align,
- size
- })
+ Ok(LayoutDetails {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldPlacement::Arbitrary {
+ offsets,
+ memory_index
+ },
+ abi,
+ align,
+ size
+ })
+ }
+
+ fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ let dl = self.data_layout();
+ let scalar_unit = |value: Primitive| {
+ let bits = value.size(dl).bits();
+ assert!(bits <= 128);
+ Scalar {
+ value,
+ valid_range: 0..=(!0 >> (128 - bits))
+ }
+ };
+ let scalar = |value: Primitive| {
+ tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
};
+
let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
- Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
+ Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
};
debug_assert!(!ty.has_infer_types());
};
// Effectively a (ptr, meta) tuple.
- tcx.intern_layout(scalar_pair(data_ptr, metadata))
+ tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
}
// Arrays and slices.
univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
}
ty::Dynamic(..) | ty::Foreign(..) => {
- let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
+ let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
StructKind::AlwaysSized)?;
match unit.abi {
Abi::Aggregate { ref mut sized } => *sized = false,
tcx.intern_layout(unit)
}
- ty::Generator(def_id, ref substs, _) => {
- // FIXME(tmandry): For fields that are repeated in multiple
- // variants in the GeneratorLayout, we need code to ensure that
- // the offset of these fields never change. Right now this is
- // not an issue since every variant has every field, but once we
- // optimize this we have to be more careful.
-
- let discr_index = substs.prefix_tys(def_id, tcx).count();
- let prefix_tys = substs.prefix_tys(def_id, tcx)
- .chain(iter::once(substs.discr_ty(tcx)));
- let prefix = univariant_uninterned(
- &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::AlwaysSized)?;
-
- let mut size = prefix.size;
- let mut align = prefix.align;
- let variants_tys = substs.state_tys(def_id, tcx);
- let variants = variants_tys.enumerate().map(|(i, variant_tys)| {
- let mut variant = univariant_uninterned(
- &variant_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::Prefixed(prefix.size, prefix.align.abi))?;
-
- variant.variants = Variants::Single { index: VariantIdx::new(i) };
-
- size = size.max(variant.size);
- align = align.max(variant.align);
-
- Ok(variant)
- }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- let abi = if prefix.abi.is_uninhabited() ||
- variants.iter().all(|v| v.abi.is_uninhabited()) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
- let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
- Abi::Scalar(s) => s.clone(),
- _ => bug!(),
- };
-
- let layout = tcx.intern_layout(LayoutDetails {
- variants: Variants::Multiple {
- discr,
- discr_kind: DiscriminantKind::Tag,
- discr_index,
- variants,
- },
- fields: prefix.fields,
- abi,
- size,
- align,
- });
- debug!("generator layout ({:?}): {:#?}", ty, layout);
- layout
- }
+ ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?,
ty::Closure(def_id, ref substs) => {
let tys = substs.upvar_tys(def_id, tcx);
else { StructKind::AlwaysSized }
};
- let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
+ let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
st.variants = Variants::Single { index: v };
let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
match st.abi {
let mut align = dl.aggregate_align;
let st = variants.iter_enumerated().map(|(j, v)| {
- let mut st = univariant_uninterned(v,
+ let mut st = self.univariant_uninterned(ty, v,
&def.repr, StructKind::AlwaysSized)?;
st.variants = Variants::Single { index: j };
// Create the set of structs that represent each variant.
let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
- let mut st = univariant_uninterned(&field_layouts,
+ let mut st = self.univariant_uninterned(ty, &field_layouts,
&def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
st.variants = Variants::Single { index: i };
// Find the first field we can't move later
}
}
if let Some((prim, offset)) = common_prim {
- let pair = scalar_pair(tag.clone(), scalar_unit(prim));
+ let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
let pair_offsets = match pair.fields {
FieldPlacement::Arbitrary {
ref offsets,
}
})
}
+}
+/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility {
+ Unassigned,
+ Assigned(VariantIdx),
+ // FIXME: Use newtype_index so we aren't wasting bytes
+ Ineligible(Option<u32>),
+}
+
+// When laying out generators, we divide our saved local fields into two
+// categories: overlap-eligible and overlap-ineligible.
+//
+// Those fields which are ineligible for overlap go in a "prefix" at the
+// beginning of the layout, and always have space reserved for them.
+//
+// Overlap-eligible fields are only assigned to one variant, so we lay
+// those fields out for each variant and put them right after the
+// prefix.
+//
+// Finally, in the layout details, we point to the fields from the
+// variants they are assigned to. It is possible for some fields to be
+// included in multiple variants. No field ever "moves around" in the
+// layout; its offset is always the same.
+//
+// Also included in the layout are the upvars and the discriminant.
+// These are included as fields on the "outer" layout; they are not part
+// of any variant.
+impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
+ /// Compute the eligibility and assignment of each local.
+ fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
+ -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
+ use SavedLocalEligibility::*;
+
+ let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
+ IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+
+ // The saved locals not eligible for overlap. These will get
+ // "promoted" to the prefix of our generator.
+ let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
+
+ // Figure out which of our saved locals are fields in only
+ // one variant. The rest are deemed ineligible for overlap.
+ for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+ for local in fields {
+ match assignments[*local] {
+ Unassigned => {
+ assignments[*local] = Assigned(variant_index);
+ }
+ Assigned(idx) => {
+ // We've already seen this local at another suspension
+ // point, so it is no longer a candidate.
+ trace!("removing local {:?} in >1 variant ({:?}, {:?})",
+ local, variant_index, idx);
+ ineligible_locals.insert(*local);
+ assignments[*local] = Ineligible(None);
+ }
+ Ineligible(_) => {},
+ }
+ }
+ }
+
+ // Next, check every pair of eligible locals to see if they
+ // conflict.
+ for local_a in info.storage_conflicts.rows() {
+ let conflicts_a = info.storage_conflicts.count(local_a);
+ if ineligible_locals.contains(local_a) {
+ continue;
+ }
+
+ for local_b in info.storage_conflicts.iter(local_a) {
+ // local_a and local_b are storage live at the same time, therefore they
+ // cannot overlap in the generator layout. The only way to guarantee
+ // this is if they are in the same variant, or one is ineligible
+ // (which means it is stored in every variant).
+ if ineligible_locals.contains(local_b) ||
+ assignments[local_a] == assignments[local_b]
+ {
+ continue;
+ }
+
+ // If they conflict, we will choose one to make ineligible.
+ // This is not always optimal; it's just a greedy heuristic that
+ // seems to produce good results most of the time.
+ let conflicts_b = info.storage_conflicts.count(local_b);
+ let (remove, other) = if conflicts_a > conflicts_b {
+ (local_a, local_b)
+ } else {
+ (local_b, local_a)
+ };
+ ineligible_locals.insert(remove);
+ assignments[remove] = Ineligible(None);
+ trace!("removing local {:?} due to conflict with {:?}", remove, other);
+ }
+ }
+
+ // Write down the order of our locals that will be promoted to the prefix.
+ {
+ let mut idx = 0u32;
+ for local in ineligible_locals.iter() {
+ assignments[local] = Ineligible(Some(idx));
+ idx += 1;
+ }
+ }
+ debug!("generator saved local assignments: {:?}", assignments);
+
+ (ineligible_locals, assignments)
+ }
+
+ /// Compute the full generator layout.
+ fn generator_layout(
+ &self,
+ ty: Ty<'tcx>,
+ def_id: hir::def_id::DefId,
+ substs: &GeneratorSubsts<'tcx>,
+ ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
+ use SavedLocalEligibility::*;
+ let tcx = self.tcx;
+ let recompute_memory_index = |offsets: &[Size]| -> Vec<u32> {
+ debug!("recompute_memory_index({:?})", offsets);
+ let mut inverse_index = (0..offsets.len() as u32).collect::<Vec<_>>();
+ inverse_index.sort_unstable_by_key(|i| offsets[*i as usize]);
+
+ let mut index = vec![0; offsets.len()];
+ for i in 0..index.len() {
+ index[inverse_index[i] as usize] = i as u32;
+ }
+ debug!("recompute_memory_index() => {:?}", index);
+ index
+ };
+ let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) };
+
+ let info = tcx.generator_layout(def_id);
+ let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
+
+ // Build a prefix layout, including "promoting" all ineligible
+ // locals as part of the prefix. We compute the layout of all of
+ // these fields at once to get optimal packing.
+ let discr_index = substs.prefix_tys(def_id, tcx).count();
+ let promoted_tys =
+ ineligible_locals.iter().map(|local| subst_field(info.field_tys[local]));
+ let prefix_tys = substs.prefix_tys(def_id, tcx)
+ .chain(iter::once(substs.discr_ty(tcx)))
+ .chain(promoted_tys);
+ let prefix = self.univariant_uninterned(
+ ty,
+ &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized)?;
+ let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+ // Split the prefix layout into the "outer" fields (upvars and
+ // discriminant) and the "promoted" fields. Promoted fields will
+ // get included in each variant that requested them in
+ // GeneratorLayout.
+ debug!("prefix = {:#?}", prefix);
+ let (outer_fields, promoted_offsets) = match prefix.fields {
+ FieldPlacement::Arbitrary { mut offsets, .. } => {
+ let offsets_b = offsets.split_off(discr_index + 1);
+ let offsets_a = offsets;
+
+ let memory_index = recompute_memory_index(&offsets_a);
+ let outer_fields = FieldPlacement::Arbitrary { offsets: offsets_a, memory_index };
+ (outer_fields, offsets_b)
+ }
+ _ => bug!(),
+ };
+
+ let mut size = prefix.size;
+ let mut align = prefix.align;
+ let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
+ // Only include overlap-eligible fields when we compute our variant layout.
+ let variant_only_tys = variant_fields
+ .iter()
+ .filter(|local| {
+ match assignments[**local] {
+ Unassigned => bug!(),
+ Assigned(v) if v == index => true,
+ Assigned(_) => bug!("assignment does not match variant"),
+ Ineligible(_) => false,
+ }
+ })
+ .map(|local| subst_field(info.field_tys[*local]));
+
+ let mut variant = self.univariant_uninterned(
+ ty,
+ &variant_only_tys
+ .map(|ty| self.layout_of(ty))
+ .collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::Prefixed(prefix_size, prefix_align.abi))?;
+ variant.variants = Variants::Single { index };
+
+ let offsets = match variant.fields {
+ FieldPlacement::Arbitrary { offsets, .. } => offsets,
+ _ => bug!(),
+ };
+
+ // Now, stitch the promoted and variant-only fields back together in
+ // the order they are mentioned by our GeneratorLayout.
+ let mut next_variant_field = 0;
+ let mut combined_offsets = Vec::new();
+ for local in variant_fields.iter() {
+ match assignments[*local] {
+ Unassigned => bug!(),
+ Assigned(_) => {
+ combined_offsets.push(offsets[next_variant_field]);
+ next_variant_field += 1;
+ }
+ Ineligible(field_idx) => {
+ let field_idx = field_idx.unwrap() as usize;
+ combined_offsets.push(promoted_offsets[field_idx]);
+ }
+ }
+ }
+ let memory_index = recompute_memory_index(&combined_offsets);
+ variant.fields = FieldPlacement::Arbitrary { offsets: combined_offsets, memory_index };
+
+ size = size.max(variant.size);
+ align = align.max(variant.align);
+ Ok(variant)
+ }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ let abi = if prefix.abi.is_uninhabited() ||
+ variants.iter().all(|v| v.abi.is_uninhabited()) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+ let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
+ Abi::Scalar(s) => s.clone(),
+ _ => bug!(),
+ };
+
+ let layout = tcx.intern_layout(LayoutDetails {
+ variants: Variants::Multiple {
+ discr,
+ discr_kind: DiscriminantKind::Tag,
+ discr_index,
+ variants,
+ },
+ fields: outer_fields,
+ abi,
+ size,
+ align,
+ });
+ debug!("generator layout ({:?}): {:#?}", ty, layout);
+ Ok(layout)
+ }
+}
+
+impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
/// This is invoked by the `layout_raw` query to record the final
/// layout of each type.
#[inline(always)]
}
}
-impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
- where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
- C::TyLayout: MaybeResult<TyLayout<'tcx>>,
- C: HasParamEnv<'tcx>
+impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
+where
+ C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
+ C::TyLayout: MaybeResult<TyLayout<'tcx>>,
+ C: HasParamEnv<'tcx>,
{
fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
let details = match this.variants {
/// Tests whether the associated item admits a non-trivial implementation
/// for !
- pub fn relevant_for_never<'tcx>(&self) -> bool {
+ pub fn relevant_for_never(&self) -> bool {
match self.kind {
AssocKind::Existential |
AssocKind::Const |
pub name: T,
}
-impl<'a, 'gcx, T> HashStable<StableHashingContext<'a>> for Placeholder<T>
- where T: HashStable<StableHashingContext<'a>>
+impl<'a, T> HashStable<StableHashingContext<'a>> for Placeholder<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
{
fn hash_stable<W: StableHasherResult>(
&self,
/// Returns an iterator over all fields contained
/// by this ADT.
#[inline]
- pub fn all_fields<'s>(&'s self) -> impl Iterator<Item = &'s FieldDef> {
+ pub fn all_fields<'s>(&'s self) -> impl Iterator<Item = &'s FieldDef> + Clone {
self.variants.iter().flat_map(|v| v.fields.iter())
}
pub mod obsolete;
+// FIXME(eddyb) false positive, the lifetime parameters are used with `P: Printer<...>`.
+#[allow(unused_lifetimes)]
pub trait Print<'gcx, 'tcx, P> {
type Output;
type Error;
// Query configuration and description traits.
+// FIXME(eddyb) false positive, the lifetime parameter is used for `Key`/`Value`.
+#[allow(unused_lifetimes)]
pub trait QueryConfig<'tcx> {
const NAME: QueryName;
const CATEGORY: ProfileCategory;
}
/// Wraps `value` in a binder, binding higher-ranked vars (if any).
- pub fn bind<'tcx>(value: T) -> Binder<T> {
+ pub fn bind(value: T) -> Binder<T> {
Binder(value)
}
pub item_def_id: DefId,
}
-impl<'a, 'tcx> ProjectionTy<'tcx> {
+impl<'tcx> ProjectionTy<'tcx> {
/// Construct a `ProjectionTy` by searching the trait from `trait_ref` for the
/// associated item named `item_name`.
pub fn from_ref_and_name(
/// Type utilities
impl<'a, 'gcx, 'tcx> TyS<'tcx> {
+ #[inline]
pub fn is_unit(&self) -> bool {
match self.sty {
Tuple(ref tys) => tys.is_empty(),
}
}
+ #[inline]
pub fn is_never(&self) -> bool {
match self.sty {
Never => true,
}
}
+ #[inline]
pub fn is_primitive(&self) -> bool {
match self.sty {
Bool | Char | Int(_) | Uint(_) | Float(_) => true,
}
}
+ #[inline]
pub fn is_ty_infer(&self) -> bool {
match self.sty {
Infer(_) => true,
}
}
+ #[inline]
pub fn is_phantom_data(&self) -> bool {
if let Adt(def, _) = self.sty {
def.is_phantom_data()
}
}
+ #[inline]
pub fn is_bool(&self) -> bool { self.sty == Bool }
+ #[inline]
pub fn is_param(&self, index: u32) -> bool {
match self.sty {
ty::Param(ref data) => data.index == index,
}
}
+ #[inline]
pub fn is_self(&self) -> bool {
match self.sty {
Param(ref p) => p.is_self(),
}
}
+ #[inline]
pub fn is_slice(&self) -> bool {
match self.sty {
RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.sty {
}
}
+ #[inline]
pub fn is_region_ptr(&self) -> bool {
match self.sty {
Ref(..) => true,
}
}
+ #[inline]
pub fn is_mutable_pointer(&self) -> bool {
match self.sty {
RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) |
}
}
+ #[inline]
pub fn is_unsafe_ptr(&self) -> bool {
match self.sty {
RawPtr(_) => return true,
}
/// Returns `true` if this type is an `Arc<T>`.
+ #[inline]
pub fn is_arc(&self) -> bool {
match self.sty {
Adt(def, _) => def.is_arc(),
}
/// Returns `true` if this type is an `Rc<T>`.
+ #[inline]
pub fn is_rc(&self) -> bool {
match self.sty {
Adt(def, _) => def.is_rc(),
}
}
+ #[inline]
pub fn is_box(&self) -> bool {
match self.sty {
Adt(def, _) => def.is_box(),
/// A scalar type is one that denotes an atomic datum, with no sub-components.
/// (A RawPtr is scalar because it represents a non-managed pointer, so its
/// contents are abstract to rustc.)
+ #[inline]
pub fn is_scalar(&self) -> bool {
match self.sty {
Bool | Char | Int(_) | Float(_) | Uint(_) |
}
/// Returns `true` if this type is a floating point type.
+ #[inline]
pub fn is_floating_point(&self) -> bool {
match self.sty {
Float(_) |
}
}
+ #[inline]
pub fn is_trait(&self) -> bool {
match self.sty {
Dynamic(..) => true,
}
}
+ #[inline]
pub fn is_enum(&self) -> bool {
match self.sty {
Adt(adt_def, _) => {
}
}
+ #[inline]
pub fn is_closure(&self) -> bool {
match self.sty {
Closure(..) => true,
}
}
+ #[inline]
pub fn is_generator(&self) -> bool {
match self.sty {
Generator(..) => true,
}
}
+ #[inline]
pub fn is_fresh_ty(&self) -> bool {
match self.sty {
Infer(FreshTy(_)) => true,
}
}
+ #[inline]
pub fn is_fresh(&self) -> bool {
match self.sty {
Infer(FreshTy(_)) => true,
}
}
+ #[inline]
pub fn is_char(&self) -> bool {
match self.sty {
Char => true,
}
#[inline]
- pub fn is_fp(&self) -> bool {
- match self.sty {
- Infer(FloatVar(_)) | Float(_) => true,
- _ => false
- }
- }
-
pub fn is_numeric(&self) -> bool {
- self.is_integral() || self.is_fp()
+ self.is_integral() || self.is_floating_point()
}
+ #[inline]
pub fn is_signed(&self) -> bool {
match self.sty {
Int(_) => true,
}
}
+ #[inline]
pub fn is_pointer_sized(&self) -> bool {
match self.sty {
Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => true,
}
}
+ #[inline]
pub fn is_machine(&self) -> bool {
match self.sty {
Int(..) | Uint(..) | Float(..) => true,
}
}
+ #[inline]
pub fn has_concrete_skeleton(&self) -> bool {
match self.sty {
Param(_) | Infer(_) | Error => false,
}
}
+ #[inline]
pub fn is_fn(&self) -> bool {
match self.sty {
FnDef(..) | FnPtr(_) => true,
}
}
+ #[inline]
+ pub fn is_fn_ptr(&self) -> bool {
+ match self.sty {
+ FnPtr(_) => true,
+ _ => false,
+ }
+ }
+
+ #[inline]
pub fn is_impl_trait(&self) -> bool {
match self.sty {
Opaque(..) => true,
/// Basically a workaround; see [this comment] for details.
///
/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999
+// FIXME(eddyb) false positive, the lifetime parameter is "phantom" but needed.
+#[allow(unused_lifetimes)]
pub trait Captures<'a> { }
impl<'a, T: ?Sized> Captures<'a> for T { }
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
pub mod expand;
}
}
-impl<'tcx> fmt::Debug for InteriorKind {
+impl fmt::Debug for InteriorKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
InteriorField(mc::FieldIndex(_, info)) => write!(f, "{}", info),
#![allow(non_camel_case_types)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(nll)]
}
}
-impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+impl AsmMethods for CodegenCx<'ll, 'tcx> {
fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {
}
}
-impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
-fn get_static(&mut self, def_id: DefId) -> &'ll Value {
+impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
+ fn get_static(&mut self, def_id: DefId) -> &'ll Value {
// Forward to the `get_static` method of `CodegenCx`
self.cx().get_static(def_id)
}
ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64);
+ ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+ ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32);
"fmaf64" => "llvm.fma.f64",
"fabsf32" => "llvm.fabs.f32",
"fabsf64" => "llvm.fabs.f64",
+ "minnumf32" => "llvm.minnum.f32",
+ "minnumf64" => "llvm.minnum.f64",
+ "maxnumf32" => "llvm.maxnum.f32",
+ "maxnumf64" => "llvm.maxnum.f64",
"copysignf32" => "llvm.copysign.f32",
"copysignf64" => "llvm.copysign.f64",
"floorf32" => "llvm.floor.f32",
#![feature(box_syntax)]
#![feature(const_cstr_unchecked)]
#![feature(crate_visibility_modifier)]
-#![feature(custom_attribute)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![allow(unused_attributes)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(core_intrinsics)]
-#![feature(custom_attribute)]
#![feature(libc)]
#![feature(rustc_diagnostic_macros)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(explicit_outlives_requirements)]
#![recursion_limit="256"]
}
}
-pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Body<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+pub fn cleanup_kinds<'tcx>(mir: &mir::Body<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
fn discover_masters<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
mir: &mir::Body<'tcx>) {
for (bb, data) in mir.basic_blocks().iter_enumerated() {
}
}
- fn codegen_return_terminator<'b>(
- &mut self,
- mut bx: Bx,
- ) {
+ fn codegen_return_terminator(&mut self, mut bx: Bx) {
if self.fn_ty.c_variadic {
match self.va_list_ref {
Some(va_list) => {
mir::Rvalue::UnaryOp(op, ref operand) => {
let operand = self.codegen_operand(&mut bx, operand);
let lloperand = operand.immediate();
- let is_float = operand.layout.ty.is_fp();
+ let is_float = operand.layout.ty.is_floating_point();
let llval = match op {
mir::UnOp::Not => bx.not(lloperand),
mir::UnOp::Neg => if is_float {
rhs: Bx::Value,
input_ty: Ty<'tcx>,
) -> Bx::Value {
- let is_float = input_ty.is_fp();
+ let is_float = input_ty.is_floating_point();
let is_signed = input_ty.is_signed();
let is_unit = input_ty.is_unit();
match op {
) -> bool;
}
-pub trait AsmMethods<'tcx> {
+pub trait AsmMethods {
fn codegen_global_asm(&self, ga: &GlobalAsm);
}
+ AbiBuilderMethods<'tcx>
+ IntrinsicCallMethods<'tcx>
+ AsmBuilderMethods<'tcx>
- + StaticBuilderMethods<'tcx>
+ + StaticBuilderMethods
+ HasParamEnv<'tcx>
+ HasTargetSpec
+ StaticMethods
+ DebugInfoMethods<'tcx>
+ DeclareMethods<'tcx>
- + AsmMethods<'tcx>
+ + AsmMethods
+ PreDefineMethods<'tcx>
+ HasParamEnv<'tcx>
+ HasTyCtxt<'tcx>
+ StaticMethods
+ DebugInfoMethods<'tcx>
+ DeclareMethods<'tcx>
- + AsmMethods<'tcx>
+ + AsmMethods
+ PreDefineMethods<'tcx>
+ HasParamEnv<'tcx>
+ HasTyCtxt<'tcx>
fn codegen_static(&self, def_id: DefId, is_mutable: bool);
}
-pub trait StaticBuilderMethods<'tcx>: BackendTypes {
+pub trait StaticBuilderMethods: BackendTypes {
fn get_static(&mut self, def_id: DefId) -> Self::Value;
fn static_panic_msg(
&mut self,
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool;
fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64;
- fn scalar_pair_element_backend_type<'a>(
+ fn scalar_pair_element_backend_type(
&self,
layout: TyLayout<'tcx>,
index: usize,
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(core_intrinsics)]
-#![feature(custom_attribute)]
#![feature(never_type)]
#![feature(nll)]
#![allow(unused_attributes)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[macro_use]
extern crate rustc;
///
/// All operations that involve a row and/or column index will panic if the
/// index exceeds the relevant bound.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Eq, PartialEq, RustcDecodable, RustcEncodable)]
pub struct BitMatrix<R: Idx, C: Idx> {
num_rows: usize,
num_columns: usize,
}
}
+ /// Creates a new matrix, with `row` used as the value for every row.
+ pub fn from_row_n(row: &BitSet<C>, num_rows: usize) -> BitMatrix<R, C> {
+ let num_columns = row.domain_size();
+ let words_per_row = num_words(num_columns);
+ assert_eq!(words_per_row, row.words().len());
+ BitMatrix {
+ num_rows,
+ num_columns,
+ words: iter::repeat(row.words()).take(num_rows).flatten().cloned().collect(),
+ marker: PhantomData,
+ }
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ (0..self.num_rows).map(R::new)
+ }
+
/// The range of bits for a given row.
fn range(&self, row: R) -> (usize, usize) {
let words_per_row = num_words(self.num_columns);
changed
}
+ /// Adds the bits from `with` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ pub fn union_row_with(&mut self, with: &BitSet<C>, write: R) -> bool {
+ assert!(write.index() < self.num_rows);
+ assert_eq!(with.domain_size(), self.num_columns);
+ let (write_start, write_end) = self.range(write);
+ let mut changed = false;
+ for (read_index, write_index) in (0..with.words().len()).zip(write_start..write_end) {
+ let word = self.words[write_index];
+ let new_word = word | with.words()[read_index];
+ self.words[write_index] = new_word;
+ changed |= word != new_word;
+ }
+ changed
+ }
+
+ /// Sets every cell in `row` to true.
+ pub fn insert_all_into_row(&mut self, row: R) {
+ assert!(row.index() < self.num_rows);
+ let (start, end) = self.range(row);
+ let words = &mut self.words[..];
+ for index in start..end {
+ words[index] = !0;
+ }
+ self.clear_excess_bits(row);
+ }
+
+ /// Clear excess bits in the final word of the row.
+ fn clear_excess_bits(&mut self, row: R) {
+ let num_bits_in_final_word = self.num_columns % WORD_BITS;
+ if num_bits_in_final_word > 0 {
+ let mask = (1 << num_bits_in_final_word) - 1;
+ let (_, end) = self.range(row);
+ let final_word_idx = end - 1;
+ self.words[final_word_idx] &= mask;
+ }
+ }
+
+ /// Gets a slice of the underlying words.
+ pub fn words(&self) -> &[Word] {
+ &self.words
+ }
+
/// Iterates through all the columns set to true in a given row of
/// the matrix.
pub fn iter<'a>(&'a self, row: R) -> BitIter<'a, C> {
marker: PhantomData,
}
}
+
+ /// Returns the number of elements in `row`.
+ pub fn count(&self, row: R) -> usize {
+ let (start, end) = self.range(row);
+ self.words[start..end].iter().map(|e| e.count_ones() as usize).sum()
+ }
}
/// A fixed-column-size, variable-row-size 2D bit matrix with a moderately
matrix.insert(2, 99);
matrix.insert(4, 0);
matrix.union_rows(3, 5);
+ matrix.insert_all_into_row(6);
let expected = [99];
let mut iter = expected.iter();
let expected = [22, 75];
let mut iter = expected.iter();
+ assert_eq!(matrix.count(3), expected.len());
for i in matrix.iter(3) {
let j = *iter.next().unwrap();
assert_eq!(i, j);
let expected = [0];
let mut iter = expected.iter();
+ assert_eq!(matrix.count(4), expected.len());
for i in matrix.iter(4) {
let j = *iter.next().unwrap();
assert_eq!(i, j);
let expected = [22, 75];
let mut iter = expected.iter();
+ assert_eq!(matrix.count(5), expected.len());
for i in matrix.iter(5) {
let j = *iter.next().unwrap();
assert_eq!(i, j);
}
assert!(iter.next().is_none());
+
+ assert_eq!(matrix.count(6), 100);
+ let mut count = 0;
+ for (idx, i) in matrix.iter(6).enumerate() {
+ assert_eq!(idx, i);
+ count += 1;
+ }
+ assert_eq!(count, 100);
+
+ if let Some(i) = matrix.iter(7).next() {
+ panic!("expected no elements in row, but contains element {:?}", i);
+ }
}
#[test]
}
}
+impl<R: indexed_vec::Idx, C: indexed_vec::Idx, CTX> HashStable<CTX>
+for bit_set::BitMatrix<R, C>
+{
+ fn hash_stable<W: StableHasherResult>(&self,
+ ctx: &mut CTX,
+ hasher: &mut StableHasher<W>) {
+ self.words().hash_stable(ctx, hasher);
+ }
+}
+
impl_stable_hash_via_hash!(::std::path::Path);
impl_stable_hash_via_hash!(::std::path::PathBuf);
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
pub extern crate getopts;
#[cfg(unix)]
/// Convenience function for internal use, clients should use one of the
/// struct_* methods on Handler.
- pub fn new_with_code(handler: &'a Handler,
+ crate fn new_with_code(handler: &'a Handler,
level: Level,
code: Option<DiagnosticId>,
message: &str)
+//! The current rustc diagnostics emitter.
+//!
+//! An `Emitter` takes care of generating the output from a `DiagnosticBuilder` struct.
+//!
+//! There are various `Emitter` implementations that generate different output formats such as
+//! JSON and human readable output.
+//!
+//! The output types are defined in `librustc::session::config::ErrorOutputType`.
+
use Destination::*;
use syntax_pos::{SourceFile, Span, MultiSpan};
+//! Diagnostics creation and emission for `rustc`.
+//!
+//! This module contains the code for creating and emitting diagnostics.
+
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
-#![feature(custom_attribute)]
+#![feature(crate_visibility_modifier)]
#![allow(unused_attributes)]
#![cfg_attr(unix, feature(libc))]
#![feature(nll)]
#![feature(optin_builtin_traits)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[allow(unused_extern_crates)]
extern crate serialize as rustc_serialize; // used by deriving
pub struct GraphvizDepGraph<'q>(FxHashSet<&'q DepNode>,
Vec<(&'q DepNode, &'q DepNode)>);
-impl<'a, 'tcx, 'q> dot::GraphWalk<'a> for GraphvizDepGraph<'q> {
+impl<'a, 'q> dot::GraphWalk<'a> for GraphvizDepGraph<'q> {
type Node = &'q DepNode;
type Edge = (&'q DepNode, &'q DepNode);
fn nodes(&self) -> dot::Nodes<'_, &'q DepNode> {
}
}
-impl<'a, 'tcx, 'q> dot::Labeller<'a> for GraphvizDepGraph<'q> {
+impl<'a, 'q> dot::Labeller<'a> for GraphvizDepGraph<'q> {
type Node = &'q DepNode;
type Edge = (&'q DepNode, &'q DepNode);
fn graph_id(&self) -> dot::Id<'_> {
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[macro_use] extern crate rustc;
#[allow(unused_extern_crates)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(unused_imports)]
}
// Temporarily have stack size set to 32MB to deal with various crates with long method
-// chains or deep syntax trees.
+// chains or deep syntax trees, except when on Haiku.
// FIXME(oli-obk): get https://github.com/rust-lang/rust/pull/55617 the finish line
-const STACK_SIZE: usize = 32 * 1024 * 1024; // 32MB
+#[cfg(not(target_os = "haiku"))]
+const STACK_SIZE: usize = 32 * 1024 * 1024;
+
+#[cfg(target_os = "haiku")]
+const STACK_SIZE: usize = 16 * 1024 * 1024;
fn get_stack_size() -> Option<usize> {
// FIXME: Hacks on hacks. If the env is trying to override the stack size
declare_lint_pass!(BoxPointers => [BOX_POINTERS]);
impl BoxPointers {
- fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext<'_, '_>, span: Span, ty: Ty<'_>) {
+ fn check_heap_type(&self, cx: &LateContext<'_, '_>, span: Span, ty: Ty<'_>) {
for leaf_ty in ty.walk() {
if leaf_ty.is_box() {
let m = format!("type uses owned (Box type) pointers: {}", ty);
fn check_tokens(&mut self, cx: &EarlyContext<'_>, tokens: TokenStream) {
for tt in tokens.into_trees() {
match tt {
- TokenTree::Token(span, tok) => match tok.ident() {
- // only report non-raw idents
- Some((ident, false)) => {
- self.check_ident_token(cx, UnderMacro(true), ast::Ident {
- span: span.substitute_dummy(ident.span),
- ..ident
- });
- }
- _ => {},
+ // Only report non-raw idents.
+ TokenTree::Token(token) => if let Some((ident, false)) = token.ident() {
+ self.check_ident_token(cx, UnderMacro(true), ident);
}
TokenTree::Delimited(_, _, tts) => {
self.check_tokens(cx, tts)
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[macro_use]
extern crate rustc;
// - `uX` => `uY`
//
// No suggestion for: `isize`, `usize`.
-fn get_type_suggestion<'a>(
- t: Ty<'_>,
- val: u128,
- negative: bool,
-) -> Option<String> {
+fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<String> {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
macro_rules! find_fit {
match ty.sty {
ty::FnPtr(_) => true,
ty::Ref(..) => true,
- ty::Adt(field_def, substs) if field_def.repr.transparent() && field_def.is_struct() => {
- for field in &field_def.non_enum_variant().fields {
+ ty::Adt(field_def, substs) if field_def.repr.transparent() && !field_def.is_union() => {
+ for field in field_def.all_fields() {
let field_ty = tcx.normalize_erasing_regions(
ParamEnv::reveal_all(),
field.ty(tcx, substs),
return FfiUnsafe {
ty: ty,
reason: "this struct has unspecified layout",
- help: Some("consider adding a #[repr(C)] or #[repr(transparent)] \
- attribute to this struct"),
+ help: Some("consider adding a `#[repr(C)]` or \
+ `#[repr(transparent)]` attribute to this struct"),
};
}
if all_phantom { FfiPhantom(ty) } else { FfiSafe }
}
AdtKind::Union => {
- if !def.repr.c() {
+ if !def.repr.c() && !def.repr.transparent() {
return FfiUnsafe {
ty: ty,
reason: "this union has unspecified layout",
- help: Some("consider adding a #[repr(C)] attribute to this union"),
+ help: Some("consider adding a `#[repr(C)]` or \
+ `#[repr(transparent)]` attribute to this union"),
};
}
ParamEnv::reveal_all(),
field.ty(cx, substs),
);
+ // repr(transparent) types are allowed to have arbitrary ZSTs, not just
+ // PhantomData -- skip checking all ZST fields.
+ if def.repr.transparent() && is_zst(cx, field.did, field_ty) {
+ continue;
+ }
let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {
// Check for a repr() attribute to specify the size of the
// discriminant.
- if !def.repr.c() && def.repr.int.is_none() {
+ if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() {
// Special-case types like `Option<extern fn()>`.
if !is_repr_nullable_ptr(cx, ty, def, substs) {
return FfiUnsafe {
ty: ty,
reason: "enum has no representation hint",
- help: Some("consider adding a #[repr(...)] attribute \
- to this enum"),
+ help: Some("consider adding a `#[repr(C)]`, \
+ `#[repr(transparent)]`, or integer `#[repr(...)]` \
+ attribute to this enum"),
};
}
}
// Check the contained variants.
for variant in &def.variants {
for field in &variant.fields {
- let arg = cx.normalize_erasing_regions(
+ let field_ty = cx.normalize_erasing_regions(
ParamEnv::reveal_all(),
field.ty(cx, substs),
);
- let r = self.check_type_for_ffi(cache, arg);
+ // repr(transparent) types are allowed to have arbitrary ZSTs, not
+ // just PhantomData -- skip checking all ZST fields.
+ if def.repr.transparent() && is_zst(cx, field.did, field_ty) {
+ continue;
+ }
+ let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {}
FfiUnsafe { .. } => {
match decl {
ProcMacro::CustomDerive { trait_name, attributes, client } => {
let attrs = attributes.iter().cloned().map(Symbol::intern).collect::<Vec<_>>();
- (trait_name, SyntaxExtension::ProcMacroDerive(
+ (trait_name, SyntaxExtension::Derive(
Box::new(ProcMacroDerive {
client,
attrs: attrs.clone(),
))
}
ProcMacro::Attr { name, client } => {
- (name, SyntaxExtension::AttrProcMacro(
+ (name, SyntaxExtension::Attr(
Box::new(AttrProcMacro { client }),
root.edition,
))
}
ProcMacro::Bang { name, client } => {
- (name, SyntaxExtension::ProcMacro {
+ (name, SyntaxExtension::Bang {
expander: Box::new(BangProcMacro { client }),
allow_internal_unstable: None,
edition: root.edition,
use syntax_ext::proc_macro_impl::BangProcMacro;
let client = proc_macro::bridge::client::Client::expand1(proc_macro::quote);
- let ext = SyntaxExtension::ProcMacro {
+ let ext = SyntaxExtension::Bang {
expander: Box::new(BangProcMacro { client }),
allow_internal_unstable: Some(vec![sym::proc_macro_def_site].into()),
edition: data.root.edition,
let constness = match self.entry(id).kind {
EntryKind::Method(data) => data.decode(self).fn_data.constness,
EntryKind::Fn(data) => data.decode(self).constness,
+ EntryKind::Variant(..) | EntryKind::Struct(..) => hir::Constness::Const,
_ => hir::Constness::NotConst,
};
constness == hir::Constness::Const
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
extern crate libc;
#[allow(unused_extern_crates)]
impl LocalsStateAtExit {
fn build(
locals_are_invalidated_at_exit: bool,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>
) -> Self {
struct HasStorageDead(BitSet<Local>);
if locals_are_invalidated_at_exit {
LocalsStateAtExit::AllAreInvalidated
} else {
- let mut has_storage_dead = HasStorageDead(BitSet::new_empty(mir.local_decls.len()));
- has_storage_dead.visit_body(mir);
+ let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
+ has_storage_dead.visit_body(body);
let mut has_storage_dead_or_moved = has_storage_dead.0;
for move_out in &move_data.moves {
if let Some(index) = move_data.base_local(move_out.path) {
impl<'tcx> BorrowSet<'tcx> {
pub fn build(
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
locals_are_invalidated_at_exit: bool,
move_data: &MoveData<'tcx>
) -> Self {
let mut visitor = GatherBorrows {
tcx,
- mir,
+ body,
idx_vec: IndexVec::new(),
location_map: Default::default(),
activation_map: Default::default(),
local_map: Default::default(),
pending_activations: Default::default(),
locals_state_at_exit:
- LocalsStateAtExit::build(locals_are_invalidated_at_exit, mir, move_data),
+ LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data),
};
- for (block, block_data) in traversal::preorder(mir) {
+ for (block, block_data) in traversal::preorder(body) {
visitor.visit_basic_block_data(block, block_data);
}
struct GatherBorrows<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>,
location_map: FxHashMap<Location, BorrowIndex>,
activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
) {
if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
if borrowed_place.ignore_borrow(
- self.tcx, self.mir, &self.locals_state_at_exit) {
+ self.tcx, self.body, &self.locals_state_at_exit) {
return;
}
if let TwoPhaseActivation::ActivatedAt(other_location) =
borrow_data.activation_location {
span_bug!(
- self.mir.source_info(location).span,
+ self.body.source_info(location).span,
"found two uses for 2-phase borrow temporary {:?}: \
{:?} and {:?}",
temp,
temp
} else {
span_bug!(
- self.mir.source_info(start_location).span,
+ self.body.source_info(start_location).span,
"expected 2-phase borrow to assign to a local, not `{:?}`",
assigned_place,
);
// assignment.
let old_value = self.pending_activations.insert(temp, borrow_index);
if let Some(old_index) = old_value {
- span_bug!(self.mir.source_info(start_location).span,
+ span_bug!(self.body.source_info(start_location).span,
"found already pending activation for temp: {:?} \
at borrow_index: {:?} with associated data {:?}",
temp, old_index, self.idx_vec[old_index]);
);
}
- let ty = used_place.ty(self.mir, self.infcx.tcx).ty;
+ let ty = used_place.ty(self.body, self.infcx.tcx).ty;
let needs_note = match ty.sty {
ty::Closure(id, _) => {
let tables = self.infcx.tcx.typeck_tables_of(id);
let mpi = self.move_data.moves[move_out_indices[0]].path;
let place = &self.move_data.move_paths[mpi].place;
- let ty = place.ty(self.mir, self.infcx.tcx).ty;
+ let ty = place.ty(self.body, self.infcx.tcx).ty;
let opt_name = self.describe_place_with_options(place, IncludingDowncast(true));
let note_msg = match opt_name {
Some(ref name) => format!("`{}`", name),
}
}
let span = if let Place::Base(PlaceBase::Local(local)) = place {
- let decl = &self.mir.local_decls[*local];
+ let decl = &self.body.local_decls[*local];
Some(decl.source_info.span)
} else {
None
location,
borrow,
None,
- ).add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", Some(borrow_span));
+ ).add_explanation_to_diagnostic(self.infcx.tcx, self.body, &mut err, "", Some(borrow_span));
err.buffer(&mut self.errors_buffer);
}
});
self.explain_why_borrow_contains_point(location, borrow, None)
- .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None);
+ .add_explanation_to_diagnostic(self.infcx.tcx, self.body, &mut err, "", None);
err
}
explanation.add_explanation_to_diagnostic(
self.infcx.tcx,
- self.mir,
+ self.body,
&mut err,
first_borrow_desc,
None,
// Define a small closure that we can use to check if the type of a place
// is a union.
let union_ty = |place: &Place<'tcx>| -> Option<Ty<'tcx>> {
- let ty = place.ty(self.mir, self.infcx.tcx).ty;
+ let ty = place.ty(self.body, self.infcx.tcx).ty;
ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
};
let describe_place = |place| self.describe_place(place).unwrap_or_else(|| "_".to_owned());
let borrow_span = borrow_spans.var_or_use();
let proper_span = match *root_place {
- Place::Base(PlaceBase::Local(local)) => self.mir.local_decls[local].source_info.span,
+ Place::Base(PlaceBase::Local(local)) => self.body.local_decls[local].source_info.span,
_ => drop_span,
};
} else {
explanation.add_explanation_to_diagnostic(
self.infcx.tcx,
- self.mir,
+ self.body,
&mut err,
"",
None,
format!("value captured here{}", within),
);
- explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None);
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx, self.body, &mut err, "", None);
}
err
_ => {}
}
- explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None);
+ explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.body, &mut err, "", None);
err.buffer(&mut self.errors_buffer);
}
}
_ => {}
}
- explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None);
+ explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.body, &mut err, "", None);
let within = if borrow_spans.for_generator() {
" by generator"
};
// FIXME use a better heuristic than Spans
- let reference_desc = if return_span == self.mir.source_info(borrow.reserve_location).span {
+ let reference_desc = if return_span == self.body.source_info(borrow.reserve_location).span {
"reference to"
} else {
"value referencing"
let (place_desc, note) = if let Some(place_desc) = opt_place_desc {
let local_kind = match borrow.borrowed_place {
Place::Base(PlaceBase::Local(local)) => {
- match self.mir.local_kind(local) {
+ match self.body.local_kind(local) {
LocalKind::ReturnPointer
| LocalKind::Temp => bug!("temporary or return pointer with a name"),
LocalKind::Var => "local variable ",
} else {
bug!("try_report_cannot_return_reference_to_local: not a local")
};
- match self.mir.local_kind(local) {
+ match self.body.local_kind(local) {
LocalKind::ReturnPointer | LocalKind::Temp => {
(
"temporary value".to_string(),
}
fn get_moved_indexes(&mut self, location: Location, mpi: MovePathIndex) -> Vec<MoveSite> {
- let mir = self.mir;
+ let body = self.body;
let mut stack = Vec::new();
- stack.extend(mir.predecessor_locations(location).map(|predecessor| {
+ stack.extend(body.predecessor_locations(location).map(|predecessor| {
let is_back_edge = location.dominates(predecessor, &self.dominators);
(predecessor, is_back_edge)
}));
}
// check for moves
- let stmt_kind = mir[location.block]
+ let stmt_kind = body[location.block]
.statements
.get(location.statement_index)
.map(|s| &s.kind);
let mut any_match = false;
drop_flag_effects::for_location_inits(
self.infcx.tcx,
- self.mir,
+ self.body,
self.move_data,
location,
|m| {
continue 'dfs;
}
- stack.extend(mir.predecessor_locations(location).map(|predecessor| {
+ stack.extend(body.predecessor_locations(location).map(|predecessor| {
let back_edge = location.dominates(predecessor, &self.dominators);
(predecessor, is_back_edge || back_edge)
}));
);
self.explain_why_borrow_contains_point(location, loan, None)
- .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None);
+ .add_explanation_to_diagnostic(self.infcx.tcx, self.body, &mut err, "", None);
err.buffer(&mut self.errors_buffer);
}
err_place: &Place<'tcx>,
) {
let (from_arg, local_decl) = if let Place::Base(PlaceBase::Local(local)) = *err_place {
- if let LocalKind::Arg = self.mir.local_kind(local) {
- (true, Some(&self.mir.local_decls[local]))
+ if let LocalKind::Arg = self.body.local_kind(local) {
+ (true, Some(&self.body.local_decls[local]))
} else {
- (false, Some(&self.mir.local_decls[local]))
+ (false, Some(&self.body.local_decls[local]))
}
} else {
(false, None)
StorageDeadOrDrop::LocalStorageDead
| StorageDeadOrDrop::BoxedStorageDead => {
assert!(
- base.ty(self.mir, tcx).ty.is_box(),
+ base.ty(self.body, tcx).ty.is_box(),
"Drop of value behind a reference or raw pointer"
);
StorageDeadOrDrop::BoxedStorageDead
StorageDeadOrDrop::Destructor(_) => base_access,
},
ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => {
- let base_ty = base.ty(self.mir, tcx).ty;
+ let base_ty = base.ty(self.body, tcx).ty;
match base_ty.sty {
ty::Adt(def, _) if def.has_dtor(tcx) => {
// Report the outermost adt with a destructor
location
);
if let Some(&Statement { kind: StatementKind::Assign(ref reservation, _), ..})
- = &self.mir[location.block].statements.get(location.statement_index)
+ = &self.body[location.block].statements.get(location.statement_index)
{
debug!(
"annotate_argument_and_return_for_borrow: reservation={:?}",
// Check that the initial assignment of the reserve location is into a temporary.
let mut target = *match reservation {
Place::Base(PlaceBase::Local(local))
- if self.mir.local_kind(*local) == LocalKind::Temp => local,
+ if self.body.local_kind(*local) == LocalKind::Temp => local,
_ => return None,
};
// Next, look through the rest of the block, checking if we are assigning the
// `target` (that is, the place that contains our borrow) to anything.
let mut annotated_closure = None;
- for stmt in &self.mir[location.block].statements[location.statement_index + 1..] {
+ for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
debug!(
"annotate_argument_and_return_for_borrow: target={:?} stmt={:?}",
target, stmt
}
// Check the terminator if we didn't find anything in the statements.
- let terminator = &self.mir[location.block].terminator();
+ let terminator = &self.body[location.block].terminator();
debug!(
"annotate_argument_and_return_for_borrow: target={:?} terminator={:?}",
target, terminator
) {
debug!("add_moved_or_invoked_closure_note: location={:?} place={:?}", location, place);
let mut target = place.local_or_deref_local();
- for stmt in &self.mir[location.block].statements[location.statement_index..] {
+ for stmt in &self.body[location.block].statements[location.statement_index..] {
debug!("add_moved_or_invoked_closure_note: stmt={:?} target={:?}", stmt, target);
if let StatementKind::Assign(into, box Rvalue::Use(from)) = &stmt.kind {
debug!("add_fnonce_closure_note: into={:?} from={:?}", into, from);
}
// Check if we are attempting to call a closure after it has been invoked.
- let terminator = self.mir[location.block].terminator();
+ let terminator = self.body[location.block].terminator();
debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
if let TerminatorKind::Call {
func: Operand::Constant(box Constant {
};
debug!("add_moved_or_invoked_closure_note: closure={:?}", closure);
- if let ty::Closure(did, _) = self.mir.local_decls[closure].ty.sty {
+ if let ty::Closure(did, _) = self.body.local_decls[closure].ty.sty {
let hir_id = self.infcx.tcx.hir().as_local_hir_id(did).unwrap();
if let Some((span, name)) = self.infcx.tcx.typeck_tables_of(did)
// Check if we are just moving a closure after it has been invoked.
if let Some(target) = target {
- if let ty::Closure(did, _) = self.mir.local_decls[target].ty.sty {
+ if let ty::Closure(did, _) = self.body.local_decls[target].ty.sty {
let hir_id = self.infcx.tcx.hir().as_local_hir_id(did).unwrap();
if let Some((span, name)) = self.infcx.tcx.typeck_tables_of(did)
&including_downcast,
)?;
} else if let Place::Base(PlaceBase::Local(local)) = proj.base {
- if self.mir.local_decls[local].is_ref_for_guard() {
+ if self.body.local_decls[local].is_ref_for_guard() {
self.append_place_to_string(
&proj.base,
buf,
/// Appends end-user visible description of the `local` place to `buf`. If `local` doesn't have
/// a name, or its name was generated by the compiler, then `Err` is returned
fn append_local_to_string(&self, local_index: Local, buf: &mut String) -> Result<(), ()> {
- let local = &self.mir.local_decls[local_index];
+ let local = &self.body.local_decls[local_index];
match local.name {
Some(name) if !local.from_compiler_desugaring() => {
buf.push_str(name.as_str().get());
fn describe_field(&self, base: &Place<'tcx>, field: Field) -> String {
match *base {
Place::Base(PlaceBase::Local(local)) => {
- let local = &self.mir.local_decls[local];
+ let local = &self.body.local_decls[local];
self.describe_field_from_ty(&local.ty, field, None)
}
Place::Base(PlaceBase::Static(ref static_)) =>
Place::Projection(ref proj) => match proj.elem {
ProjectionElem::Deref => self.describe_field(&proj.base, field),
ProjectionElem::Downcast(_, variant_index) => {
- let base_ty = base.ty(self.mir, self.infcx.tcx).ty;
+ let base_ty = base.ty(self.body, self.infcx.tcx).ty;
self.describe_field_from_ty(&base_ty, field, Some(variant_index))
}
ProjectionElem::Field(_, field_type) => {
) -> UseSpans {
use self::UseSpans::*;
- let stmt = match self.mir[location.block].statements.get(location.statement_index) {
+ let stmt = match self.body[location.block].statements.get(location.statement_index) {
Some(stmt) => stmt,
- None => return OtherUse(self.mir.source_info(location).span),
+ None => return OtherUse(self.body.source_info(location).span),
};
debug!("move_spans: moved_place={:?} location={:?} stmt={:?}", moved_place, location, stmt);
use self::UseSpans::*;
debug!("borrow_spans: use_span={:?} location={:?}", use_span, location);
- let target = match self.mir[location.block]
+ let target = match self.body[location.block]
.statements
.get(location.statement_index)
{
_ => return OtherUse(use_span),
};
- if self.mir.local_kind(target) != LocalKind::Temp {
+ if self.body.local_kind(target) != LocalKind::Temp {
// operands are always temporaries.
return OtherUse(use_span);
}
- for stmt in &self.mir[location.block].statements[location.statement_index + 1..] {
+ for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
if let StatementKind::Assign(
_, box Rvalue::Aggregate(ref kind, ref places)
) = stmt.kind {
/// Helper to retrieve span(s) of given borrow from the current MIR
/// representation
pub(super) fn retrieve_borrow_spans(&self, borrow: &BorrowData<'_>) -> UseSpans {
- let span = self.mir.source_info(borrow.reserve_location).span;
+ let span = self.body.source_info(borrow.reserve_location).span;
self.borrow_spans(span, borrow.reserve_location)
}
}
}
impl LocationTable {
- crate fn new(mir: &Body<'_>) -> Self {
+ crate fn new(body: &Body<'_>) -> Self {
let mut num_points = 0;
- let statements_before_block = mir.basic_blocks()
+ let statements_before_block = body.basic_blocks()
.iter()
.map(|block_data| {
let v = num_points;
}
fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> BorrowCheckResult<'tcx> {
- let input_mir = tcx.mir_validated(def_id);
+ let input_body = tcx.mir_validated(def_id);
debug!("run query mir_borrowck: {}", tcx.def_path_str(def_id));
- // We are not borrow checking the automatically generated struct/variant constructors
- // because we want to accept structs such as this (taken from the `linked-hash-map`
- // crate):
- // ```rust
- // struct Qey<Q: ?Sized>(Q);
- // ```
- // MIR of this struct constructor looks something like this:
- // ```rust
- // fn Qey(_1: Q) -> Qey<Q>{
- // let mut _0: Qey<Q>; // return place
- //
- // bb0: {
- // (_0.0: Q) = move _1; // bb0[0]: scope 0 at src/main.rs:1:1: 1:26
- // return; // bb0[1]: scope 0 at src/main.rs:1:1: 1:26
- // }
- // }
- // ```
- // The problem here is that `(_0.0: Q) = move _1;` is valid only if `Q` is
- // of statically known size, which is not known to be true because of the
- // `Q: ?Sized` constraint. However, it is true because the constructor can be
- // called only when `Q` is of statically known size.
- if tcx.is_constructor(def_id) {
- return BorrowCheckResult {
- closure_requirements: None,
- used_mut_upvars: SmallVec::new(),
- };
- }
-
let opt_closure_req = tcx.infer_ctxt().enter(|infcx| {
- let input_mir: &Body<'_> = &input_mir.borrow();
- do_mir_borrowck(&infcx, input_mir, def_id)
+ let input_body: &Body<'_> = &input_body.borrow();
+ do_mir_borrowck(&infcx, input_body, def_id)
});
debug!("mir_borrowck done");
fn do_mir_borrowck<'a, 'gcx, 'tcx>(
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- input_mir: &Body<'gcx>,
+ input_body: &Body<'gcx>,
def_id: DefId,
) -> BorrowCheckResult<'gcx> {
debug!("do_mir_borrowck(def_id = {:?})", def_id);
// requires first making our own copy of the MIR. This copy will
// be modified (in place) to contain non-lexical lifetimes. It
// will have a lifetime tied to the inference context.
- let mut mir: Body<'tcx> = input_mir.clone();
- let free_regions = nll::replace_regions_in_mir(infcx, def_id, param_env, &mut mir);
- let mir = &mir; // no further changes
- let location_table = &LocationTable::new(mir);
+ let mut body: Body<'tcx> = input_body.clone();
+ let free_regions = nll::replace_regions_in_mir(infcx, def_id, param_env, &mut body);
+ let body = &body; // no further changes
+ let location_table = &LocationTable::new(body);
let mut errors_buffer = Vec::new();
let (move_data, move_errors): (MoveData<'tcx>, Option<Vec<(Place<'tcx>, MoveError<'tcx>)>>) =
- match MoveData::gather_moves(mir, tcx) {
+ match MoveData::gather_moves(body, tcx) {
Ok(move_data) => (move_data, None),
Err((move_data, move_errors)) => (move_data, Some(move_errors)),
};
param_env: param_env,
};
- let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
- mir,
+ body,
def_id,
&attributes,
&dead_unwinds,
- MaybeInitializedPlaces::new(tcx, mir, &mdpe),
+ MaybeInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind_by_hir_id(id).is_fn_or_closure();
let borrow_set = Rc::new(BorrowSet::build(
- tcx, mir, locals_are_invalidated_at_exit, &mdpe.move_data));
+ tcx, body, locals_are_invalidated_at_exit, &mdpe.move_data));
// If we are in non-lexical mode, compute the non-lexical lifetimes.
let (regioncx, polonius_output, opt_closure_req) = nll::compute_regions(
infcx,
def_id,
free_regions,
- mir,
+ body,
&upvars,
location_table,
param_env,
let flow_borrows = FlowAtLocation::new(do_dataflow(
tcx,
- mir,
+ body,
def_id,
&attributes,
&dead_unwinds,
- Borrows::new(tcx, mir, regioncx.clone(), &borrow_set),
+ Borrows::new(tcx, body, regioncx.clone(), &borrow_set),
|rs, i| DebugFormatted::new(&rs.location(i)),
));
let flow_uninits = FlowAtLocation::new(do_dataflow(
tcx,
- mir,
+ body,
def_id,
&attributes,
&dead_unwinds,
- MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
+ MaybeUninitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_ever_inits = FlowAtLocation::new(do_dataflow(
tcx,
- mir,
+ body,
def_id,
&attributes,
&dead_unwinds,
- EverInitializedPlaces::new(tcx, mir, &mdpe),
+ EverInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
));
_ => true,
};
- let dominators = mir.dominators();
+ let dominators = body.dominators();
let mut mbcx = MirBorrowckCtxt {
infcx,
- mir,
+ body,
mir_def_id: def_id,
move_data: &mdpe.move_data,
location_table,
let mut initial_diag =
mbcx.report_conflicting_borrow(location, (&place, span), bk, &borrow);
- let lint_root = if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data {
- let scope = mbcx.mir.source_info(location).scope;
+ let lint_root = if let ClearCrossCrate::Set(ref vsi) = mbcx.body.source_scope_local_data {
+ let scope = mbcx.body.source_info(location).scope;
vsi[scope].lint_root
} else {
id
// would have a chance of erroneously adding non-user-defined mutable vars
// to the set.
let temporary_used_locals: FxHashSet<Local> = mbcx.used_mut.iter()
- .filter(|&local| mbcx.mir.local_decls[*local].is_user_variable.is_none())
+ .filter(|&local| mbcx.body.local_decls[*local].is_user_variable.is_none())
.cloned()
.collect();
// For the remaining unused locals that are marked as mutable, we avoid linting any that
// were never initialized. These locals may have been removed as unreachable code; or will be
// linted as unused variables.
- let unused_mut_locals = mbcx.mir.mut_vars_iter()
+ let unused_mut_locals = mbcx.body.mut_vars_iter()
.filter(|local| !mbcx.used_mut.contains(local))
.collect();
mbcx.gather_used_muts(temporary_used_locals, unused_mut_locals);
debug!("mbcx.used_mut: {:?}", mbcx.used_mut);
let used_mut = mbcx.used_mut;
- for local in mbcx.mir.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) {
- if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data {
- let local_decl = &mbcx.mir.local_decls[local];
+ for local in mbcx.body.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) {
+ if let ClearCrossCrate::Set(ref vsi) = mbcx.body.source_scope_local_data {
+ let local_decl = &mbcx.body.local_decls[local];
// Skip implicit `self` argument for closures
if local.index() == 1 && tcx.is_closure(mbcx.mir_def_id) {
pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
- mir: &'cx Body<'tcx>,
+ body: &'cx Body<'tcx>,
mir_def_id: DefId,
move_data: &'cx MoveData<'tcx>,
impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
type FlowState = Flows<'cx, 'gcx, 'tcx>;
- fn mir(&self) -> &'cx Body<'tcx> {
- self.mir
+ fn body(&self) -> &'cx Body<'tcx> {
+ self.body
}
fn visit_block_entry(&mut self, bb: BasicBlock, flow_state: &Self::FlowState) {
let gcx = self.infcx.tcx.global_tcx();
// Compute the type with accurate region information.
- let drop_place_ty = drop_place.ty(self.mir, self.infcx.tcx);
+ let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
let mut error_reported = false;
let tcx = self.infcx.tcx;
- let mir = self.mir;
+ let body = self.body;
let location_table = self.location_table.start_index(location);
let borrow_set = self.borrow_set.clone();
each_borrow_involving_path(
self,
tcx,
- mir,
+ body,
location,
(sd, place_span.0),
&borrow_set,
// (e.g., `x = ...`) so long as it has never been initialized
// before (at this point in the flow).
if let &Place::Base(PlaceBase::Local(local)) = place_span.0 {
- if let Mutability::Not = self.mir.local_decls[local].mutability {
+ if let Mutability::Not = self.body.local_decls[local].mutability {
// check for reassignments to immutable local variables
self.check_if_reassignment_to_immutable_state(
location,
match *operand {
Operand::Move(Place::Base(PlaceBase::Local(local)))
| Operand::Copy(Place::Base(PlaceBase::Local(local)))
- if self.mir.local_decls[local].is_user_variable.is_none() =>
+ if self.body.local_decls[local].is_user_variable.is_none() =>
{
- if self.mir.local_decls[local].ty.is_mutable_pointer() {
+ if self.body.local_decls[local].ty.is_mutable_pointer() {
// The variable will be marked as mutable by the borrow.
return;
}
_ => bug!("temporary initialized in arguments"),
};
- let bbd = &self.mir[loc.block];
+ let bbd = &self.body[loc.block];
let stmt = &bbd.statements[loc.statement_index];
debug!("temporary assigned in: stmt={:?}", stmt);
if places_conflict::borrow_conflicts_with_place(
self.infcx.tcx,
- self.mir,
+ self.body,
place,
borrow.kind,
root_place,
if let Some(init_index) = self.is_local_ever_initialized(local, flow_state) {
// And, if so, report an error.
let init = &self.move_data.inits[init_index];
- let span = init.span(&self.mir);
+ let span = init.span(&self.body);
self.report_illegal_reassignment(
location, place_span, span, place_span.0
);
// assigning to `P.f` requires `P` itself
// be already initialized
let tcx = self.infcx.tcx;
- match base.ty(self.mir, tcx).ty.sty {
+ match base.ty(self.body, tcx).ty.sty {
ty::Adt(def, _) if def.has_dtor(tcx) => {
self.check_if_path_or_subpath_is_moved(
location, InitializationRequiringAction::Assignment,
// no move out from an earlier location) then this is an attempt at initialization
// of the union - we should error in that case.
let tcx = this.infcx.tcx;
- if let ty::Adt(def, _) = base.ty(this.mir, tcx).ty.sty {
+ if let ty::Adt(def, _) = base.ty(this.body, tcx).ty.sty {
if def.is_union() {
if this.move_data.path_map[mpi].iter().any(|moi| {
this.move_data.moves[*moi].source.is_predecessor_of(
- location, this.mir,
+ location, this.body,
)
}) {
return;
) -> Result<RootPlace<'d, 'tcx>, &'d Place<'tcx>> {
match *place {
Place::Base(PlaceBase::Local(local)) => {
- let local = &self.mir.local_decls[local];
+ let local = &self.body.local_decls[local];
match local.mutability {
Mutability::Not => match is_local_mutation_allowed {
LocalMutationIsAllowed::Yes => Ok(RootPlace {
Place::Projection(ref proj) => {
match proj.elem {
ProjectionElem::Deref => {
- let base_ty = proj.base.ty(self.mir, self.infcx.tcx).ty;
+ let base_ty = proj.base.ty(self.body, self.infcx.tcx).ty;
// Check the kind of deref to decide
match base_ty.sty {
Place::Projection(ref proj) => match proj.elem {
ProjectionElem::Field(field, _ty) => {
let tcx = self.infcx.tcx;
- let base_ty = proj.base.ty(self.mir, tcx).ty;
+ let base_ty = proj.base.ty(self.body, tcx).ty;
if (base_ty.is_closure() || base_ty.is_generator()) &&
(!by_ref || self.upvars[field.index()].by_ref)
if let Some(StatementKind::Assign(
Place::Base(PlaceBase::Local(local)),
box Rvalue::Use(Operand::Move(move_from)),
- )) = self.mir.basic_blocks()[location.block]
+ )) = self.body.basic_blocks()[location.block]
.statements
.get(location.statement_index)
.map(|stmt| &stmt.kind)
{
- let local_decl = &self.mir.local_decls[*local];
+ let local_decl = &self.body.local_decls[*local];
// opt_match_place is the
// match_span is the span of the expression being matched on
// match *x.y { ... } match_place is Some(*x.y)
pat_span: _,
}))) = local_decl.is_user_variable
{
- let stmt_source_info = self.mir.source_info(location);
+ let stmt_source_info = self.body.source_info(location);
self.append_binding_error(
grouped_errors,
kind,
// Inspect the type of the content behind the
// borrow to provide feedback about why this
// was a move rather than a copy.
- let ty = deref_target_place.ty(self.mir, self.infcx.tcx).ty;
+ let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty;
let upvar_field = self.prefixes(&move_place, PrefixSet::All)
.find_map(|p| self.is_upvar_field_projection(p));
};
if let Place::Base(PlaceBase::Local(local)) = *deref_base {
- let decl = &self.mir.local_decls[local];
+ let decl = &self.body.local_decls[local];
if decl.is_ref_for_guard() {
let mut err = self.infcx.tcx.cannot_move_out_of(
span,
};
let move_ty = format!(
"{:?}",
- move_place.ty(self.mir, self.infcx.tcx).ty,
+ move_place.ty(self.body, self.infcx.tcx).ty,
);
let snippet = self.infcx.tcx.sess.source_map().span_to_snippet(span).unwrap();
let is_option = move_ty.starts_with("std::option::Option");
move_from,
..
} => {
- let try_remove_deref = match move_from {
- Place::Projection(box Projection {
- elem: ProjectionElem::Deref,
- ..
- }) => true,
- _ => false,
- };
- if try_remove_deref && snippet.starts_with('*') {
- // The snippet doesn't start with `*` in (e.g.) index
- // expressions `a[b]`, which roughly desugar to
- // `*Index::index(&a, b)` or
- // `*IndexMut::index_mut(&mut a, b)`.
- err.span_suggestion(
- span,
- "consider removing the `*`",
- snippet[1..].to_owned(),
- Applicability::Unspecified,
- );
- } else {
- err.span_suggestion(
- span,
- "consider borrowing here",
- format!("&{}", snippet),
- Applicability::Unspecified,
- );
- }
+ err.span_suggestion(
+ span,
+ "consider borrowing here",
+ format!("&{}", snippet),
+ Applicability::Unspecified,
+ );
if binds_to.is_empty() {
- let place_ty = move_from.ty(self.mir, self.infcx.tcx).ty;
+ let place_ty = move_from.ty(self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(&move_from) {
Some(desc) => format!("`{}`", desc),
None => format!("value"),
// No binding. Nothing to suggest.
GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
let span = use_spans.var_or_use();
- let place_ty = original_path.ty(self.mir, self.infcx.tcx).ty;
+ let place_ty = original_path.ty(self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(original_path) {
Some(desc) => format!("`{}`", desc),
None => format!("value"),
) {
let mut suggestions: Vec<(Span, &str, String)> = Vec::new();
for local in binds_to {
- let bind_to = &self.mir.local_decls[*local];
+ let bind_to = &self.body.local_decls[*local];
if let Some(
ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
pat_span,
) {
let mut noncopy_var_spans = Vec::new();
for (j, local) in binds_to.into_iter().enumerate() {
- let bind_to = &self.mir.local_decls[*local];
+ let bind_to = &self.body.local_decls[*local];
let binding_span = bind_to.source_info.span;
if j == 0 {
_ => continue,
};
- let bbd = &self.mir[loc.block];
+ let bbd = &self.body[loc.block];
let is_terminator = bbd.statements.len() == loc.statement_index;
debug!(
"borrowed_content_source: loc={:?} is_terminator={:?}",
..
}) = bbd.terminator {
if let Some(source)
- = BorrowedContentSource::from_call(func.ty(self.mir, tcx), tcx)
+ = BorrowedContentSource::from_call(func.ty(self.body, tcx), tcx)
{
return source;
}
// If we didn't find an overloaded deref or index, then assume it's a
// built in deref and check the type of the base.
- let base_ty = deref_base.ty(self.mir, tcx).ty;
+ let base_ty = deref_base.ty(self.body, tcx).ty;
if base_ty.is_unsafe_ptr() {
BorrowedContentSource::DerefRawPointer
} else if base_ty.is_mutable_pointer() {
if let Place::Base(PlaceBase::Local(_)) = access_place {
reason = ", as it is not declared as mutable".to_string();
} else {
- let name = self.mir.local_decls[*local]
+ let name = self.body.local_decls[*local]
.name
.expect("immutable unnamed local");
reason = format!(", as `{}` is not declared as mutable", name);
elem: ProjectionElem::Field(upvar_index, _),
}) => {
debug_assert!(is_closure_or_generator(
- base.ty(self.mir, self.infcx.tcx).ty
+ base.ty(self.body, self.infcx.tcx).ty
));
item_msg = format!("`{}`", access_place_desc.unwrap());
if *base == Place::Base(PlaceBase::Local(Local::new(1))) &&
!self.upvars.is_empty() {
item_msg = format!("`{}`", access_place_desc.unwrap());
- debug_assert!(self.mir.local_decls[Local::new(1)].ty.is_region_ptr());
+ debug_assert!(self.body.local_decls[Local::new(1)].ty.is_region_ptr());
debug_assert!(is_closure_or_generator(
- the_place_err.ty(self.mir, self.infcx.tcx).ty
+ the_place_err.ty(self.body, self.infcx.tcx).ty
));
reason = if self.is_upvar_field_projection(access_place).is_some() {
}
} else if {
if let Place::Base(PlaceBase::Local(local)) = *base {
- self.mir.local_decls[local].is_ref_for_guard()
+ self.body.local_decls[local].is_ref_for_guard()
} else {
false
}
reason = ", as it is immutable for the pattern guard".to_string();
} else {
let pointer_type =
- if base.ty(self.mir, self.infcx.tcx).ty.is_region_ptr() {
+ if base.ty(self.body, self.infcx.tcx).ty.is_region_ptr() {
"`&` reference"
} else {
"`*const` pointer"
if let Some((span, message)) = annotate_struct_field(
self.infcx.tcx,
- base.ty(self.mir, self.infcx.tcx).ty,
+ base.ty(self.body, self.infcx.tcx).ty,
field,
) {
err.span_suggestion(
// Suggest removing a `&mut` from the use of a mutable reference.
Place::Base(PlaceBase::Local(local))
if {
- self.mir.local_decls.get(*local).map(|local_decl| {
+ self.body.local_decls.get(*local).map(|local_decl| {
if let ClearCrossCrate::Set(
mir::BindingForm::ImplicitSelf(kind)
) = local_decl.is_user_variable.as_ref().unwrap() {
// We want to suggest users use `let mut` for local (user
// variable) mutations...
Place::Base(PlaceBase::Local(local))
- if self.mir.local_decls[*local].can_be_made_mutable() => {
+ if self.body.local_decls[*local].can_be_made_mutable() => {
// ... but it doesn't make sense to suggest it on
// variables that are `ref x`, `ref mut x`, `&self`,
// or `&mut self` (such variables are simply not
// mutable).
- let local_decl = &self.mir.local_decls[*local];
+ let local_decl = &self.body.local_decls[*local];
assert_eq!(local_decl.mutability, Mutability::Not);
err.span_label(span, format!("cannot {ACT}", ACT = act));
elem: ProjectionElem::Field(upvar_index, _),
}) => {
debug_assert!(is_closure_or_generator(
- base.ty(self.mir, self.infcx.tcx).ty
+ base.ty(self.body, self.infcx.tcx).ty
));
err.span_label(span, format!("cannot {ACT}", ACT = act));
elem: ProjectionElem::Deref,
}) if {
if let Some(ClearCrossCrate::Set(BindingForm::RefForGuard)) =
- self.mir.local_decls[*local].is_user_variable
+ self.body.local_decls[*local].is_user_variable
{
true
} else {
Place::Projection(box Projection {
base: Place::Base(PlaceBase::Local(local)),
elem: ProjectionElem::Deref,
- }) if self.mir.local_decls[*local].is_user_variable.is_some() =>
+ }) if self.body.local_decls[*local].is_user_variable.is_some() =>
{
- let local_decl = &self.mir.local_decls[*local];
+ let local_decl = &self.body.local_decls[*local];
let suggestion = match local_decl.is_user_variable.as_ref().unwrap() {
ClearCrossCrate::Set(mir::BindingForm::ImplicitSelf(_)) => {
Some(suggest_ampmut_self(self.infcx.tcx, local_decl))
..
})) => Some(suggest_ampmut(
self.infcx.tcx,
- self.mir,
+ self.body,
*local,
local_decl,
*opt_ty_info,
{
err.span_label(span, format!("cannot {ACT}", ACT = act));
err.span_help(
- self.mir.span,
+ self.body.span,
"consider changing this to accept closures that implement `FnMut`"
);
}
},
..
}
- ) = &self.mir.basic_blocks()[location.block].terminator {
+ ) = &self.body.basic_blocks()[location.block].terminator {
let index_trait = self.infcx.tcx.lang_items().index_trait();
if self.infcx.tcx.parent(id) == index_trait {
let mut found = false;
// by trying (3.), then (2.) and finally falling back on (1.).
fn suggest_ampmut<'cx, 'gcx, 'tcx>(
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
local: Local,
local_decl: &mir::LocalDecl<'tcx>,
opt_ty_info: Option<Span>,
) -> (Span, String) {
- let locations = mir.find_assignments(local);
+ let locations = body.find_assignments(local);
if !locations.is_empty() {
- let assignment_rhs_span = mir.source_info(locations[0]).span;
+ let assignment_rhs_span = body.source_info(locations[0]).span;
if let Ok(src) = tcx.sess.source_map().span_to_snippet(assignment_rhs_span) {
if let (true, Some(ws_pos)) = (
src.starts_with("&'"),
liveness_constraints: &mut LivenessValues<RegionVid>,
all_facts: &mut Option<AllFacts>,
location_table: &LocationTable,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
borrow_set: &BorrowSet<'tcx>,
) {
let mut cg = ConstraintGeneration {
all_facts,
};
- for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
cg.visit_basic_block_data(bb, data);
}
}
use rustc_data_structures::fx::FxHashSet;
crate fn find<'tcx>(
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
regioncx: &Rc<RegionInferenceContext<'tcx>>,
tcx: TyCtxt<'_, '_, 'tcx>,
region_vid: RegionVid,
start_point: Location,
) -> Option<Cause> {
let mut uf = UseFinder {
- mir,
+ body,
regioncx,
tcx,
region_vid,
}
struct UseFinder<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
- mir: &'cx Body<'tcx>,
+ body: &'cx Body<'tcx>,
regioncx: &'cx Rc<RegionInferenceContext<'tcx>>,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
region_vid: RegionVid,
continue;
}
- let block_data = &self.mir[p.block];
+ let block_data = &self.body[p.block];
match self.def_use(p, block_data.visitable(p.statement_index)) {
Some(DefUseResult::Def) => {}
fn def_use(&self, location: Location, thing: &dyn MirVisitable<'tcx>) -> Option<DefUseResult> {
let mut visitor = DefUseVisitor {
- mir: self.mir,
+ body: self.body,
tcx: self.tcx,
region_vid: self.region_vid,
def_use_result: None,
}
struct DefUseVisitor<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
- mir: &'cx Body<'tcx>,
+ body: &'cx Body<'tcx>,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
region_vid: RegionVid,
def_use_result: Option<DefUseResult>,
impl<'cx, 'gcx, 'tcx> Visitor<'tcx> for DefUseVisitor<'cx, 'gcx, 'tcx> {
fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
- let local_ty = self.mir.local_decls[local].ty;
+ let local_ty = self.body.local_decls[local].ty;
let mut found_it = false;
self.tcx.for_each_free_region(&local_ty, |r| {
pub(in crate::borrow_check) fn add_explanation_to_diagnostic<'cx, 'gcx, 'tcx>(
&self,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
err: &mut DiagnosticBuilder<'_>,
borrow_desc: &str,
borrow_span: Option<Span>,
dropped_local,
should_note_order,
} => {
- let local_decl = &mir.local_decls[dropped_local];
+ let local_decl = &body.local_decls[dropped_local];
let (dtor_desc, type_desc) = match local_decl.ty.sty {
// If type is an ADT that implements Drop, then
// simplify output by reporting just the ADT name.
TYPE = type_desc,
DTOR = dtor_desc
);
- err.span_label(mir.source_info(drop_loc).span, message);
+ err.span_label(body.source_info(drop_loc).span, message);
if should_note_order {
err.note(
TYPE = type_desc,
DTOR = dtor_desc
);
- err.span_label(mir.source_info(drop_loc).span, message);
+ err.span_label(body.source_info(drop_loc).span, message);
if let Some(info) = &local_decl.is_block_tail {
// FIXME: use span_suggestion instead, highlighting the
);
let regioncx = &self.nonlexical_regioncx;
- let mir = self.mir;
+ let body = self.body;
let tcx = self.infcx.tcx;
let borrow_region_vid = borrow.region;
region_sub
);
- match find_use::find(mir, regioncx, tcx, region_sub, location) {
+ match find_use::find(body, regioncx, tcx, region_sub, location) {
Some(Cause::LiveVar(local, location)) => {
- let span = mir.source_info(location).span;
+ let span = body.source_info(location).span;
let spans = self
.move_spans(&Place::Base(PlaceBase::Local(local)), location)
.or_else(|| self.borrow_spans(span, location));
Some(Cause::DropVar(local, location)) => {
let mut should_note_order = false;
- if mir.local_decls[local].name.is_some() {
+ if body.local_decls[local].name.is_some() {
if let Some((WriteKind::StorageDeadOrDrop, place)) = kind_place {
if let Place::Base(PlaceBase::Local(borrowed_local)) = place {
- if mir.local_decls[*borrowed_local].name.is_some()
+ if body.local_decls[*borrowed_local].name.is_some()
&& local != *borrowed_local
{
should_note_order = true;
if let Some(region) = regioncx.to_error_region_vid(borrow_region_vid) {
let (category, from_closure, span, region_name) =
self.nonlexical_regioncx.free_region_constraint_info(
- self.mir,
+ self.body,
&self.upvars,
self.mir_def_id,
self.infcx,
return outmost_back_edge;
}
- let block = &self.mir.basic_blocks()[location.block];
+ let block = &self.body.basic_blocks()[location.block];
if location.statement_index < block.statements.len() {
let successor = location.successor_within_block();
}
if loop_head.dominates(from, &self.dominators) {
- let block = &self.mir.basic_blocks()[from.block];
+ let block = &self.body.basic_blocks()[from.block];
if from.statement_index < block.statements.len() {
let successor = from.successor_within_block();
/// True if an edge `source -> target` is a backedge -- in other words, if the target
/// dominates the source.
fn is_back_edge(&self, source: Location, target: Location) -> bool {
- target.dominates(source, &self.mir.dominators())
+ target.dominates(source, &self.body.dominators())
}
/// Determine how the borrow was later used.
(LaterUseKind::ClosureCapture, var_span)
}
UseSpans::OtherUse(span) => {
- let block = &self.mir.basic_blocks()[location.block];
+ let block = &self.body.basic_blocks()[location.block];
let kind = if let Some(&Statement {
kind: StatementKind::FakeRead(FakeReadCause::ForLet, _),
Operand::Constant(c) => c.span,
Operand::Copy(Place::Base(PlaceBase::Local(l))) |
Operand::Move(Place::Base(PlaceBase::Local(l))) => {
- let local_decl = &self.mir.local_decls[*l];
+ let local_decl = &self.body.local_decls[*l];
if local_decl.name.is_none() {
local_decl.source_info.span
} else {
fn was_captured_by_trait_object(&self, borrow: &BorrowData<'tcx>) -> bool {
// Start at the reserve location, find the place that we want to see cast to a trait object.
let location = borrow.reserve_location;
- let block = &self.mir[location.block];
+ let block = &self.body[location.block];
let stmt = block.statements.get(location.statement_index);
debug!(
"was_captured_by_trait_object: location={:?} stmt={:?}",
);
while let Some(current_location) = queue.pop() {
debug!("was_captured_by_trait: target={:?}", target);
- let block = &self.mir[current_location.block];
+ let block = &self.body[current_location.block];
// We need to check the current location to find out if it is a terminator.
let is_terminator = current_location.statement_index == block.statements.len();
if !is_terminator {
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
all_facts: &mut Option<AllFacts>,
location_table: &LocationTable,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
borrow_set: &BorrowSet<'tcx>,
) {
if all_facts.is_none() {
}
if let Some(all_facts) = all_facts {
- let dominators = mir.dominators();
+ let dominators = body.dominators();
let mut ig = InvalidationGenerator {
all_facts,
borrow_set,
tcx,
location_table,
- mir,
+ body,
dominators,
};
- ig.visit_body(mir);
+ ig.visit_body(body);
}
}
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
all_facts: &'cx mut AllFacts,
location_table: &'cx LocationTable,
- mir: &'cx Body<'tcx>,
+ body: &'cx Body<'tcx>,
dominators: Dominators<BasicBlock>,
borrow_set: &'cx BorrowSet<'tcx>,
}
}
}
-impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> {
+impl<'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> {
/// Simulates mutation of a place.
fn mutate_place(
&mut self,
rw,
);
let tcx = self.tcx;
- let mir = self.mir;
+ let body = self.body;
let borrow_set = self.borrow_set.clone();
let indices = self.borrow_set.borrows.indices();
each_borrow_involving_path(
self,
tcx,
- mir,
+ body,
location,
(sd, place),
&borrow_set.clone(),
infcx: &InferCtxt<'cx, 'gcx, 'tcx>,
def_id: DefId,
param_env: ty::ParamEnv<'tcx>,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
) -> UniversalRegions<'tcx> {
debug!("replace_regions_in_mir(def_id={:?})", def_id);
let universal_regions = UniversalRegions::new(infcx, def_id, param_env);
// Replace all remaining regions with fresh inference variables.
- renumber::renumber_mir(infcx, mir);
+ renumber::renumber_mir(infcx, body);
let source = MirSource::item(def_id);
- mir_util::dump_mir(infcx.tcx, None, "renumber", &0, source, mir, |_, _| Ok(()));
+ mir_util::dump_mir(infcx.tcx, None, "renumber", &0, source, body, |_, _| Ok(()));
universal_regions
}
infcx: &InferCtxt<'cx, 'gcx, 'tcx>,
def_id: DefId,
universal_regions: UniversalRegions<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
location_table: &LocationTable,
param_env: ty::ParamEnv<'gcx>,
let universal_regions = Rc::new(universal_regions);
- let elements = &Rc::new(RegionValueElements::new(mir));
+ let elements = &Rc::new(RegionValueElements::new(body));
// Run the MIR type-checker.
let MirTypeckResults {
} = type_check::type_check(
infcx,
param_env,
- mir,
+ body,
def_id,
&universal_regions,
location_table,
&mut liveness_constraints,
&mut all_facts,
location_table,
- &mir,
+ &body,
borrow_set,
);
universal_regions,
placeholder_indices,
universal_region_relations,
- mir,
+ body,
outlives_constraints,
closure_bounds_mapping,
type_tests,
infcx.tcx,
&mut all_facts,
location_table,
- &mir,
+ &body,
borrow_set,
);
// Solve the region constraints.
let closure_region_requirements =
- regioncx.solve(infcx, &mir, upvars, def_id, errors_buffer);
+ regioncx.solve(infcx, &body, upvars, def_id, errors_buffer);
// Dump MIR results into a file, if that is enabled. This let us
// write unit-tests, as well as helping with debugging.
dump_mir_results(
infcx,
MirSource::item(def_id),
- &mir,
+ &body,
®ioncx,
&closure_region_requirements,
);
// We also have a `#[rustc_nll]` annotation that causes us to dump
// information
- dump_annotation(infcx, &mir, def_id, ®ioncx, &closure_region_requirements, errors_buffer);
+ dump_annotation(infcx, &body, def_id, ®ioncx, &closure_region_requirements, errors_buffer);
(regioncx, polonius_output, closure_region_requirements)
}
fn dump_mir_results<'a, 'gcx, 'tcx>(
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
regioncx: &RegionInferenceContext<'_>,
closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
) {
"nll",
&0,
source,
- mir,
+ body,
|pass_where, out| {
match pass_where {
// Before the CFG, dump out the values for each region variable.
fn dump_annotation<'a, 'gcx, 'tcx>(
infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
regioncx: &RegionInferenceContext<'tcx>,
closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
let mut err = tcx
.sess
.diagnostic()
- .span_note_diag(mir.span, "External requirements");
+ .span_note_diag(body.span, "External requirements");
regioncx.annotate(tcx, &mut err);
let mut err = tcx
.sess
.diagnostic()
- .span_note_diag(mir.span, "No external requirements");
+ .span_note_diag(body.span, "No external requirements");
regioncx.annotate(tcx, &mut err);
err.buffer(errors_buffer);
/// path to blame.
fn best_blame_constraint(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
from_region: RegionVid,
target_test: impl Fn(RegionVid) -> bool,
) -> (ConstraintCategory, bool, Span) {
let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path.iter()
.map(|constraint| {
if constraint.category == ConstraintCategory::ClosureBounds {
- self.retrieve_closure_constraint_info(mir, &constraint)
+ self.retrieve_closure_constraint_info(body, &constraint)
} else {
- (constraint.category, false, constraint.locations.span(mir))
+ (constraint.category, false, constraint.locations.span(body))
}
})
.collect();
/// Here we would be invoked with `fr = 'a` and `outlived_fr = `'b`.
pub(super) fn report_error(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
infcx: &InferCtxt<'_, '_, 'tcx>,
mir_def_id: DefId,
) {
debug!("report_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr);
- let (category, _, span) = self.best_blame_constraint(mir, fr, |r| {
+ let (category, _, span) = self.best_blame_constraint(body, fr, |r| {
self.provides_universal_region(r, fr, outlived_fr)
});
match (category, fr_is_local, outlived_fr_is_local) {
(ConstraintCategory::Return, true, false) if self.is_closure_fn_mut(infcx, fr) => {
self.report_fnmut_error(
- mir,
+ body,
upvars,
infcx,
mir_def_id,
}
(ConstraintCategory::Assignment, true, false)
| (ConstraintCategory::CallArgument, true, false) => self.report_escaping_data_error(
- mir,
+ body,
upvars,
infcx,
mir_def_id,
errors_buffer,
),
_ => self.report_general_error(
- mir,
+ body,
upvars,
infcx,
mir_def_id,
/// ```
fn report_fnmut_error(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
infcx: &InferCtxt<'_, '_, 'tcx>,
mir_def_id: DefId,
diag.span_label(span, message);
- match self.give_region_a_name(infcx, mir, upvars, mir_def_id, outlived_fr, &mut 1)
+ match self.give_region_a_name(infcx, body, upvars, mir_def_id, outlived_fr, &mut 1)
.unwrap().source
{
RegionNameSource::NamedEarlyBoundRegion(fr_span)
/// ```
fn report_escaping_data_error(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
infcx: &InferCtxt<'_, '_, 'tcx>,
mir_def_id: DefId,
errors_buffer: &mut Vec<Diagnostic>,
) {
let fr_name_and_span =
- self.get_var_name_and_span_for_region(infcx.tcx, mir, upvars, fr);
+ self.get_var_name_and_span_for_region(infcx.tcx, body, upvars, fr);
let outlived_fr_name_and_span =
- self.get_var_name_and_span_for_region(infcx.tcx, mir, upvars, outlived_fr);
+ self.get_var_name_and_span_for_region(infcx.tcx, body, upvars, outlived_fr);
let escapes_from = match self.universal_regions.defining_ty {
DefiningTy::Closure(..) => "closure",
|| escapes_from == "const"
{
return self.report_general_error(
- mir,
+ body,
upvars,
infcx,
mir_def_id,
/// ```
fn report_general_error(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
infcx: &InferCtxt<'_, '_, 'tcx>,
mir_def_id: DefId,
);
let counter = &mut 1;
- let fr_name = self.give_region_a_name(infcx, mir, upvars, mir_def_id, fr, counter).unwrap();
+ let fr_name = self.give_region_a_name(
+ infcx, body, upvars, mir_def_id, fr, counter).unwrap();
fr_name.highlight_region_name(&mut diag);
let outlived_fr_name =
- self.give_region_a_name(infcx, mir, upvars, mir_def_id, outlived_fr, counter).unwrap();
+ self.give_region_a_name(infcx, body, upvars, mir_def_id, outlived_fr, counter).unwrap();
outlived_fr_name.highlight_region_name(&mut diag);
let mir_def_name = if infcx.tcx.is_closure(mir_def_id) {
crate fn free_region_constraint_info(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
infcx: &InferCtxt<'_, '_, 'tcx>,
outlived_region: RegionVid,
) -> (ConstraintCategory, bool, Span, Option<RegionName>) {
let (category, from_closure, span) = self.best_blame_constraint(
- mir,
+ body,
borrow_region,
|r| self.provides_universal_region(r, borrow_region, outlived_region)
);
let outlived_fr_name =
- self.give_region_a_name(infcx, mir, upvars, mir_def_id, outlived_region, &mut 1);
+ self.give_region_a_name(infcx, body, upvars, mir_def_id, outlived_region, &mut 1);
(category, from_closure, span, outlived_fr_name)
}
// Finds a good span to blame for the fact that `fr1` outlives `fr2`.
crate fn find_outlives_blame_span(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
fr1: RegionVid,
fr2: RegionVid,
) -> (ConstraintCategory, Span) {
let (category, _, span) =
- self.best_blame_constraint(mir, fr1, |r| self.provides_universal_region(r, fr1, fr2));
+ self.best_blame_constraint(body, fr1, |r| self.provides_universal_region(r, fr1, fr2));
(category, span)
}
fn retrieve_closure_constraint_info(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
constraint: &OutlivesConstraint,
) -> (ConstraintCategory, bool, Span) {
let loc = match constraint.locations {
self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub));
opt_span_category
.map(|&(category, span)| (category, true, span))
- .unwrap_or((constraint.category, false, mir.source_info(loc).span))
+ .unwrap_or((constraint.category, false, body.source_info(loc).span))
}
/// Returns `true` if a closure is inferred to be an `FnMut` closure.
crate fn give_region_a_name(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
fr: RegionVid,
let value = self.give_name_from_error_region(infcx.tcx, mir_def_id, fr, counter)
.or_else(|| {
self.give_name_if_anonymous_region_appears_in_arguments(
- infcx, mir, mir_def_id, fr, counter,
+ infcx, body, mir_def_id, fr, counter,
)
})
.or_else(|| {
})
.or_else(|| {
self.give_name_if_anonymous_region_appears_in_output(
- infcx, mir, mir_def_id, fr, counter,
+ infcx, body, mir_def_id, fr, counter,
)
})
.or_else(|| {
self.give_name_if_anonymous_region_appears_in_yield_ty(
- infcx, mir, mir_def_id, fr, counter,
+ infcx, body, mir_def_id, fr, counter,
)
});
fn give_name_if_anonymous_region_appears_in_arguments(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
fr: RegionVid,
counter: &mut usize,
self.universal_regions.unnormalized_input_tys[implicit_inputs + argument_index];
if let Some(region_name) = self.give_name_if_we_can_match_hir_ty_from_argument(
infcx,
- mir,
+ body,
mir_def_id,
fr,
arg_ty,
return Some(region_name);
}
- self.give_name_if_we_cannot_match_hir_ty(infcx, mir, fr, arg_ty, counter)
+ self.give_name_if_we_cannot_match_hir_ty(infcx, body, fr, arg_ty, counter)
}
fn give_name_if_we_can_match_hir_ty_from_argument(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
needle_fr: RegionVid,
argument_ty: Ty<'tcx>,
// must highlight the variable.
hir::TyKind::Infer => self.give_name_if_we_cannot_match_hir_ty(
infcx,
- mir,
+ body,
needle_fr,
argument_ty,
counter,
fn give_name_if_we_cannot_match_hir_ty(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
needle_fr: RegionVid,
argument_ty: Ty<'tcx>,
counter: &mut usize,
let assigned_region_name = if type_name.find(&format!("'{}", counter)).is_some() {
// Only add a label if we can confirm that a region was labelled.
let argument_index = self.get_argument_index_for_region(infcx.tcx, needle_fr)?;
- let (_, span) = self.get_argument_name_and_span_for_region(mir, argument_index);
+ let (_, span) = self.get_argument_name_and_span_for_region(body, argument_index);
Some(RegionName {
// This counter value will already have been used, so this function will increment
fn give_name_if_anonymous_region_appears_in_output(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
fr: RegionVid,
counter: &mut usize,
node: hir::ImplItemKind::Method(method_sig, _),
..
}) => (method_sig.decl.output.span(), ""),
- _ => (mir.span, ""),
+ _ => (body.span, ""),
};
Some(RegionName {
fn give_name_if_anonymous_region_appears_in_yield_ty(
&self,
infcx: &InferCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
fr: RegionVid,
counter: &mut usize,
}) => (
tcx.sess.source_map().end_point(*span)
),
- _ => mir.span,
+ _ => body.span,
};
debug!(
crate fn get_var_name_and_span_for_region(
&self,
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
fr: RegionVid,
) -> Option<(Option<Symbol>, Span)> {
.or_else(|| {
debug!("get_var_name_and_span_for_region: attempting argument");
self.get_argument_index_for_region(tcx, fr)
- .map(|index| self.get_argument_name_and_span_for_region(mir, index))
+ .map(|index| self.get_argument_name_and_span_for_region(body, index))
})
}
/// declared.
crate fn get_argument_name_and_span_for_region(
&self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
argument_index: usize,
) -> (Option<Symbol>, Span) {
let implicit_inputs = self.universal_regions.defining_ty.implicit_inputs();
let argument_local = Local::new(implicit_inputs + argument_index + 1);
debug!("get_argument_name_and_span_for_region: argument_local={:?}", argument_local);
- let argument_name = mir.local_decls[argument_local].name;
- let argument_span = mir.local_decls[argument_local].source_info.span;
+ let argument_name = body.local_decls[argument_local].name;
+ let argument_span = body.local_decls[argument_local].source_info.span;
debug!("get_argument_name_and_span_for_region: argument_name={:?} argument_span={:?}",
argument_name, argument_span);
universal_regions: Rc<UniversalRegions<'tcx>>,
placeholder_indices: Rc<PlaceholderIndices>,
universal_region_relations: Rc<UniversalRegionRelations<'tcx>>,
- _mir: &Body<'tcx>,
+ _body: &Body<'tcx>,
outlives_constraints: ConstraintSet,
closure_bounds_mapping: FxHashMap<
Location,
pub(super) fn solve<'gcx>(
&mut self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
errors_buffer: &mut Vec<Diagnostic>,
infcx.tcx.sess.time_extended(),
Some(infcx.tcx.sess),
&format!("solve_nll_region_constraints({:?})", mir_def_id),
- || self.solve_inner(infcx, mir, upvars, mir_def_id, errors_buffer),
+ || self.solve_inner(infcx, body, upvars, mir_def_id, errors_buffer),
)
}
fn solve_inner<'gcx>(
&mut self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
errors_buffer: &mut Vec<Diagnostic>,
) -> Option<ClosureRegionRequirements<'gcx>> {
- self.propagate_constraints(mir);
+ self.propagate_constraints(body);
// If this is a closure, we can propagate unsatisfied
// `outlives_requirements` to our creator, so create a vector
self.check_type_tests(
infcx,
- mir,
+ body,
mir_def_id,
outlives_requirements.as_mut(),
errors_buffer,
self.check_universal_regions(
infcx,
- mir,
+ body,
upvars,
mir_def_id,
outlives_requirements.as_mut(),
/// for each region variable until all the constraints are
/// satisfied. Note that some values may grow **too** large to be
/// feasible, but we check this later.
- fn propagate_constraints(&mut self, _mir: &Body<'tcx>) {
+ fn propagate_constraints(&mut self, _body: &Body<'tcx>) {
debug!("propagate_constraints()");
debug!("propagate_constraints: constraints={:#?}", {
fn check_type_tests<'gcx>(
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'gcx>>>,
errors_buffer: &mut Vec<Diagnostic>,
let generic_ty = type_test.generic_kind.to_ty(tcx);
if self.eval_verify_bound(
tcx,
- mir,
+ body,
generic_ty,
type_test.lower_bound,
&type_test.verify_bound,
if let Some(propagated_outlives_requirements) = &mut propagated_outlives_requirements {
if self.try_promote_type_test(
infcx,
- mir,
+ body,
type_test,
propagated_outlives_requirements,
) {
let lower_bound_region = self.to_error_region(type_test.lower_bound);
// Skip duplicate-ish errors.
- let type_test_span = type_test.locations.span(mir);
+ let type_test_span = type_test.locations.span(body);
let erased_generic_kind = tcx.erase_regions(&type_test.generic_kind);
if !deduplicate_errors.insert((
erased_generic_kind,
fn try_promote_type_test<'gcx>(
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
type_test: &TypeTest<'tcx>,
propagated_outlives_requirements: &mut Vec<ClosureOutlivesRequirement<'gcx>>,
) -> bool {
// where `ur` is a local bound -- we are sometimes in a
// position to prove things that our caller cannot. See
// #53570 for an example.
- if self.eval_verify_bound(tcx, mir, generic_ty, ur, &type_test.verify_bound) {
+ if self.eval_verify_bound(tcx, body, generic_ty, ur, &type_test.verify_bound) {
continue;
}
let requirement = ClosureOutlivesRequirement {
subject,
outlived_free_region: upper_bound,
- blame_span: locations.span(mir),
+ blame_span: locations.span(body),
category: ConstraintCategory::Boring,
};
debug!("try_promote_type_test: pushing {:#?}", requirement);
fn eval_verify_bound(
&self,
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
generic_ty: Ty<'tcx>,
lower_bound: RegionVid,
verify_bound: &VerifyBound<'tcx>,
match verify_bound {
VerifyBound::IfEq(test_ty, verify_bound1) => {
- self.eval_if_eq(tcx, mir, generic_ty, lower_bound, test_ty, verify_bound1)
+ self.eval_if_eq(tcx, body, generic_ty, lower_bound, test_ty, verify_bound1)
}
VerifyBound::OutlivedBy(r) => {
let r_vid = self.to_region_vid(r);
- self.eval_outlives(mir, r_vid, lower_bound)
+ self.eval_outlives(body, r_vid, lower_bound)
}
VerifyBound::AnyBound(verify_bounds) => verify_bounds.iter().any(|verify_bound| {
- self.eval_verify_bound(tcx, mir, generic_ty, lower_bound, verify_bound)
+ self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
}),
VerifyBound::AllBounds(verify_bounds) => verify_bounds.iter().all(|verify_bound| {
- self.eval_verify_bound(tcx, mir, generic_ty, lower_bound, verify_bound)
+ self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
}),
}
}
fn eval_if_eq(
&self,
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
generic_ty: Ty<'tcx>,
lower_bound: RegionVid,
test_ty: Ty<'tcx>,
let generic_ty_normalized = self.normalize_to_scc_representatives(tcx, generic_ty);
let test_ty_normalized = self.normalize_to_scc_representatives(tcx, test_ty);
if generic_ty_normalized == test_ty_normalized {
- self.eval_verify_bound(tcx, mir, generic_ty, lower_bound, verify_bound)
+ self.eval_verify_bound(tcx, body, generic_ty, lower_bound, verify_bound)
} else {
false
}
// Evaluate whether `sup_region: sub_region @ point`.
fn eval_outlives(
&self,
- _mir: &Body<'tcx>,
+ _body: &Body<'tcx>,
sup_region: RegionVid,
sub_region: RegionVid,
) -> bool {
fn check_universal_regions<'gcx>(
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'gcx>>>,
// for our caller into the `outlives_requirements` vector.
self.check_universal_region(
infcx,
- mir,
+ body,
upvars,
mir_def_id,
fr,
}
NLLRegionVariableOrigin::Placeholder(placeholder) => {
- self.check_bound_universal_region(infcx, mir, mir_def_id, fr, placeholder);
+ self.check_bound_universal_region(infcx, body, mir_def_id, fr, placeholder);
}
NLLRegionVariableOrigin::Existential => {
fn check_universal_region<'gcx>(
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
longer_fr: RegionVid,
longer_fr,
representative,
infcx,
- mir,
+ body,
upvars,
mir_def_id,
propagated_outlives_requirements,
longer_fr,
shorter_fr,
infcx,
- mir,
+ body,
upvars,
mir_def_id,
propagated_outlives_requirements,
longer_fr: RegionVid,
shorter_fr: RegionVid,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
upvars: &[Upvar],
mir_def_id: DefId,
propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'gcx>>>,
{
debug!("check_universal_region: fr_minus={:?}", fr_minus);
- let blame_span_category = self.find_outlives_blame_span(mir, longer_fr, shorter_fr);
+ let blame_span_category =
+ self.find_outlives_blame_span(body, longer_fr, shorter_fr);
// Grow `shorter_fr` until we find some non-local regions. (We
// always will.) We'll call them `shorter_fr+` -- they're ever
//
// Note: in this case, we use the unapproximated regions to report the
// error. This gives better error messages in some cases.
- self.report_error(mir, upvars, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer);
+ self.report_error(body, upvars, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer);
Some(ErrorReported)
}
fn check_bound_universal_region<'gcx>(
&self,
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
_mir_def_id: DefId,
longer_fr: RegionVid,
placeholder: ty::PlaceholderRegion,
};
// Find the code to blame for the fact that `longer_fr` outlives `error_fr`.
- let (_, span) = self.find_outlives_blame_span(mir, longer_fr, error_region);
+ let (_, span) = self.find_outlives_blame_span(body, longer_fr, error_region);
// Obviously, this error message is far from satisfactory.
// At present, though, it only appears in unit tests --
}
impl RegionValueElements {
- crate fn new(mir: &Body<'_>) -> Self {
+ crate fn new(body: &Body<'_>) -> Self {
let mut num_points = 0;
- let statements_before_block: IndexVec<BasicBlock, usize> = mir.basic_blocks()
+ let statements_before_block: IndexVec<BasicBlock, usize> = body.basic_blocks()
.iter()
.map(|block_data| {
let v = num_points;
debug!("RegionValueElements: num_points={:#?}", num_points);
let mut basic_blocks = IndexVec::with_capacity(num_points);
- for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
basic_blocks.extend((0..=bb_data.statements.len()).map(|_| bb));
}
/// Pushes all predecessors of `index` onto `stack`.
crate fn push_predecessors(
&self,
- mir: &Body<'_>,
+ body: &Body<'_>,
index: PointIndex,
stack: &mut Vec<PointIndex>,
) {
// If this is a basic block head, then the predecessors are
// the terminators of other basic blocks
stack.extend(
- mir.predecessors_for(block)
+ body.predecessors_for(block)
.iter()
- .map(|&pred_bb| mir.terminator_loc(pred_bb))
+ .map(|&pred_bb| body.terminator_loc(pred_bb))
.map(|pred_loc| self.point_from_location(pred_loc)),
);
} else {
/// Replaces all free regions appearing in the MIR with fresh
/// inference variables, returning the number of variables created.
-pub fn renumber_mir<'tcx>(infcx: &InferCtxt<'_, '_, 'tcx>, mir: &mut Body<'tcx>) {
+pub fn renumber_mir<'tcx>(infcx: &InferCtxt<'_, '_, 'tcx>, body: &mut Body<'tcx>) {
debug!("renumber_mir()");
- debug!("renumber_mir: mir.arg_count={:?}", mir.arg_count);
+ debug!("renumber_mir: body.arg_count={:?}", body.arg_count);
let mut visitor = NLLVisitor { infcx };
- visitor.visit_body(mir);
+ visitor.visit_body(body);
}
/// Replaces all regions appearing in `value` with fresh inference
}
impl<'a, 'gcx, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'gcx, 'tcx> {
- fn visit_body(&mut self, mir: &mut Body<'tcx>) {
- for promoted in mir.promoted.iter_mut() {
+ fn visit_body(&mut self, body: &mut Body<'tcx>) {
+ for promoted in body.promoted.iter_mut() {
self.visit_body(promoted);
}
- self.super_body(mir);
+ self.super_body(body);
}
fn visit_ty(&mut self, ty: &mut Ty<'tcx>, ty_context: TyContext) {
impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
pub(super) fn equate_inputs_and_outputs(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
universal_regions: &UniversalRegions<'tcx>,
normalized_inputs_and_output: &[Ty<'tcx>],
) {
// user-provided signature (e.g., the `_` in the code
// above) with fresh variables.
let (poly_sig, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars(
- mir.span,
+ body.span,
&user_provided_poly_sig,
);
Some(
self.infcx
.replace_bound_vars_with_fresh_vars(
- mir.span,
+ body.span,
LateBoundRegionConversionTime::FnCall,
&poly_sig,
)
normalized_input_ty
);
- let mir_input_ty = mir.local_decls[local].ty;
- let mir_input_span = mir.local_decls[local].source_info.span;
+ let mir_input_ty = body.local_decls[local].ty;
+ let mir_input_span = body.local_decls[local].source_info.span;
self.equate_normalized_input_or_output(
normalized_input_ty,
mir_input_ty,
// In MIR, closures begin an implicit `self`, so
// argument N is stored in local N+2.
let local = Local::new(argument_index + 2);
- let mir_input_ty = mir.local_decls[local].ty;
- let mir_input_span = mir.local_decls[local].source_info.span;
+ let mir_input_ty = body.local_decls[local].ty;
+ let mir_input_span = body.local_decls[local].source_info.span;
// If the user explicitly annotated the input types, enforce those.
let user_provided_input_ty =
}
assert!(
- mir.yield_ty.is_some() && universal_regions.yield_ty.is_some()
- || mir.yield_ty.is_none() && universal_regions.yield_ty.is_none()
+ body.yield_ty.is_some() && universal_regions.yield_ty.is_some()
+ || body.yield_ty.is_none() && universal_regions.yield_ty.is_none()
);
- if let Some(mir_yield_ty) = mir.yield_ty {
+ if let Some(mir_yield_ty) = body.yield_ty {
let ur_yield_ty = universal_regions.yield_ty.unwrap();
- let yield_span = mir.local_decls[RETURN_PLACE].source_info.span;
+ let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
}
// Return types are a bit more complex. They may contain existential `impl Trait`
// types.
- let mir_output_ty = mir.local_decls[RETURN_PLACE].ty;
- let output_span = mir.local_decls[RETURN_PLACE].source_info.span;
+ let mir_output_ty = body.local_decls[RETURN_PLACE].ty;
+ let output_span = body.local_decls[RETURN_PLACE].source_info.span;
if let Err(terr) = self.eq_opaque_type_and_type(
mir_output_ty,
normalized_output_ty,
crate fn build(
live_locals: &Vec<Local>,
elements: &RegionValueElements,
- mir: &Body<'_>,
+ body: &Body<'_>,
) -> Self {
- let nones = IndexVec::from_elem_n(None, mir.local_decls.len());
+ let nones = IndexVec::from_elem_n(None, body.local_decls.len());
let mut local_use_map = LocalUseMap {
first_def_at: nones.clone(),
first_use_at: nones.clone(),
};
let mut locals_with_use_data: IndexVec<Local, bool> =
- IndexVec::from_elem_n(false, mir.local_decls.len());
+ IndexVec::from_elem_n(false, body.local_decls.len());
live_locals
.iter()
.for_each(|&local| locals_with_use_data[local] = true);
elements,
locals_with_use_data,
}
- .visit_body(mir);
+ .visit_body(body);
local_use_map
}
/// performed before
pub(super) fn generate<'gcx, 'tcx>(
typeck: &mut TypeChecker<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
elements: &Rc<RegionValueElements>,
flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'_, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
// of the `live_locals`.
// FIXME: Review "live" terminology past this point, we should
// not be naming the `Local`s as live.
- mir.local_decls.indices().collect()
+ body.local_decls.indices().collect()
} else {
let free_regions = {
regions_that_outlive_free_regions(
&typeck.borrowck_context.constraints.outlives_constraints,
)
};
- compute_live_locals(typeck.tcx(), &free_regions, mir)
+ compute_live_locals(typeck.tcx(), &free_regions, body)
};
if !live_locals.is_empty() {
trace::trace(
typeck,
- mir,
+ body,
elements,
flow_inits,
move_data,
fn compute_live_locals(
tcx: TyCtxt<'_, '_, 'tcx>,
free_regions: &FxHashSet<RegionVid>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
) -> Vec<Local> {
- let live_locals: Vec<Local> = mir
+ let live_locals: Vec<Local> = body
.local_decls
.iter_enumerated()
.filter_map(|(local, local_decl)| {
})
.collect();
- debug!("{} total variables", mir.local_decls.len());
+ debug!("{} total variables", body.local_decls.len());
debug!("{} variables need liveness", live_locals.len());
debug!("{} regions outlive free regions", free_regions.len());
/// this respects `#[may_dangle]` annotations).
pub(super) fn trace(
typeck: &mut TypeChecker<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
elements: &Rc<RegionValueElements>,
flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'_, 'gcx, 'tcx>>,
move_data: &MoveData<'tcx>,
) {
debug!("trace()");
- let local_use_map = &LocalUseMap::build(&live_locals, elements, mir);
+ let local_use_map = &LocalUseMap::build(&live_locals, elements, body);
let cx = LivenessContext {
typeck,
- mir,
+ body,
flow_inits,
elements,
local_use_map,
elements: &'me RegionValueElements,
/// MIR we are analyzing.
- mir: &'me Body<'tcx>,
+ body: &'me Body<'tcx>,
/// Mapping to/from the various indices used for initialization tracking.
move_data: &'me MoveData<'tcx>,
self.compute_use_live_points_for(local);
self.compute_drop_live_points_for(local);
- let local_ty = self.cx.mir.local_decls[local].ty;
+ let local_ty = self.cx.body.local_decls[local].ty;
if !self.use_live_at.is_empty() {
self.cx.add_use_live_facts_for(local_ty, &self.use_live_at);
if self.use_live_at.insert(p) {
self.cx
.elements
- .push_predecessors(self.cx.mir, p, &mut self.stack)
+ .push_predecessors(self.cx.body, p, &mut self.stack)
}
}
}
// Find the drops where `local` is initialized.
for drop_point in self.cx.local_use_map.drops(local) {
let location = self.cx.elements.to_location(drop_point);
- debug_assert_eq!(self.cx.mir.terminator_loc(location.block), location,);
+ debug_assert_eq!(self.cx.body.terminator_loc(location.block), location,);
if self.cx.initialized_at_terminator(location.block, mpi) {
if self.drop_live_at.insert(drop_point) {
// live point.
let term_location = self.cx.elements.to_location(term_point);
debug_assert_eq!(
- self.cx.mir.terminator_loc(term_location.block),
+ self.cx.body.terminator_loc(term_location.block),
term_location,
);
let block = term_location.block;
}
}
- for &pred_block in self.cx.mir.predecessors_for(block).iter() {
+ for &pred_block in self.cx.body.predecessors_for(block).iter() {
debug!(
"compute_drop_live_points_for_block: pred_block = {:?}",
pred_block,
continue;
}
- let pred_term_loc = self.cx.mir.terminator_loc(pred_block);
+ let pred_term_loc = self.cx.body.terminator_loc(pred_block);
let pred_term_point = self.cx.elements.point_from_location(pred_term_loc);
// If the terminator of this predecessor either *assigns*
// the effects of all statements. This is the only way to get
// "just ahead" of a terminator.
self.flow_inits.reset_to_entry_of(block);
- for statement_index in 0..self.mir[block].statements.len() {
+ for statement_index in 0..self.body[block].statements.len() {
let location = Location {
block,
statement_index,
drop_data.dropck_result.report_overflows(
self.typeck.infcx.tcx,
- self.mir.source_info(*drop_locations.first().unwrap()).span,
+ self.body.source_info(*drop_locations.first().unwrap()).span,
dropped_ty,
);
pub(crate) fn type_check<'gcx, 'tcx>(
infcx: &InferCtxt<'_, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'gcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mir_def_id: DefId,
universal_regions: &Rc<UniversalRegions<'tcx>>,
location_table: &LocationTable,
infcx,
mir_def_id,
param_env,
- mir,
+ body,
®ion_bound_pairs,
implicit_region_bound,
&mut borrowck_context,
&universal_region_relations,
|mut cx| {
- cx.equate_inputs_and_outputs(mir, universal_regions, &normalized_inputs_and_output);
- liveness::generate(&mut cx, mir, elements, flow_inits, move_data, location_table);
+ cx.equate_inputs_and_outputs(body, universal_regions, &normalized_inputs_and_output);
+ liveness::generate(&mut cx, body, elements, flow_inits, move_data, location_table);
translate_outlives_facts(cx.borrowck_context);
},
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
mir_def_id: DefId,
param_env: ty::ParamEnv<'gcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
implicit_region_bound: ty::Region<'tcx>,
borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
) -> R where {
let mut checker = TypeChecker::new(
infcx,
- mir,
+ body,
mir_def_id,
param_env,
region_bound_pairs,
universal_region_relations,
);
let errors_reported = {
- let mut verifier = TypeVerifier::new(&mut checker, mir);
- verifier.visit_body(mir);
+ let mut verifier = TypeVerifier::new(&mut checker, body);
+ verifier.visit_body(body);
verifier.errors_reported
};
if !errors_reported {
// if verifier failed, don't do further checks to avoid ICEs
- checker.typeck_mir(mir);
+ checker.typeck_mir(body);
}
extra(&mut checker)
/// is a problem.
struct TypeVerifier<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> {
cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>,
- mir: &'b Body<'tcx>,
+ body: &'b Body<'tcx>,
last_span: Span,
mir_def_id: DefId,
errors_reported: bool,
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
self.super_rvalue(rvalue, location);
- let rval_ty = rvalue.ty(self.mir, self.tcx());
+ let rval_ty = rvalue.ty(self.body, self.tcx());
self.sanitize_type(rvalue, rval_ty);
}
}
}
- fn visit_body(&mut self, mir: &Body<'tcx>) {
- self.sanitize_type(&"return type", mir.return_ty());
- for local_decl in &mir.local_decls {
+ fn visit_body(&mut self, body: &Body<'tcx>) {
+ self.sanitize_type(&"return type", body.return_ty());
+ for local_decl in &body.local_decls {
self.sanitize_type(local_decl, local_decl.ty);
}
if self.errors_reported {
return;
}
- self.super_body(mir);
+ self.super_body(body);
}
}
impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
- fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'b Body<'tcx>) -> Self {
+ fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, body: &'b Body<'tcx>) -> Self {
TypeVerifier {
- mir,
+ body,
mir_def_id: cx.mir_def_id,
cx,
- last_span: mir.span,
+ last_span: body.span,
errors_reported: false,
}
}
place.iterate(|place_base, place_projection| {
let mut place_ty = match place_base {
PlaceBase::Local(index) =>
- PlaceTy::from_ty(self.mir.local_decls[*index].ty),
+ PlaceTy::from_ty(self.body.local_decls[*index].ty),
PlaceBase::Static(box Static { kind, ty: sty }) => {
let sty = self.sanitize_type(place, sty);
let check_err =
match kind {
StaticKind::Promoted(promoted) => {
if !self.errors_reported {
- let promoted_mir = &self.mir.promoted[*promoted];
- self.sanitize_promoted(promoted_mir, location);
+ let promoted_body = &self.body.promoted[*promoted];
+ self.sanitize_promoted(promoted_body, location);
- let promoted_ty = promoted_mir.return_ty();
+ let promoted_ty = promoted_body.return_ty();
check_err(self, place, promoted_ty, sty);
}
}
})
}
- fn sanitize_promoted(&mut self, promoted_mir: &'b Body<'tcx>, location: Location) {
+ fn sanitize_promoted(&mut self, promoted_body: &'b Body<'tcx>, location: Location) {
// Determine the constraints from the promoted MIR by running the type
// checker on the promoted MIR, then transfer the constraints back to
// the main MIR, changing the locations to the provided location.
- let parent_mir = mem::replace(&mut self.mir, promoted_mir);
+ let parent_body = mem::replace(&mut self.body, promoted_body);
let all_facts = &mut None;
let mut constraints = Default::default();
&mut closure_bounds
);
- self.visit_body(promoted_mir);
+ self.visit_body(promoted_body);
if !self.errors_reported {
// if verifier failed, don't do further checks to avoid ICEs
- self.cx.typeck_mir(promoted_mir);
+ self.cx.typeck_mir(promoted_body);
}
- self.mir = parent_mir;
+ self.body = parent_body;
// Merge the outlives constraints back in, at the given location.
mem::swap(self.cx.borrowck_context.all_facts, all_facts);
mem::swap(
)
}
ProjectionElem::Index(i) => {
- let index_ty = Place::Base(PlaceBase::Local(i)).ty(self.mir, tcx).ty;
+ let index_ty = Place::Base(PlaceBase::Local(i)).ty(self.body, tcx).ty;
if index_ty != tcx.types.usize {
PlaceTy::from_ty(
span_mirbug_and_err!(self, i, "index by non-usize {:?}", i),
}
/// Gets a span representing the location.
- pub fn span(&self, mir: &Body<'_>) -> Span {
+ pub fn span(&self, body: &Body<'_>) -> Span {
match self {
Locations::All(span) => *span,
- Locations::Single(l) => mir.source_info(*l).span,
+ Locations::Single(l) => body.source_info(*l).span,
}
}
}
impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
fn new(
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mir_def_id: DefId,
param_env: ty::ParamEnv<'gcx>,
region_bound_pairs: &'a RegionBoundPairs<'tcx>,
infcx,
last_span: DUMMY_SP,
mir_def_id,
- user_type_annotations: &mir.user_type_annotations,
+ user_type_annotations: &body.user_type_annotations,
param_env,
region_bound_pairs,
implicit_region_bound,
self.infcx.tcx
}
- fn check_stmt(&mut self, mir: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) {
+ fn check_stmt(&mut self, body: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) {
debug!("check_stmt: {:?}", stmt);
let tcx = self.tcx();
match stmt.kind {
ConstraintCategory::Return
},
Place::Base(PlaceBase::Local(l))
- if !mir.local_decls[l].is_user_variable.is_some() => {
+ if !body.local_decls[l].is_user_variable.is_some() => {
ConstraintCategory::Boring
}
_ => ConstraintCategory::Assignment,
};
- let place_ty = place.ty(mir, tcx).ty;
- let rv_ty = rv.ty(mir, tcx);
+ let place_ty = place.ty(body, tcx).ty;
+ let rv_ty = rv.ty(body, tcx);
if let Err(terr) =
self.sub_types_or_anon(rv_ty, place_ty, location.to_locations(), category)
{
}
}
- self.check_rvalue(mir, rv, location);
+ self.check_rvalue(body, rv, location);
if !self.tcx().features().unsized_locals {
let trait_ref = ty::TraitRef {
def_id: tcx.lang_items().sized_trait().unwrap(),
ref place,
variant_index,
} => {
- let place_type = place.ty(mir, tcx).ty;
+ let place_type = place.ty(body, tcx).ty;
let adt = match place_type.sty {
ty::Adt(adt, _) if adt.is_enum() => adt,
_ => {
};
}
StatementKind::AscribeUserType(ref place, variance, box ref projection) => {
- let place_ty = place.ty(mir, tcx).ty;
+ let place_ty = place.ty(body, tcx).ty;
if let Err(terr) = self.relate_type_and_user_type(
place_ty,
variance,
fn check_terminator(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
term: &Terminator<'tcx>,
term_location: Location,
) {
target: _,
unwind: _,
} => {
- let place_ty = location.ty(mir, tcx).ty;
- let rv_ty = value.ty(mir, tcx);
+ let place_ty = location.ty(body, tcx).ty;
+ let rv_ty = value.ty(body, tcx);
let locations = term_location.to_locations();
if let Err(terr) =
switch_ty,
..
} => {
- let discr_ty = discr.ty(mir, tcx);
+ let discr_ty = discr.ty(body, tcx);
if let Err(terr) = self.sub_types(
discr_ty,
switch_ty,
from_hir_call,
..
} => {
- let func_ty = func.ty(mir, tcx);
+ let func_ty = func.ty(body, tcx);
debug!("check_terminator: call, func_ty={:?}", func_ty);
let sig = match func_ty.sty {
ty::FnDef(..) | ty::FnPtr(_) => func_ty.fn_sig(tcx),
&sig,
);
let sig = self.normalize(sig, term_location);
- self.check_call_dest(mir, term, &sig, destination, term_location);
+ self.check_call_dest(body, term, &sig, destination, term_location);
self.prove_predicates(
sig.inputs_and_output.iter().map(|ty| ty::Predicate::WellFormed(ty)),
.add_element(region_vid, term_location);
}
- self.check_call_inputs(mir, term, &sig, args, term_location, from_hir_call);
+ self.check_call_inputs(body, term, &sig, args, term_location, from_hir_call);
}
TerminatorKind::Assert {
ref cond, ref msg, ..
} => {
- let cond_ty = cond.ty(mir, tcx);
+ let cond_ty = cond.ty(body, tcx);
if cond_ty != tcx.types.bool {
span_mirbug!(self, term, "bad Assert ({:?}, not bool", cond_ty);
}
if let BoundsCheck { ref len, ref index } = *msg {
- if len.ty(mir, tcx) != tcx.types.usize {
+ if len.ty(body, tcx) != tcx.types.usize {
span_mirbug!(self, len, "bounds-check length non-usize {:?}", len)
}
- if index.ty(mir, tcx) != tcx.types.usize {
+ if index.ty(body, tcx) != tcx.types.usize {
span_mirbug!(self, index, "bounds-check index non-usize {:?}", index)
}
}
}
TerminatorKind::Yield { ref value, .. } => {
- let value_ty = value.ty(mir, tcx);
- match mir.yield_ty {
+ let value_ty = value.ty(body, tcx);
+ match body.yield_ty {
None => span_mirbug!(self, term, "yield in non-generator"),
Some(ty) => {
if let Err(terr) = self.sub_types(
fn check_call_dest(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
term: &Terminator<'tcx>,
sig: &ty::FnSig<'tcx>,
destination: &Option<(Place<'tcx>, BasicBlock)>,
let tcx = self.tcx();
match *destination {
Some((ref dest, _target_block)) => {
- let dest_ty = dest.ty(mir, tcx).ty;
+ let dest_ty = dest.ty(body, tcx).ty;
let category = match *dest {
Place::Base(PlaceBase::Local(RETURN_PLACE)) => {
if let BorrowCheckContext {
}
}
Place::Base(PlaceBase::Local(l))
- if !mir.local_decls[l].is_user_variable.is_some() => {
+ if !body.local_decls[l].is_user_variable.is_some() => {
ConstraintCategory::Boring
}
_ => ConstraintCategory::Assignment,
fn check_call_inputs(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
term: &Terminator<'tcx>,
sig: &ty::FnSig<'tcx>,
args: &[Operand<'tcx>],
span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
}
for (n, (fn_arg, op_arg)) in inputs.iter().zip(args).enumerate() {
- let op_arg_ty = op_arg.ty(mir, self.tcx());
+ let op_arg_ty = op_arg.ty(body, self.tcx());
let category = if from_hir_call {
ConstraintCategory::CallArgument
} else {
}
}
- fn check_iscleanup(&mut self, mir: &Body<'tcx>, block_data: &BasicBlockData<'tcx>) {
+ fn check_iscleanup(&mut self, body: &Body<'tcx>, block_data: &BasicBlockData<'tcx>) {
let is_cleanup = block_data.is_cleanup;
self.last_span = block_data.terminator().source_info.span;
match block_data.terminator().kind {
TerminatorKind::Goto { target } => {
- self.assert_iscleanup(mir, block_data, target, is_cleanup)
+ self.assert_iscleanup(body, block_data, target, is_cleanup)
}
TerminatorKind::SwitchInt { ref targets, .. } => for target in targets {
- self.assert_iscleanup(mir, block_data, *target, is_cleanup);
+ self.assert_iscleanup(body, block_data, *target, is_cleanup);
},
TerminatorKind::Resume => if !is_cleanup {
span_mirbug!(self, block_data, "resume on non-cleanup block!")
if is_cleanup {
span_mirbug!(self, block_data, "yield in cleanup block")
}
- self.assert_iscleanup(mir, block_data, resume, is_cleanup);
+ self.assert_iscleanup(body, block_data, resume, is_cleanup);
if let Some(drop) = drop {
- self.assert_iscleanup(mir, block_data, drop, is_cleanup);
+ self.assert_iscleanup(body, block_data, drop, is_cleanup);
}
}
TerminatorKind::Unreachable => {}
cleanup: unwind,
..
} => {
- self.assert_iscleanup(mir, block_data, target, is_cleanup);
+ self.assert_iscleanup(body, block_data, target, is_cleanup);
if let Some(unwind) = unwind {
if is_cleanup {
span_mirbug!(self, block_data, "unwind on cleanup block")
}
- self.assert_iscleanup(mir, block_data, unwind, true);
+ self.assert_iscleanup(body, block_data, unwind, true);
}
}
TerminatorKind::Call {
..
} => {
if let &Some((_, target)) = destination {
- self.assert_iscleanup(mir, block_data, target, is_cleanup);
+ self.assert_iscleanup(body, block_data, target, is_cleanup);
}
if let Some(cleanup) = cleanup {
if is_cleanup {
span_mirbug!(self, block_data, "cleanup on cleanup block")
}
- self.assert_iscleanup(mir, block_data, cleanup, true);
+ self.assert_iscleanup(body, block_data, cleanup, true);
}
}
TerminatorKind::FalseEdges {
real_target,
ref imaginary_targets,
} => {
- self.assert_iscleanup(mir, block_data, real_target, is_cleanup);
+ self.assert_iscleanup(body, block_data, real_target, is_cleanup);
for target in imaginary_targets {
- self.assert_iscleanup(mir, block_data, *target, is_cleanup);
+ self.assert_iscleanup(body, block_data, *target, is_cleanup);
}
}
TerminatorKind::FalseUnwind {
real_target,
unwind,
} => {
- self.assert_iscleanup(mir, block_data, real_target, is_cleanup);
+ self.assert_iscleanup(body, block_data, real_target, is_cleanup);
if let Some(unwind) = unwind {
if is_cleanup {
span_mirbug!(
"cleanup in cleanup block via false unwind"
);
}
- self.assert_iscleanup(mir, block_data, unwind, true);
+ self.assert_iscleanup(body, block_data, unwind, true);
}
}
}
fn assert_iscleanup(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
ctxt: &dyn fmt::Debug,
bb: BasicBlock,
iscleanuppad: bool,
) {
- if mir[bb].is_cleanup != iscleanuppad {
+ if body[bb].is_cleanup != iscleanuppad {
span_mirbug!(
self,
ctxt,
}
}
- fn check_local(&mut self, mir: &Body<'tcx>, local: Local, local_decl: &LocalDecl<'tcx>) {
- match mir.local_kind(local) {
+ fn check_local(&mut self, body: &Body<'tcx>, local: Local, local_decl: &LocalDecl<'tcx>) {
+ match body.local_kind(local) {
LocalKind::ReturnPointer | LocalKind::Arg => {
// return values of normal functions are required to be
// sized by typeck, but return values of ADT constructors are
}
}
- fn check_rvalue(&mut self, mir: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+ fn check_rvalue(&mut self, body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
let tcx = self.tcx();
match rvalue {
Rvalue::Aggregate(ak, ops) => {
- self.check_aggregate_rvalue(mir, rvalue, ak, ops, location)
+ self.check_aggregate_rvalue(body, rvalue, ak, ops, location)
}
Rvalue::Repeat(operand, len) => if *len > 1 {
- let operand_ty = operand.ty(mir, tcx);
+ let operand_ty = operand.ty(body, tcx);
let trait_ref = ty::TraitRef {
def_id: tcx.lang_items().copy_trait().unwrap(),
Rvalue::NullaryOp(_, ty) => {
// Even with unsized locals cannot box an unsized value.
if self.tcx().features().unsized_locals {
- let span = mir.source_info(location).span;
+ let span = body.source_info(location).span;
self.ensure_place_sized(ty, span);
}
Rvalue::Cast(cast_kind, op, ty) => {
match cast_kind {
CastKind::Pointer(PointerCast::ReifyFnPointer) => {
- let fn_sig = op.ty(mir, tcx).fn_sig(tcx);
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
}
CastKind::Pointer(PointerCast::ClosureFnPointer(unsafety)) => {
- let sig = match op.ty(mir, tcx).sty {
+ let sig = match op.ty(body, tcx).sty {
ty::Closure(def_id, substs) => {
substs.closure_sig_ty(def_id, tcx).fn_sig(tcx)
}
}
CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
- let fn_sig = op.ty(mir, tcx).fn_sig(tcx);
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
let &ty = ty;
let trait_ref = ty::TraitRef {
def_id: tcx.lang_items().coerce_unsized_trait().unwrap(),
- substs: tcx.mk_substs_trait(op.ty(mir, tcx), &[ty.into()]),
+ substs: tcx.mk_substs_trait(op.ty(body, tcx), &[ty.into()]),
};
self.prove_trait_ref(
}
CastKind::Pointer(PointerCast::MutToConstPointer) => {
- let ty_from = match op.ty(mir, tcx).sty {
+ let ty_from = match op.ty(body, tcx).sty {
ty::RawPtr(ty::TypeAndMut {
ty: ty_from,
mutbl: hir::MutMutable,
}
CastKind::Misc => {
- if let ty::Ref(_, mut ty_from, _) = op.ty(mir, tcx).sty {
+ if let ty::Ref(_, mut ty_from, _) = op.ty(body, tcx).sty {
let (mut ty_to, mutability) = if let ty::RawPtr(ty::TypeAndMut {
ty: ty_to,
mutbl,
self,
rvalue,
"invalid cast types {:?} -> {:?}",
- op.ty(mir, tcx),
+ op.ty(body, tcx),
ty,
);
return;
}
Rvalue::Ref(region, _borrow_kind, borrowed_place) => {
- self.add_reborrow_constraint(mir, location, region, borrowed_place);
+ self.add_reborrow_constraint(body, location, region, borrowed_place);
}
Rvalue::BinaryOp(BinOp::Eq, left, right)
| Rvalue::BinaryOp(BinOp::Le, left, right)
| Rvalue::BinaryOp(BinOp::Gt, left, right)
| Rvalue::BinaryOp(BinOp::Ge, left, right) => {
- let ty_left = left.ty(mir, tcx);
+ let ty_left = left.ty(body, tcx);
if let ty::RawPtr(_) | ty::FnPtr(_) = ty_left.sty {
- let ty_right = right.ty(mir, tcx);
+ let ty_right = right.ty(body, tcx);
let common_ty = self.infcx.next_ty_var(
TypeVariableOrigin {
kind: TypeVariableOriginKind::MiscVariable,
- span: mir.source_info(location).span,
+ span: body.source_info(location).span,
}
);
self.sub_types(
fn check_aggregate_rvalue(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
rvalue: &Rvalue<'tcx>,
aggregate_kind: &AggregateKind<'tcx>,
operands: &[Operand<'tcx>],
continue;
}
};
- let operand_ty = operand.ty(mir, tcx);
+ let operand_ty = operand.ty(body, tcx);
if let Err(terr) = self.sub_types(
operand_ty,
/// - `borrowed_place`: the place `P` being borrowed
fn add_reborrow_constraint(
&mut self,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
location: Location,
borrow_region: ty::Region<'tcx>,
borrowed_place: &Place<'tcx>,
match *elem {
ProjectionElem::Deref => {
let tcx = self.infcx.tcx;
- let base_ty = base.ty(mir, tcx).ty;
+ let base_ty = base.ty(body, tcx).ty;
debug!("add_reborrow_constraint - base_ty = {:?}", base_ty);
match base_ty.sty {
})
}
- fn typeck_mir(&mut self, mir: &Body<'tcx>) {
- self.last_span = mir.span;
- debug!("run_on_mir: {:?}", mir.span);
+ fn typeck_mir(&mut self, body: &Body<'tcx>) {
+ self.last_span = body.span;
+ debug!("run_on_mir: {:?}", body.span);
- for (local, local_decl) in mir.local_decls.iter_enumerated() {
- self.check_local(mir, local, local_decl);
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ self.check_local(body, local, local_decl);
}
- for (block, block_data) in mir.basic_blocks().iter_enumerated() {
+ for (block, block_data) in body.basic_blocks().iter_enumerated() {
let mut location = Location {
block,
statement_index: 0,
if !stmt.source_info.span.is_dummy() {
self.last_span = stmt.source_info.span;
}
- self.check_stmt(mir, stmt, location);
+ self.check_stmt(body, stmt, location);
location.statement_index += 1;
}
- self.check_terminator(mir, block_data.terminator(), location);
- self.check_iscleanup(mir, block_data);
+ self.check_terminator(body, block_data.terminator(), location);
+ self.check_iscleanup(body, block_data);
}
}
/// Returns `true` if the borrow represented by `kind` is
/// allowed to be split into separate Reservation and
/// Activation phases.
-pub(super) fn allow_two_phase_borrow<'a, 'tcx, 'gcx: 'tcx>(kind: BorrowKind) -> bool {
+pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool {
kind.allows_two_phase_borrow()
}
pub(super) fn each_borrow_involving_path<'a, 'tcx, 'gcx: 'tcx, F, I, S> (
s: &mut S,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
_location: Location,
access_place: (AccessDepth, &Place<'tcx>),
borrow_set: &BorrowSet<'tcx>,
if places_conflict::borrow_conflicts_with_place(
tcx,
- mir,
+ body,
&borrowed.borrowed_place,
borrowed.kind,
place,
fn ignore_borrow(
&self,
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
locals_state_at_exit: &LocalsStateAtExit,
) -> bool;
}
fn ignore_borrow(
&self,
tcx: TyCtxt<'_, '_, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
locals_state_at_exit: &LocalsStateAtExit,
) -> bool {
self.iterate(|place_base, place_projection| {
LocalsStateAtExit::AllAreInvalidated => false,
LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved } => {
let ignore = !has_storage_dead_or_moved.contains(*index) &&
- mir.local_decls[*index].mutability == Mutability::Not;
+ body.local_decls[*index].mutability == Mutability::Not;
debug!("ignore_borrow: local {:?} => {:?}", index, ignore);
ignore
}
for proj in place_projection {
if proj.elem == ProjectionElem::Deref {
- let ty = proj.base.ty(mir, tcx).ty;
+ let ty = proj.base.ty(body, tcx).ty;
match ty.sty {
// For both derefs of raw pointers and `&T`
// references, the original path is `Copy` and
/// dataflow).
crate fn places_conflict<'gcx, 'tcx>(
tcx: TyCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
borrow_place: &Place<'tcx>,
access_place: &Place<'tcx>,
bias: PlaceConflictBias,
) -> bool {
borrow_conflicts_with_place(
tcx,
- mir,
+ body,
borrow_place,
BorrowKind::Mut { allow_two_phase_borrow: true },
access_place,
/// order to make the conservative choice and preserve soundness.
pub(super) fn borrow_conflicts_with_place<'gcx, 'tcx>(
tcx: TyCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
borrow_place: &Place<'tcx>,
borrow_kind: BorrowKind,
access_place: &Place<'tcx>,
access_place.iterate(|access_base, access_projections| {
place_components_conflict(
tcx,
- mir,
+ body,
(borrow_base, borrow_projections),
borrow_kind,
(access_base, access_projections),
fn place_components_conflict<'gcx, 'tcx>(
tcx: TyCtxt<'_, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
borrow_projections: (&PlaceBase<'tcx>, ProjectionsIter<'_, 'tcx>),
borrow_kind: BorrowKind,
access_projections: (&PlaceBase<'tcx>, ProjectionsIter<'_, 'tcx>),
// check whether the components being borrowed vs
// accessed are disjoint (as in the second example,
// but not the first).
- match place_projection_conflict(tcx, mir, borrow_c, access_c, bias) {
+ match place_projection_conflict(tcx, body, borrow_c, access_c, bias) {
Overlap::Arbitrary => {
// We have encountered different fields of potentially
// the same union - the borrow now partially overlaps.
let base = &borrow_c.base;
let elem = &borrow_c.elem;
- let base_ty = base.ty(mir, tcx).ty;
+ let base_ty = base.ty(body, tcx).ty;
match (elem, &base_ty.sty, access) {
(_, _, Shallow(Some(ArtificialField::ArrayLength)))
// between `elem1` and `elem2`.
fn place_projection_conflict<'a, 'gcx: 'tcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
pi1: &Projection<'tcx>,
pi2: &Projection<'tcx>,
bias: PlaceConflictBias,
debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD");
Overlap::EqualOrDisjoint
} else {
- let ty = pi1.base.ty(mir, tcx).ty;
+ let ty = pi1.base.ty(body, tcx).ty;
match ty.sty {
ty::Adt(def, _) if def.is_union() => {
// Different fields of a union, we are basically stuck.
pub(super) struct Prefixes<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
- mir: &'cx Body<'tcx>,
+ body: &'cx Body<'tcx>,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
kind: PrefixSet,
next: Option<&'cx Place<'tcx>>,
Prefixes {
next: Some(place),
kind,
- mir: self.mir,
+ body: self.body,
tcx: self.infcx.tcx,
}
}
// derefs, except we stop at the deref of a shared
// reference.
- let ty = proj.base.ty(self.mir, self.tcx).ty;
+ let ty = proj.base.ty(self.body, self.tcx).ty;
match ty.sty {
ty::RawPtr(_) |
ty::Ref(
never_initialized_mut_locals: &mut never_initialized_mut_locals,
mbcx: self,
};
- visitor.visit_body(visitor.mbcx.mir);
+ visitor.visit_body(visitor.mbcx.body);
}
// Take the union of the existed `used_mut` set with those variables we've found were
/// Append `AscribeUserType` statements onto the end of `block`
/// for each ascription
- fn ascribe_types<'pat>(
- &mut self,
- block: BasicBlock,
- ascriptions: &[Ascription<'tcx>],
- ) {
+ fn ascribe_types(&mut self, block: BasicBlock, ascriptions: &[Ascription<'tcx>]) {
for ascription in ascriptions {
let source_info = self.source_info(ascription.span);
/// that it *doesn't* apply. For now, we return false, indicate that the
/// test does not apply to this candidate, but it might be we can get
/// tighter match code if we do something a bit different.
- pub fn sort_candidate<'pat, 'cand>(
+ pub fn sort_candidate<'pat>(
&mut self,
test_place: &Place<'tcx>,
test: &Test<'tcx>,
use crate::build::scope::DropKind;
use crate::hair::cx::Cx;
use crate::hair::{LintLevel, BindingMode, PatternKind};
-use crate::shim;
use crate::transform::MirSource;
use crate::util as mir_util;
use rustc::hir;
// Figure out what primary body this item has.
let (body_id, return_ty_span) = match tcx.hir().get_by_hir_id(id) {
- Node::Ctor(ctor) => return create_constructor_shim(tcx, id, ctor),
-
Node::Expr(hir::Expr { node: hir::ExprKind::Closure(_, decl, body_id, _, _), .. })
| Node::Item(hir::Item { node: hir::ItemKind::Fn(decl, _, _, body_id), .. })
| Node::ImplItem(
tcx.infer_ctxt().enter(|infcx| {
let cx = Cx::new(&infcx, id);
- let mut mir = if cx.tables().tainted_by_errors {
+ let mut body = if cx.tables().tainted_by_errors {
build::construct_error(cx, body_id)
} else if cx.body_owner_kind.is_fn_or_closure() {
// fetch the fully liberated fn signature (that is, all bound
// Convert the `mir::Body` to global types.
let mut globalizer = GlobalizeMir {
tcx,
- span: mir.span
+ span: body.span
};
- globalizer.visit_body(&mut mir);
- let mir = unsafe {
- mem::transmute::<Body<'_>, Body<'tcx>>(mir)
+ globalizer.visit_body(&mut body);
+ let body = unsafe {
+ mem::transmute::<Body<'_>, Body<'tcx>>(body)
};
mir_util::dump_mir(tcx, None, "mir_map", &0,
- MirSource::item(def_id), &mir, |_, _| Ok(()) );
+ MirSource::item(def_id), &body, |_, _| Ok(()) );
- lints::check(tcx, &mir, def_id);
+ lints::check(tcx, &body, def_id);
- mir
+ body
})
}
}
}
-fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ctor_id: hir::HirId,
- v: &'tcx hir::VariantData)
- -> Body<'tcx>
-{
- let span = tcx.hir().span_by_hir_id(ctor_id);
- if let hir::VariantData::Tuple(ref fields, ctor_id) = *v {
- tcx.infer_ctxt().enter(|infcx| {
- let mut mir = shim::build_adt_ctor(&infcx, ctor_id, fields, span);
-
- // Convert the `mir::Body` to global types.
- let tcx = infcx.tcx.global_tcx();
- let mut globalizer = GlobalizeMir {
- tcx,
- span: mir.span
- };
- globalizer.visit_body(&mut mir);
- let mir = unsafe {
- mem::transmute::<Body<'_>, Body<'tcx>>(mir)
- };
-
- mir_util::dump_mir(tcx, None, "mir_map", &0,
- MirSource::item(tcx.hir().local_def_id_from_hir_id(ctor_id)),
- &mir, |_, _| Ok(()) );
-
- mir
- })
- } else {
- span_bug!(span, "attempting to create MIR for non-tuple variant {:?}", v);
- }
-}
-
///////////////////////////////////////////////////////////////////////////
// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
info!("fn_id {:?} has attrs {:?}", fn_def_id,
tcx.get_attrs(fn_def_id));
- let mut mir = builder.finish(yield_ty);
- mir.spread_arg = spread_arg;
- mir
+ let mut body = builder.finish(yield_ty);
+ body.spread_arg = spread_arg;
+ body
}
fn construct_const<'a, 'gcx, 'tcx>(
use crate::interpret::{self,
PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Immediate, Scalar,
RawConst, ConstValue,
- EvalResult, EvalError, InterpError, GlobalId, InterpretCx, StackPopCleanup,
+ InterpResult, InterpErrorInfo, InterpError, GlobalId, InterpretCx, StackPopCleanup,
Allocation, AllocId, MemoryKind,
snapshot, RefTracking,
};
pub(crate) fn eval_promoted<'a, 'mir, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cid: GlobalId<'tcx>,
- mir: &'mir mir::Body<'tcx>,
+ body: &'mir mir::Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
let span = tcx.def_span(cid.instance.def_id());
let mut ecx = mk_eval_cx(tcx, span, param_env);
- eval_body_using_ecx(&mut ecx, cid, mir, param_env)
+ eval_body_using_ecx(&mut ecx, cid, body, param_env)
}
fn mplace_to_const<'tcx>(
fn eval_body_using_ecx<'mir, 'tcx>(
ecx: &mut CompileTimeEvalContext<'_, 'mir, 'tcx>,
cid: GlobalId<'tcx>,
- mir: &'mir mir::Body<'tcx>,
+ body: &'mir mir::Body<'tcx>,
param_env: ty::ParamEnv<'tcx>,
-) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
debug!("eval_body_using_ecx: {:?}, {:?}", cid, param_env);
let tcx = ecx.tcx.tcx;
- let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
+ let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ret = ecx.allocate(layout, MemoryKind::Stack);
let name = ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()));
let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p));
trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);
- assert!(mir.arg_count == 0);
+ assert!(body.arg_count == 0);
ecx.push_stack_frame(
cid.instance,
- mir.span,
- mir,
+ body.span,
+ body,
Some(ret.into()),
StackPopCleanup::None { cleanup: false },
)?;
// Intern the result
let mutability = if tcx.is_mutable_static(cid.instance.def_id()) ||
- !layout.ty.is_freeze(tcx, param_env, mir.span) {
+ !layout.ty.is_freeze(tcx, param_env, body.span) {
Mutability::Mutable
} else {
Mutability::Immutable
Ok(ret)
}
-impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError {
- fn into(self) -> EvalError<'tcx> {
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalError {
+ fn into(self) -> InterpErrorInfo<'tcx> {
InterpError::MachineError(self.to_string()).into()
}
}
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
+ ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
debug!("eval_fn_call: {:?}", instance);
// Only check non-glue functions
if let ty::InstanceDef::Item(def_id) = instance.def {
}
// This is a const fn. Call it.
Ok(Some(match ecx.load_mir(instance.def) {
- Ok(mir) => mir,
+ Ok(body) => body,
Err(err) => {
if let InterpError::NoMirFor(ref path) = err.kind {
return Err(
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: PlaceTy<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if ecx.emulate_intrinsic(instance, args, dest)? {
return Ok(());
}
_bin_op: mir::BinOp,
_left: ImmTy<'tcx>,
_right: ImmTy<'tcx>,
- ) -> EvalResult<'tcx, (Scalar, bool)> {
+ ) -> InterpResult<'tcx, (Scalar, bool)> {
Err(
ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into(),
)
fn find_foreign_static(
_def_id: DefId,
_tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<Self::PointerTag>>> {
err!(ReadForeignStatic)
}
fn box_alloc(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_dest: PlaceTy<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Err(
ConstEvalError::NeedsRfc("heap allocations via `box` keyword".to_string()).into(),
)
}
- fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx> {
+ fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> InterpResult<'tcx> {
{
let steps = &mut ecx.machine.steps_since_detector_enabled;
#[inline(always)]
fn stack_push(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
fn stack_pop(
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_extra: (),
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
}
pub fn error_to_const_error<'a, 'mir, 'tcx>(
ecx: &InterpretCx<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
- mut error: EvalError<'tcx>
+ mut error: InterpErrorInfo<'tcx>
) -> ConstEvalErr<'tcx> {
error.print_backtrace();
let stacktrace = ecx.generate_stacktrace(None);
let mut ecx = InterpretCx::new(tcx.at(span), key.param_env, CompileTimeInterpreter::new());
let res = ecx.load_mir(cid.instance.def);
- res.map(|mir| {
+ res.map(|body| {
if let Some(index) = cid.promoted {
- &mir.promoted[index]
+ &body.promoted[index]
} else {
- mir
+ body
}
}).and_then(
- |mir| eval_body_using_ecx(&mut ecx, cid, mir, key.param_env)
+ |body| eval_body_using_ecx(&mut ecx, cid, body, key.param_env)
).and_then(|place| {
Ok(RawConst {
alloc_id: place.to_ptr().expect("we allocated this ptr!").alloc_id,
curr_state.subtract(&self.stmt_kill);
f(curr_state.iter());
}
+
+ /// Returns a bitset of the elements present in the current state.
+ pub fn as_dense(&self) -> &BitSet<BD::Idx> {
+ &self.curr_state
+ }
}
impl<'tcx, BD> FlowsAtLocation for FlowAtLocation<'tcx, BD>
//
// FIXME: we have to do something for moving slice patterns.
fn place_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
place: &mir::Place<'tcx>) -> bool {
- let ty = place.ty(mir, tcx).ty;
+ let ty = place.ty(body, tcx).ty;
match ty.sty {
ty::Array(..) => {
debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
pub(crate) fn on_lookup_result_bits<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
lookup_result: LookupResult,
each_child: F)
// access to untracked value - do not touch children
}
LookupResult::Exact(e) => {
- on_all_children_bits(tcx, mir, move_data, e, each_child)
+ on_all_children_bits(tcx, body, move_data, e, each_child)
}
}
}
pub(crate) fn on_all_children_bits<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
move_path_index: MovePathIndex,
mut each_child: F)
{
fn is_terminal_path<'a, 'gcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
path: MovePathIndex) -> bool
{
place_contents_drop_state_cannot_differ(
- tcx, mir, &move_data.move_paths[path].place)
+ tcx, body, &move_data.move_paths[path].place)
}
fn on_all_children_bits<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
move_path_index: MovePathIndex,
each_child: &mut F)
{
each_child(move_path_index);
- if is_terminal_path(tcx, mir, move_data, move_path_index) {
+ if is_terminal_path(tcx, body, move_data, move_path_index) {
return
}
let mut next_child_index = move_data.move_paths[move_path_index].first_child;
while let Some(child_index) = next_child_index {
- on_all_children_bits(tcx, mir, move_data, child_index, each_child);
+ on_all_children_bits(tcx, body, move_data, child_index, each_child);
next_child_index = move_data.move_paths[child_index].next_sibling;
}
}
- on_all_children_bits(tcx, mir, move_data, move_path_index, &mut each_child);
+ on_all_children_bits(tcx, body, move_data, move_path_index, &mut each_child);
}
pub(crate) fn on_all_drop_children_bits<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
ctxt: &MoveDataParamEnv<'gcx, 'tcx>,
path: MovePathIndex,
mut each_child: F)
where F: FnMut(MovePathIndex)
{
- on_all_children_bits(tcx, mir, &ctxt.move_data, path, |child| {
+ on_all_children_bits(tcx, body, &ctxt.move_data, path, |child| {
let place = &ctxt.move_data.move_paths[path].place;
- let ty = place.ty(mir, tcx).ty;
+ let ty = place.ty(body, tcx).ty;
debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty);
let gcx = tcx.global_tcx();
pub(crate) fn drop_flag_effects_for_function_entry<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
ctxt: &MoveDataParamEnv<'gcx, 'tcx>,
mut callback: F)
where F: FnMut(MovePathIndex, DropFlagState)
{
let move_data = &ctxt.move_data;
- for arg in mir.args_iter() {
+ for arg in body.args_iter() {
let place = mir::Place::Base(mir::PlaceBase::Local(arg));
let lookup_result = move_data.rev_lookup.find(&place);
- on_lookup_result_bits(tcx, mir, move_data,
+ on_lookup_result_bits(tcx, body, move_data,
lookup_result,
|mpi| callback(mpi, DropFlagState::Present));
}
pub(crate) fn drop_flag_effects_for_location<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
ctxt: &MoveDataParamEnv<'gcx, 'tcx>,
loc: Location,
mut callback: F)
let path = mi.move_path_index(move_data);
debug!("moving out of path {:?}", move_data.move_paths[path]);
- on_all_children_bits(tcx, mir, move_data,
+ on_all_children_bits(tcx, body, move_data,
path,
|mpi| callback(mpi, DropFlagState::Absent))
}
for_location_inits(
tcx,
- mir,
+ body,
move_data,
loc,
|mpi| callback(mpi, DropFlagState::Present)
pub(crate) fn for_location_inits<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
move_data: &MoveData<'tcx>,
loc: Location,
mut callback: F)
InitKind::Deep => {
let path = init.path;
- on_all_children_bits(tcx, mir, move_data,
+ on_all_children_bits(tcx, body, move_data,
path,
&mut callback)
},
pub trait MirWithFlowState<'tcx> {
type BD: BitDenotation<'tcx>;
fn def_id(&self) -> DefId;
- fn mir(&self) -> &Body<'tcx>;
+ fn body(&self) -> &Body<'tcx>;
fn flow_state(&self) -> &DataflowState<'tcx, Self::BD>;
}
{
type BD = BD;
fn def_id(&self) -> DefId { self.def_id }
- fn mir(&self) -> &Body<'tcx> { self.flow_state.mir() }
+ fn body(&self) -> &Body<'tcx> { self.flow_state.body() }
fn flow_state(&self) -> &DataflowState<'tcx, Self::BD> { &self.flow_state.flow_state }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Edge { source: BasicBlock, index: usize }
-fn outgoing(mir: &Body<'_>, bb: BasicBlock) -> Vec<Edge> {
- (0..mir[bb].terminator().successors().count())
+fn outgoing(body: &Body<'_>, bb: BasicBlock) -> Vec<Edge> {
+ (0..body[bb].terminator().successors().count())
.map(|index| Edge { source: bb, index: index}).collect()
}
// | [00-00] | _7 = const Foo::twiddle(move _8) | [0c-00] | [f3-0f] |
// +---------+----------------------------------+------------------+------------------+
let mut v = Vec::new();
- self.node_label_internal(n, &mut v, *n, self.mbcx.mir()).unwrap();
+ self.node_label_internal(n, &mut v, *n, self.mbcx.body()).unwrap();
dot::LabelText::html(String::from_utf8(v).unwrap())
}
}
fn edge_label(&'a self, e: &Edge) -> dot::LabelText<'a> {
- let term = self.mbcx.mir()[e.source].terminator();
+ let term = self.mbcx.body()[e.source].terminator();
let label = &term.kind.fmt_successor_labels()[e.index];
dot::LabelText::label(label.clone())
}
n: &Node,
w: &mut W,
block: BasicBlock,
- mir: &Body<'_>) -> io::Result<()> {
+ body: &Body<'_>) -> io::Result<()> {
// Header rows
const HDRS: [&str; 4] = ["ENTRY", "MIR", "BLOCK GENS", "BLOCK KILLS"];
const HDR_FMT: &str = "bgcolor=\"grey\"";
write!(w, "</tr>")?;
// Data row
- self.node_label_verbose_row(n, w, block, mir)?;
- self.node_label_final_row(n, w, block, mir)?;
+ self.node_label_verbose_row(n, w, block, body)?;
+ self.node_label_final_row(n, w, block, body)?;
write!(w, "</table>")?;
Ok(())
n: &Node,
w: &mut W,
block: BasicBlock,
- mir: &Body<'_>)
+ body: &Body<'_>)
-> io::Result<()> {
let i = n.index();
// MIR statements
write!(w, "<td>")?;
{
- let data = &mir[block];
+ let data = &body[block];
for (i, statement) in data.statements.iter().enumerate() {
write!(w, "{}<br align=\"left\"/>",
dot::escape_html(&format!("{:3}: {:?}", i, statement)))?;
n: &Node,
w: &mut W,
block: BasicBlock,
- mir: &Body<'_>)
+ body: &Body<'_>)
-> io::Result<()> {
let i = n.index();
// Terminator
write!(w, "<td>")?;
{
- let data = &mir[block];
+ let data = &body[block];
let mut terminator_head = String::new();
data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
write!(w, "{}", dot::escape_html(&terminator_head))?;
type Node = Node;
type Edge = Edge;
fn nodes(&self) -> dot::Nodes<'_, Node> {
- self.mbcx.mir()
+ self.mbcx.body()
.basic_blocks()
.indices()
.collect::<Vec<_>>()
}
fn edges(&self) -> dot::Edges<'_, Edge> {
- let mir = self.mbcx.mir();
+ let body = self.mbcx.body();
- mir.basic_blocks()
+ body.basic_blocks()
.indices()
- .flat_map(|bb| outgoing(mir, bb))
+ .flat_map(|bb| outgoing(body, bb))
.collect::<Vec<_>>()
.into()
}
}
fn target(&self, edge: &Edge) -> Node {
- let mir = self.mbcx.mir();
- *mir[edge.source].terminator().successors().nth(edge.index).unwrap()
+ let body = self.mbcx.body();
+ *body[edge.source].terminator().successors().nth(edge.index).unwrap()
}
}
/// immovable generators.
#[derive(Copy, Clone)]
pub struct HaveBeenBorrowedLocals<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
}
impl<'a, 'tcx: 'a> HaveBeenBorrowedLocals<'a, 'tcx> {
- pub fn new(mir: &'a Body<'tcx>)
+ pub fn new(body: &'a Body<'tcx>)
-> Self {
- HaveBeenBorrowedLocals { mir }
+ HaveBeenBorrowedLocals { body }
}
- pub fn mir(&self) -> &Body<'tcx> {
- self.mir
+ pub fn body(&self) -> &Body<'tcx> {
+ self.body
}
}
type Idx = Local;
fn name() -> &'static str { "has_been_borrowed_locals" }
fn bits_per_block(&self) -> usize {
- self.mir.local_decls.len()
+ self.body.local_decls.len()
}
fn start_block_effect(&self, _sets: &mut BitSet<Local>) {
fn statement_effect(&self,
sets: &mut BlockSets<'_, Local>,
loc: Location) {
- let stmt = &self.mir[loc.block].statements[loc.statement_index];
+ let stmt = &self.body[loc.block].statements[loc.statement_index];
BorrowedLocalsVisitor {
sets,
fn terminator_effect(&self,
sets: &mut BlockSets<'_, Local>,
loc: Location) {
- let terminator = self.mir[loc.block].terminator();
+ let terminator = self.body[loc.block].terminator();
BorrowedLocalsVisitor {
sets,
}.visit_terminator(terminator, loc);
/// borrows in compact bitvectors.
pub struct Borrows<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
borrow_set: Rc<BorrowSet<'tcx>>,
borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
}
fn precompute_borrows_out_of_scope<'tcx>(
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
regioncx: &Rc<RegionInferenceContext<'tcx>>,
borrows_out_of_scope_at_location: &mut FxHashMap<Location, Vec<BorrowIndex>>,
borrow_index: BorrowIndex,
stack.push(StackEntry {
bb: location.block,
lo: location.statement_index,
- hi: mir[location.block].statements.len(),
+ hi: body[location.block].statements.len(),
first_part_only: false,
});
if !finished_early {
// Add successor BBs to the work list, if necessary.
- let bb_data = &mir[bb];
+ let bb_data = &body[bb];
assert!(hi == bb_data.statements.len());
for &succ_bb in bb_data.terminator.as_ref().unwrap().successors() {
visited.entry(succ_bb)
stack.push(StackEntry {
bb: succ_bb,
lo: 0,
- hi: mir[succ_bb].statements.len(),
+ hi: body[succ_bb].statements.len(),
first_part_only: false,
});
// Insert 0 for this BB, to represent the whole BB
impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> {
crate fn new(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
nonlexical_regioncx: Rc<RegionInferenceContext<'tcx>>,
borrow_set: &Rc<BorrowSet<'tcx>>,
) -> Self {
let borrow_region = borrow_data.region.to_region_vid();
let location = borrow_set.borrows[borrow_index].reserve_location;
- precompute_borrows_out_of_scope(mir, &nonlexical_regioncx,
+ precompute_borrows_out_of_scope(body, &nonlexical_regioncx,
&mut borrows_out_of_scope_at_location,
borrow_index, borrow_region, location);
}
Borrows {
tcx: tcx,
- mir: mir,
+ body: body,
borrow_set: borrow_set.clone(),
borrows_out_of_scope_at_location,
_nonlexical_regioncx: nonlexical_regioncx,
// locations.
if places_conflict::places_conflict(
self.tcx,
- self.mir,
+ self.body,
&borrow_data.borrowed_place,
place,
places_conflict::PlaceConflictBias::NoOverlap,
fn statement_effect(&self, sets: &mut BlockSets<'_, BorrowIndex>, location: Location) {
debug!("Borrows::statement_effect: sets={:?} location={:?}", sets, location);
- let block = &self.mir.basic_blocks().get(location.block).unwrap_or_else(|| {
+ let block = &self.body.basic_blocks().get(location.block).unwrap_or_else(|| {
panic!("could not find block at location {:?}", location);
});
let stmt = block.statements.get(location.statement_index).unwrap_or_else(|| {
if let mir::Rvalue::Ref(_, _, ref place) = **rhs {
if place.ignore_borrow(
self.tcx,
- self.mir,
+ self.body,
&self.borrow_set.locals_state_at_exit,
) {
return;
/// places that would require a dynamic drop-flag at that statement.
pub struct MaybeInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
impl<'a, 'gcx: 'tcx, 'tcx> MaybeInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- MaybeInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
+ MaybeInitializedPlaces { tcx: tcx, body: body, mdpe: mdpe }
}
}
/// places that would require a dynamic drop-flag at that statement.
pub struct MaybeUninitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx> MaybeUninitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- MaybeUninitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
+ MaybeUninitializedPlaces { tcx: tcx, body: body, mdpe: mdpe }
}
}
/// that would require a dynamic drop-flag at that statement.
pub struct DefinitelyInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx: 'a> DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- DefinitelyInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
+ DefinitelyInitializedPlaces { tcx: tcx, body: body, mdpe: mdpe }
}
}
/// ```
pub struct EverInitializedPlaces<'a, 'gcx: 'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>,
}
impl<'a, 'gcx: 'tcx, 'tcx: 'a> EverInitializedPlaces<'a, 'gcx, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mdpe: &'a MoveDataParamEnv<'gcx, 'tcx>)
-> Self
{
- EverInitializedPlaces { tcx: tcx, mir: mir, mdpe: mdpe }
+ EverInitializedPlaces { tcx: tcx, body: body, mdpe: mdpe }
}
}
fn start_block_effect(&self, entry_set: &mut BitSet<MovePathIndex>) {
drop_flag_effects_for_function_entry(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
|path, s| {
assert!(s == DropFlagState::Present);
entry_set.insert(path);
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
) {
// when a call returns successfully, that means we need to set
// the bits for that dest_place to 1 (initialized).
- on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
+ on_lookup_result_bits(self.tcx, self.body, self.move_data(),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.insert(mpi); });
}
entry_set.insert_all();
drop_flag_effects_for_function_entry(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
|path, s| {
assert!(s == DropFlagState::Present);
entry_set.remove(path);
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
) {
// when a call returns successfully, that means we need to set
// the bits for that dest_place to 0 (initialized).
- on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
+ on_lookup_result_bits(self.tcx, self.body, self.move_data(),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.remove(mpi); });
}
entry_set.clear();
drop_flag_effects_for_function_entry(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
|path, s| {
assert!(s == DropFlagState::Present);
entry_set.insert(path);
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
location: Location)
{
drop_flag_effects_for_location(
- self.tcx, self.mir, self.mdpe,
+ self.tcx, self.body, self.mdpe,
location,
|path, s| Self::update_bits(sets, path, s)
)
) {
// when a call returns successfully, that means we need to set
// the bits for that dest_place to 1 (initialized).
- on_lookup_result_bits(self.tcx, self.mir, self.move_data(),
+ on_lookup_result_bits(self.tcx, self.body, self.move_data(),
self.move_data().rev_lookup.find(dest_place),
|mpi| { in_out.insert(mpi); });
}
}
fn start_block_effect(&self, entry_set: &mut BitSet<InitIndex>) {
- for arg_init in 0..self.mir.arg_count {
+ for arg_init in 0..self.body.arg_count {
entry_set.insert(InitIndex::new(arg_init));
}
}
fn statement_effect(&self,
sets: &mut BlockSets<'_, InitIndex>,
location: Location) {
- let (_, mir, move_data) = (self.tcx, self.mir, self.move_data());
- let stmt = &mir[location.block].statements[location.statement_index];
+ let (_, body, move_data) = (self.tcx, self.body, self.move_data());
+ let stmt = &body[location.block].statements[location.statement_index];
let init_path_map = &move_data.init_path_map;
let init_loc_map = &move_data.init_loc_map;
let rev_lookup = &move_data.rev_lookup;
sets: &mut BlockSets<'_, InitIndex>,
location: Location)
{
- let (mir, move_data) = (self.mir, self.move_data());
- let term = mir[location.block].terminator();
+ let (body, move_data) = (self.body, self.move_data());
+ let term = body[location.block].terminator();
let init_loc_map = &move_data.init_loc_map;
debug!("terminator {:?} at loc {:?} initializes move_indexes {:?}",
term, location, &init_loc_map[location]);
let call_loc = Location {
block: call_bb,
- statement_index: self.mir[call_bb].statements.len(),
+ statement_index: self.body[call_bb].statements.len(),
};
for init_index in &init_loc_map[call_loc] {
assert!(init_index.index() < bits_per_block);
#[derive(Copy, Clone)]
pub struct MaybeStorageLive<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
}
impl<'a, 'tcx: 'a> MaybeStorageLive<'a, 'tcx> {
- pub fn new(mir: &'a Body<'tcx>)
+ pub fn new(body: &'a Body<'tcx>)
-> Self {
- MaybeStorageLive { mir }
+ MaybeStorageLive { body }
}
- pub fn mir(&self) -> &Body<'tcx> {
- self.mir
+ pub fn body(&self) -> &Body<'tcx> {
+ self.body
}
}
type Idx = Local;
fn name() -> &'static str { "maybe_storage_live" }
fn bits_per_block(&self) -> usize {
- self.mir.local_decls.len()
+ self.body.local_decls.len()
}
fn start_block_effect(&self, _sets: &mut BitSet<Local>) {
fn statement_effect(&self,
sets: &mut BlockSets<'_, Local>,
loc: Location) {
- let stmt = &self.mir[loc.block].statements[loc.statement_index];
+ let stmt = &self.body[loc.block].statements[loc.statement_index];
match stmt.kind {
StatementKind::StorageLive(l) => sets.gen(l),
}
pub(crate) fn do_dataflow<'a, 'gcx, 'tcx, BD, P>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
def_id: DefId,
attributes: &[ast::Attribute],
dead_unwinds: &BitSet<BasicBlock>,
where BD: BitDenotation<'tcx> + InitialFlow,
P: Fn(&BD, BD::Idx) -> DebugFormatted
{
- let flow_state = DataflowAnalysis::new(mir, dead_unwinds, bd);
+ let flow_state = DataflowAnalysis::new(body, dead_unwinds, bd);
flow_state.run(tcx, def_id, attributes, p)
}
self.flow_state.operator.start_block_effect(&mut sets.on_entry);
}
- for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data;
let mut interim_state;
{
fn walk_cfg(&mut self, in_out: &mut BitSet<BD::Idx>) {
let mut dirty_queue: WorkQueue<mir::BasicBlock> =
- WorkQueue::with_all(self.builder.mir.basic_blocks().len());
- let mir = self.builder.mir;
+ WorkQueue::with_all(self.builder.body.basic_blocks().len());
+ let body = self.builder.body;
while let Some(bb) = dirty_queue.pop() {
- let bb_data = &mir[bb];
+ let bb_data = &body[bb];
{
let sets = self.builder.flow_state.sets.for_block(bb.index());
debug_assert!(in_out.words().len() == sets.on_entry.words().len());
fn analyze_results(&mut self, flow_uninit: &mut Self::FlowState) {
let flow = flow_uninit;
- for (bb, _) in traversal::reverse_postorder(self.mir()) {
+ for (bb, _) in traversal::reverse_postorder(self.body()) {
flow.reset_to_entry_of(bb);
self.process_basic_block(bb, flow);
}
fn process_basic_block(&mut self, bb: BasicBlock, flow_state: &mut Self::FlowState) {
let BasicBlockData { ref statements, ref terminator, is_cleanup: _ } =
- self.mir()[bb];
+ self.body()[bb];
let mut location = Location { block: bb, statement_index: 0 };
for stmt in statements.iter() {
flow_state.reconstruct_statement_effect(location);
// Delegated Hooks: Provide access to the MIR and process the flow state.
- fn mir(&self) -> &'a Body<'tcx>;
+ fn body(&self) -> &'a Body<'tcx>;
}
pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location,
analysis: &T,
result: &DataflowResults<'tcx, T>,
- mir: &Body<'tcx>)
+ body: &Body<'tcx>)
-> BitSet<T::Idx> {
let mut on_entry = result.sets().on_entry_set_for(loc.block.index()).to_owned();
let mut kill_set = on_entry.to_hybrid();
}
// Apply the pre-statement effect of the statement we're evaluating.
- if loc.statement_index == mir[loc.block].statements.len() {
+ if loc.statement_index == body[loc.block].statements.len() {
analysis.before_terminator_effect(&mut sets, loc);
} else {
analysis.before_statement_effect(&mut sets, loc);
{
flow_state: DataflowState<'tcx, O>,
dead_unwinds: &'a BitSet<mir::BasicBlock>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
}
impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O> where O: BitDenotation<'tcx>
DataflowResults(self.flow_state)
}
- pub fn mir(&self) -> &'a Body<'tcx> { self.mir }
+ pub fn body(&self) -> &'a Body<'tcx> { self.body }
}
pub struct DataflowResults<'tcx, O>(pub(crate) DataflowState<'tcx, O>) where O: BitDenotation<'tcx>;
impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation<'tcx>
{
- pub fn new(mir: &'a Body<'tcx>,
+ pub fn new(body: &'a Body<'tcx>,
dead_unwinds: &'a BitSet<mir::BasicBlock>,
denotation: D) -> Self where D: InitialFlow {
let bits_per_block = denotation.bits_per_block();
- let num_blocks = mir.basic_blocks().len();
+ let num_blocks = body.basic_blocks().len();
let on_entry_sets = if D::bottom_value() {
vec![BitSet::new_filled(bits_per_block); num_blocks]
let kill_sets = gen_sets.clone();
DataflowAnalysis {
- mir,
+ body,
dead_unwinds,
flow_state: DataflowState {
sets: AllSets {
use super::IllegalMoveOriginKind::*;
struct MoveDataBuilder<'a, 'gcx: 'tcx, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
data: MoveData<'tcx>,
errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
}
impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> {
- fn new(mir: &'a Body<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self {
+ fn new(body: &'a Body<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self {
let mut move_paths = IndexVec::new();
let mut path_map = IndexVec::new();
let mut init_path_map = IndexVec::new();
MoveDataBuilder {
- mir,
+ body,
tcx,
errors: Vec::new(),
data: MoveData {
moves: IndexVec::new(),
- loc_map: LocationMap::new(mir),
+ loc_map: LocationMap::new(body),
rev_lookup: MovePathLookup {
- locals: mir.local_decls.indices().map(PlaceBase::Local).map(|v| {
+ locals: body.local_decls.indices().map(PlaceBase::Local).map(|v| {
Self::new_move_path(
&mut move_paths,
&mut path_map,
move_paths,
path_map,
inits: IndexVec::new(),
- init_loc_map: LocationMap::new(mir),
+ init_loc_map: LocationMap::new(body),
init_path_map,
}
}
};
for proj in place_projection {
- let mir = self.builder.mir;
+ let body = self.builder.body;
let tcx = self.builder.tcx;
- let place_ty = proj.base.ty(mir, tcx).ty;
+ let place_ty = proj.base.ty(body, tcx).ty;
match place_ty.sty {
ty::Ref(..) | ty::RawPtr(..) =>
return Err(MoveError::cannot_move_out_of(
self
) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
debug!("{}", {
- debug!("moves for {:?}:", self.mir.span);
+ debug!("moves for {:?}:", self.body.span);
for (j, mo) in self.data.moves.iter_enumerated() {
debug!(" {:?} = {:?}", j, mo);
}
- debug!("move paths for {:?}:", self.mir.span);
+ debug!("move paths for {:?}:", self.body.span);
for (j, path) in self.data.move_paths.iter_enumerated() {
debug!(" {:?} = {:?}", j, path);
}
}
pub(super) fn gather_moves<'a, 'gcx, 'tcx>(
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
tcx: TyCtxt<'a, 'gcx, 'tcx>
) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
- let mut builder = MoveDataBuilder::new(mir, tcx);
+ let mut builder = MoveDataBuilder::new(body, tcx);
builder.gather_args();
- for (bb, block) in mir.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
for (i, stmt) in block.statements.iter().enumerate() {
let source = Location { block: bb, statement_index: i };
builder.gather_statement(source, stmt);
impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> {
fn gather_args(&mut self) {
- for arg in self.mir.args_iter() {
+ for arg in self.body.args_iter() {
let path = self.data.rev_lookup.locals[arg];
let init = self.data.inits.push(Init {
Place::Projection(box Projection {
base,
elem: ProjectionElem::Field(_, _),
- }) if match base.ty(self.builder.mir, self.builder.tcx).ty.sty {
+ }) if match base.ty(self.builder.body, self.builder.tcx).ty.sty {
ty::Adt(def, _) if def.is_union() => true,
_ => false,
} => base,
}
impl<T> LocationMap<T> where T: Default + Clone {
- fn new(mir: &Body<'_>) -> Self {
+ fn new(body: &Body<'_>) -> Self {
LocationMap {
- map: mir.basic_blocks().iter().map(|block| {
+ map: body.basic_blocks().iter().map(|block| {
vec![T::default(); block.statements.len()+1]
}).collect()
}
}
impl Init {
- crate fn span<'gcx>(&self, mir: &Body<'gcx>) -> Span {
+ crate fn span<'gcx>(&self, body: &Body<'gcx>) -> Span {
match self.location {
- InitLocation::Argument(local) => mir.local_decls[local].source_info.span,
- InitLocation::Statement(location) => mir.source_info(location).span,
+ InitLocation::Argument(local) => body.local_decls[local].source_info.span,
+ InitLocation::Statement(location) => body.source_info(location).span,
}
}
}
}
impl<'a, 'gcx, 'tcx> MoveData<'tcx> {
- pub fn gather_moves(mir: &Body<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>)
+ pub fn gather_moves(body: &Body<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>)
-> Result<Self, (Self, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
- builder::gather_moves(mir, tcx)
+ builder::gather_moves(body, tcx)
}
/// For the move path `mpi`, returns the root local variable (if any) that starts the path.
let allocation = tcx.intern_const_alloc(allocation);
ConstValue::Slice { data: allocation, start: 0, end: s.len() }
},
- LitKind::Err(ref s) => {
- let s = s.as_str();
- let allocation = Allocation::from_byte_aligned_bytes(s.as_bytes());
- let allocation = tcx.intern_const_alloc(allocation);
- return Ok(tcx.mk_const(ty::Const {
- val: ConstValue::Slice{ data: allocation, start: 0, end: s.len() },
- ty: tcx.types.err,
- }));
- },
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
ConstValue::Scalar(Scalar::Ptr(id.into()))
}
LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)),
LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)),
+ LitKind::Err(_) => unreachable!(),
};
Ok(tcx.mk_const(ty::Const { val: lit, ty }))
}
) -> Result<ConstValue<'tcx>, ()> {
let num = num.as_str();
use rustc_apfloat::ieee::{Single, Double};
- use rustc_apfloat::Float;
- let (data, size) = match fty {
+ let scalar = match fty {
ast::FloatTy::F32 => {
num.parse::<f32>().map_err(|_| ())?;
let mut f = num.parse::<Single>().unwrap_or_else(|e| {
if neg {
f = -f;
}
- (f.to_bits(), 4)
+ Scalar::from_f32(f)
}
ast::FloatTy::F64 => {
num.parse::<f64>().map_err(|_| ())?;
let mut f = num.parse::<Double>().unwrap_or_else(|e| {
- panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
+ panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
});
if neg {
f = -f;
}
- (f.to_bits(), 8)
+ Scalar::from_f64(f)
}
};
- Ok(ConstValue::Scalar(Scalar::from_uint(data, Size::from_bytes(size))))
+ Ok(ConstValue::Scalar(scalar))
}
use syntax::symbol::sym;
use rustc_apfloat::ieee::{Single, Double};
+use rustc_apfloat::{Float, FloatConvert};
use rustc::mir::interpret::{
- Scalar, EvalResult, Pointer, PointerArithmetic, InterpError,
+ Scalar, InterpResult, Pointer, PointerArithmetic, InterpError,
};
use rustc::mir::CastKind;
-use rustc_apfloat::Float;
use super::{InterpretCx, Machine, PlaceTy, OpTy, Immediate};
src: OpTy<'tcx, M::PointerTag>,
kind: CastKind,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
use rustc::mir::CastKind::*;
match kind {
Pointer(PointerCast::Unsize) => {
if self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
bug!("reifying a fn ptr that requires const arguments");
}
- let instance: EvalResult<'tcx, _> = ty::Instance::resolve(
+ let instance: InterpResult<'tcx, _> = ty::Instance::resolve(
*self.tcx,
self.param_env,
def_id,
Ok(())
}
- pub(super) fn cast_scalar(
+ fn cast_scalar(
&self,
val: Scalar<M::PointerTag>,
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", val, src_layout.ty, dest_layout.ty);
- match val.to_bits_or_ptr(src_layout.size, self) {
- Err(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
- Ok(data) => {
- match src_layout.ty.sty {
- Float(fty) => self.cast_from_float(data, fty, dest_layout.ty),
- _ => self.cast_from_int(data, src_layout, dest_layout),
+ match src_layout.ty.sty {
+ // Floating point
+ Float(FloatTy::F32) => self.cast_from_float(val.to_f32()?, dest_layout.ty),
+ Float(FloatTy::F64) => self.cast_from_float(val.to_f64()?, dest_layout.ty),
+ // Integer(-like), including fn ptr casts and casts from enums that
+ // are represented as integers (this excludes univariant enums, which
+ // are handled in `cast` directly).
+ _ => {
+ assert!(
+ src_layout.ty.is_bool() || src_layout.ty.is_char() ||
+ src_layout.ty.is_enum() || src_layout.ty.is_integral() ||
+ src_layout.ty.is_unsafe_ptr() || src_layout.ty.is_fn_ptr() ||
+ src_layout.ty.is_region_ptr(),
+ "Unexpected cast from type {:?}", src_layout.ty
+ );
+ match val.to_bits_or_ptr(src_layout.size, self) {
+ Err(ptr) => self.cast_from_ptr(ptr, dest_layout.ty),
+ Ok(data) => self.cast_from_int(data, src_layout, dest_layout),
}
}
}
fn cast_from_int(
&self,
- v: u128,
+ v: u128, // raw bits
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
+ // Let's make sure v is sign-extended *if* it has a signed type.
let signed = src_layout.abi.is_signed();
let v = if signed {
self.sign_extend(v, src_layout)
Ok(Scalar::from_uint(v, dest_layout.size))
}
- Float(FloatTy::F32) if signed => Ok(Scalar::from_uint(
- Single::from_i128(v as i128).value.to_bits(),
- Size::from_bits(32)
+ Float(FloatTy::F32) if signed => Ok(Scalar::from_f32(
+ Single::from_i128(v as i128).value
)),
- Float(FloatTy::F64) if signed => Ok(Scalar::from_uint(
- Double::from_i128(v as i128).value.to_bits(),
- Size::from_bits(64)
+ Float(FloatTy::F64) if signed => Ok(Scalar::from_f64(
+ Double::from_i128(v as i128).value
)),
- Float(FloatTy::F32) => Ok(Scalar::from_uint(
- Single::from_u128(v).value.to_bits(),
- Size::from_bits(32)
+ Float(FloatTy::F32) => Ok(Scalar::from_f32(
+ Single::from_u128(v).value
)),
- Float(FloatTy::F64) => Ok(Scalar::from_uint(
- Double::from_u128(v).value.to_bits(),
- Size::from_bits(64)
+ Float(FloatTy::F64) => Ok(Scalar::from_f64(
+ Double::from_u128(v).value
)),
Char => {
}
}
- fn cast_from_float(
+ fn cast_from_float<F>(
&self,
- bits: u128,
- fty: FloatTy,
+ f: F,
dest_ty: Ty<'tcx>
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>>
+ where F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>
+ {
use rustc::ty::TyKind::*;
- use rustc_apfloat::FloatConvert;
match dest_ty.sty {
// float -> uint
Uint(t) => {
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
- let v = match fty {
- FloatTy::F32 => Single::from_bits(bits).to_u128(width).value,
- FloatTy::F64 => Double::from_bits(bits).to_u128(width).value,
- };
+ let v = f.to_u128(width).value;
// This should already fit the bit width
Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
},
// float -> int
Int(t) => {
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
- let v = match fty {
- FloatTy::F32 => Single::from_bits(bits).to_i128(width).value,
- FloatTy::F64 => Double::from_bits(bits).to_i128(width).value,
- };
+ let v = f.to_i128(width).value;
Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
},
- // f64 -> f32
- Float(FloatTy::F32) if fty == FloatTy::F64 => {
- Ok(Scalar::from_uint(
- Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
- Size::from_bits(32),
- ))
- },
- // f32 -> f64
- Float(FloatTy::F64) if fty == FloatTy::F32 => {
- Ok(Scalar::from_uint(
- Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
- Size::from_bits(64),
- ))
- },
- // identity cast
- Float(FloatTy:: F64) => Ok(Scalar::from_uint(bits, Size::from_bits(64))),
- Float(FloatTy:: F32) => Ok(Scalar::from_uint(bits, Size::from_bits(32))),
- _ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))),
+ // float -> f32
+ Float(FloatTy::F32) =>
+ Ok(Scalar::from_f32(f.convert(&mut false).value)),
+ // float -> f64
+ Float(FloatTy::F64) =>
+ Ok(Scalar::from_f64(f.convert(&mut false).value)),
+ // That's it.
+ _ => bug!("invalid float to {:?} cast", dest_ty),
}
}
&self,
ptr: Pointer<M::PointerTag>,
ty: Ty<'tcx>
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::ty::TyKind::*;
match ty.sty {
// Casting to a reference or fn pointer is not permitted by rustc,
// The pointee types
sty: Ty<'tcx>,
dty: Ty<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// A<Struct> -> A<Trait> conversion
let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("Unsizing {:?} into {:?}", src, dest);
match (&src.layout.ty.sty, &dest.layout.ty.sty) {
(&ty::Ref(_, s, _), &ty::Ref(_, d, _)) |
use rustc::mir::interpret::{
ErrorHandled,
GlobalId, Scalar, Pointer, FrameInfo, AllocId,
- EvalResult, InterpError,
+ InterpResult, InterpError,
truncate, sign_extend,
};
use rustc_data_structures::fx::FxHashMap;
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
- pub mir: &'mir mir::Body<'tcx>,
+ pub body: &'mir mir::Body<'tcx>,
/// The def_id and substs of the current function.
pub instance: ty::Instance<'tcx>,
}
impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
- pub fn access(&self) -> EvalResult<'tcx, Operand<Tag>> {
+ pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
match self.value {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Uninitialized =>
/// to do so; otherwise return the `MemPlace` to consult instead.
pub fn access_mut(
&mut self,
- ) -> EvalResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
+ ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
match self.value {
LocalValue::Dead => err!(DeadLocal),
LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
for InterpretCx<'a, 'mir, 'tcx, M>
{
type Ty = Ty<'tcx>;
- type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
+ type TyLayout = InterpResult<'tcx, TyLayout<'tcx>>;
#[inline]
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
}
#[inline(always)]
- pub(super) fn mir(&self) -> &'mir mir::Body<'tcx> {
- self.frame().mir
+ pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+ self.frame().body
}
pub(super) fn subst_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
&self,
substs: T,
- ) -> EvalResult<'tcx, T> {
+ ) -> InterpResult<'tcx, T> {
match self.stack.last() {
Some(frame) => Ok(self.tcx.subst_and_normalize_erasing_regions(
frame.instance.substs,
&self,
def_id: DefId,
substs: SubstsRef<'tcx>
- ) -> EvalResult<'tcx, ty::Instance<'tcx>> {
+ ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
trace!("resolve: {:?}, {:#?}", def_id, substs);
trace!("param_env: {:#?}", self.param_env);
let substs = self.subst_and_normalize_erasing_regions(substs)?;
pub fn load_mir(
&self,
instance: ty::InstanceDef<'tcx>,
- ) -> EvalResult<'tcx, &'tcx mir::Body<'tcx>> {
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
// do not continue if typeck errors occurred (can only occur in local crate)
let did = instance.def_id();
if did.is_local()
pub(super) fn monomorphize<T: TypeFoldable<'tcx> + Subst<'tcx>>(
&self,
t: T,
- ) -> EvalResult<'tcx, T> {
+ ) -> InterpResult<'tcx, T> {
match self.stack.last() {
Some(frame) => Ok(self.monomorphize_with_substs(t, frame.instance.substs)),
None => if t.needs_subst() {
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
local: mir::Local,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ ) -> InterpResult<'tcx, TyLayout<'tcx>> {
match frame.locals[local].layout.get() {
None => {
let layout = crate::interpret::operand::from_known_layout(layout, || {
- let local_ty = frame.mir.local_decls[local].ty;
+ let local_ty = frame.body.local_decls[local].ty;
let local_ty = self.monomorphize_with_substs(local_ty, frame.instance.substs);
self.layout_of(local_ty)
})?;
&self,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, Option<(Size, Align)>> {
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() {
return Ok(Some((layout.size, layout.align.abi)));
}
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, Option<(Size, Align)>> {
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
self.size_and_align_of(mplace.meta, mplace.layout)
}
&mut self,
instance: ty::Instance<'tcx>,
span: source_map::Span,
- mir: &'mir mir::Body<'tcx>,
+ body: &'mir mir::Body<'tcx>,
return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
return_to_block: StackPopCleanup,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if self.stack.len() > 0 {
info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
}
// first push a stack frame so we have access to the local substs
let extra = M::stack_push(self)?;
self.stack.push(Frame {
- mir,
+ body,
block: mir::START_BLOCK,
return_to_block,
return_place,
});
// don't allocate at all for trivial constants
- if mir.local_decls.len() > 1 {
+ if body.local_decls.len() > 1 {
// Locals are initially uninitialized.
let dummy = LocalState {
value: LocalValue::Uninitialized,
layout: Cell::new(None),
};
- let mut locals = IndexVec::from_elem(dummy, &mir.local_decls);
+ let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
// Return place is handled specially by the `eval_place` functions, and the
// entry in `locals` should never be used. Make it dead, to be sure.
locals[mir::RETURN_PLACE].value = LocalValue::Dead;
| Some(DefKind::Const)
| Some(DefKind::AssocConst) => {},
_ => {
- trace!("push_stack_frame: {:?}: num_bbs: {}", span, mir.basic_blocks().len());
- for block in mir.basic_blocks() {
+ trace!("push_stack_frame: {:?}: num_bbs: {}", span, body.basic_blocks().len());
+ for block in body.basic_blocks() {
for stmt in block.statements.iter() {
use rustc::mir::StatementKind::{StorageDead, StorageLive};
match stmt.kind {
}
}
- pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
+ pub(super) fn pop_stack_frame(&mut self) -> InterpResult<'tcx> {
info!("LEAVING({}) {}", self.cur_frame(), self.frame().instance);
::log_settings::settings().indentation -= 1;
let frame = self.stack.pop().expect(
pub fn storage_live(
&mut self,
local: mir::Local
- ) -> EvalResult<'tcx, LocalValue<M::PointerTag>> {
+ ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
pub(super) fn deallocate_local(
&mut self,
local: LocalValue<M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// FIXME: should we tell the user that there was a local which was never written to?
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
trace!("deallocating local");
pub fn const_eval_raw(
&self,
gid: GlobalId<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let param_env = if self.tcx.is_static(gid.instance.def_id()) {
ty::ParamEnv::reveal_all()
} else {
pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> Vec<FrameInfo<'tcx>> {
let mut last_span = None;
let mut frames = Vec::new();
- for &Frame { instance, span, mir, block, stmt, .. } in self.stack().iter().rev() {
+ for &Frame { instance, span, body, block, stmt, .. } in self.stack().iter().rev() {
// make sure we don't emit frames that are duplicates of the previous
if explicit_span == Some(span) {
last_span = Some(span);
} else {
last_span = Some(span);
}
- let block = &mir.basic_blocks()[block];
+ let block = &body.basic_blocks()[block];
let source_info = if stmt < block.statements.len() {
block.statements[stmt].source_info
} else {
block.terminator().source_info
};
- let lint_root = match mir.source_scope_local_data {
+ let lint_root = match body.source_scope_local_data {
mir::ClearCrossCrate::Set(ref ivs) => Some(ivs[source_info.scope].lint_root),
mir::ClearCrossCrate::Clear => None,
};
use rustc::ty::layout::{LayoutOf, Primitive, Size};
use rustc::mir::BinOp;
use rustc::mir::interpret::{
- EvalResult, InterpError, Scalar,
+ InterpResult, InterpError, Scalar,
};
use super::{
name: &str,
bits: u128,
kind: Primitive,
-) -> EvalResult<'tcx, Scalar<Tag>> {
+) -> InterpResult<'tcx, Scalar<Tag>> {
let size = match kind {
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, bool> {
+ ) -> InterpResult<'tcx, bool> {
let substs = instance.substs;
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
- ) -> EvalResult<'tcx, bool> {
+ ) -> InterpResult<'tcx, bool> {
let def_id = instance.def_id();
// Some fn calls are actually BinOp intrinsics
if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) {
use rustc::ty::{self, query::TyCtxtAt};
use super::{
- Allocation, AllocId, EvalResult, Scalar, AllocationExtra,
+ Allocation, AllocId, InterpResult, Scalar, AllocationExtra,
InterpretCx, PlaceTy, OpTy, ImmTy, MemoryKind,
};
/// Called before a basic block terminator is executed.
/// You can use this to detect endlessly running programs.
- fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>;
+ fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> InterpResult<'tcx>;
/// Entry point to all function calls.
///
args: &[OpTy<'tcx, Self::PointerTag>],
dest: Option<PlaceTy<'tcx, Self::PointerTag>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
+ ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
/// Directly process an intrinsic without pushing a stack frame.
/// If this returns successfully, the engine will take care of jumping to the next block.
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx, Self::PointerTag>],
dest: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
/// Called for read access to a foreign static item.
///
fn find_foreign_static(
def_id: DefId,
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation>>;
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation>>;
/// Called for all binary operations on integer(-like) types when one operand is a pointer
/// value, and for the `Offset` operation that is inherently about pointers.
bin_op: mir::BinOp,
left: ImmTy<'tcx, Self::PointerTag>,
right: ImmTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
+ ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool)>;
/// Heap allocations via the `box` keyword.
fn box_alloc(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
dest: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
/// Called to initialize the "extra" state of an allocation and make the pointers
/// it contains (in relocations) tagged. The way we construct allocations is
_ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
_kind: mir::RetagKind,
_place: PlaceTy<'tcx, Self::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
Ok(())
}
/// Called immediately before a new stack frame got pushed
fn stack_push(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
- ) -> EvalResult<'tcx, Self::FrameExtra>;
+ ) -> InterpResult<'tcx, Self::FrameExtra>;
/// Called immediately after a stack frame gets popped
fn stack_pop(
ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>,
extra: Self::FrameExtra,
- ) -> EvalResult<'tcx>;
+ ) -> InterpResult<'tcx>;
}
use super::{
Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
- EvalResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
+ InterpResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InboundsCheck,
};
new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
- ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
if ptr.offset.bytes() != 0 {
return err!(ReallocateNonBasePtr);
}
}
/// Deallocate a local, or do nothing if that local has been made into a static
- pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx> {
+ pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
// The allocation might be already removed by static interning.
// This can only really happen in the CTFE instance, not in miri.
if self.alloc_map.contains_key(&ptr.alloc_id) {
ptr: Pointer<M::PointerTag>,
size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
if ptr.offset.bytes() != 0 {
&self,
ptr: Scalar<M::PointerTag>,
required_align: Align
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
Err(ptr) => {
ptr: Pointer<M::PointerTag>,
liveness: InboundsCheck,
msg: CheckInAllocMsg,
- ) -> EvalResult<'tcx, Align> {
+ ) -> InterpResult<'tcx, Align> {
let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?;
ptr.check_in_alloc(allocation_size, msg)?;
Ok(align)
id: AllocId,
tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
memory_extra: &M::MemoryExtra,
- ) -> EvalResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
let alloc = tcx.alloc_map.lock().get(id);
let alloc = match alloc {
Some(GlobalAlloc::Memory(mem)) =>
).0)
}
- pub fn get(&self, id: AllocId) -> EvalResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
+ pub fn get(
+ &self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
// The error type of the inner closure here is somewhat funny. We have two
// ways of "erroring": An actual error, or because we got a reference from
// `get_static_alloc` that we can actually use directly without inserting anything anywhere.
- // So the error type is `EvalResult<'tcx, &Allocation<M::PointerTag>>`.
+ // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
let a = self.alloc_map.get_or(id, || {
let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?;
match alloc {
pub fn get_mut(
&mut self,
id: AllocId,
- ) -> EvalResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
+ ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
let tcx = self.tcx;
let memory_extra = &self.extra;
let a = self.alloc_map.get_mut_or(id, || {
&self,
id: AllocId,
liveness: InboundsCheck,
- ) -> EvalResult<'static, (Size, Align)> {
+ ) -> InterpResult<'static, (Size, Align)> {
if let Ok(alloc) = self.get(id) {
return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
}
}
}
- pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> EvalResult<'tcx, Instance<'tcx>> {
+ pub fn get_fn(&self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx, Instance<'tcx>> {
if ptr.offset.bytes() != 0 {
return err!(InvalidFunctionPointer);
}
}
}
- pub fn mark_immutable(&mut self, id: AllocId) -> EvalResult<'tcx> {
+ pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
self.get_mut(id)?.mutability = Mutability::Immutable;
Ok(())
}
&self,
ptr: Scalar<M::PointerTag>,
size: Size,
- ) -> EvalResult<'tcx, &[u8]> {
+ ) -> InterpResult<'tcx, &[u8]> {
if size.bytes() == 0 {
Ok(&[])
} else {
&mut self,
alloc_id: AllocId,
mutability: Mutability,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!(
"mark_static_initialized {:?}, mutability: {:?}",
alloc_id,
dest_align: Align,
size: Size,
nonoverlapping: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
}
size: Size,
length: u64,
nonoverlapping: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
if size.bytes() == 0 {
dest: Pointer<M::PointerTag>,
size: Size,
repeat: u64,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// The bits have to be saved locally before writing to dest in case src and dest overlap.
assert_eq!(size.bytes() as usize as u64, size.bytes());
use rustc::mir::interpret::{
GlobalId, AllocId, CheckInAllocMsg,
ConstValue, Pointer, Scalar,
- EvalResult, InterpError, InboundsCheck,
+ InterpResult, InterpError, InboundsCheck,
sign_extend, truncate,
};
use super::{
}
#[inline]
- pub fn to_scalar(self) -> EvalResult<'tcx, Scalar<Tag>> {
+ pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
self.to_scalar_or_undef().not_undef()
}
#[inline]
- pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
+ pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
match self {
Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?))
/// Converts the immediate into a pointer (or a pointer-sized integer).
/// Throws away the second half of a ScalarPair!
#[inline]
- pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar<Tag>> {
+ pub fn to_scalar_ptr(self) -> InterpResult<'tcx, Scalar<Tag>> {
match self {
Immediate::Scalar(ptr) |
Immediate::ScalarPair(ptr, _) => ptr.not_undef(),
/// Converts the value into its metadata.
/// Throws away the first half of a ScalarPair!
#[inline]
- pub fn to_meta(self) -> EvalResult<'tcx, Option<Scalar<Tag>>> {
+ pub fn to_meta(self) -> InterpResult<'tcx, Option<Scalar<Tag>>> {
Ok(match self {
Immediate::Scalar(_) => None,
Immediate::ScalarPair(_, meta) => Some(meta.not_undef()?),
}
#[inline]
- pub fn to_bits(self) -> EvalResult<'tcx, u128> {
+ pub fn to_bits(self) -> InterpResult<'tcx, u128> {
self.to_scalar()?.to_bits(self.layout.size)
}
}
#[inline(always)]
pub(super) fn from_known_layout<'tcx>(
layout: Option<TyLayout<'tcx>>,
- compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>>
-) -> EvalResult<'tcx, TyLayout<'tcx>> {
+ compute: impl FnOnce() -> InterpResult<'tcx, TyLayout<'tcx>>
+) -> InterpResult<'tcx, TyLayout<'tcx>> {
match layout {
None => compute(),
Some(layout) => {
fn try_read_immediate_from_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Option<Immediate<M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Option<Immediate<M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
pub(crate) fn try_read_immediate(
&self,
src: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Result<Immediate<M::PointerTag>, MemPlace<M::PointerTag>>> {
Ok(match src.try_as_mplace() {
Ok(mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
pub fn read_immediate(
&self,
op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
if let Ok(imm) = self.try_read_immediate(op)? {
Ok(ImmTy { imm, layout: op.layout })
} else {
pub fn read_scalar(
&self,
op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
+ ) -> InterpResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
Ok(self.read_immediate(op)?.to_scalar_or_undef())
}
pub fn read_str(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, &str> {
+ ) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
let str = ::std::str::from_utf8(bytes)
&self,
op: OpTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace() {
Ok(mplace) => {
// The easy case
&self,
op: OpTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace() {
Ok(mplace) => {
&self,
base: OpTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.operand_field(base, field.index() as u64)?,
frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
local: mir::Local,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
assert_ne!(local, mir::RETURN_PLACE);
let layout = self.layout_of_local(frame, local, layout)?;
let op = if layout.is_zst() {
pub fn place_to_op(
&self,
place: PlaceTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let op = match *place {
Place::Ptr(mplace) => {
Operand::Indirect(mplace)
&self,
mir_place: &mir::Place<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Place;
use rustc::mir::PlaceBase;
&self,
mir_op: &mir::Operand<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::Operand::*;
let op = match *mir_op {
// FIXME: do some more logic on `move` to invalidate the old location
pub(super) fn eval_operands(
&self,
ops: &[mir::Operand<'tcx>],
- ) -> EvalResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
ops.into_iter()
.map(|op| self.eval_operand(op, None))
.collect()
&self,
val: &'tcx ty::Const<'tcx>,
layout: Option<TyLayout<'tcx>>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let tag_scalar = |scalar| match scalar {
Scalar::Ptr(ptr) => Scalar::Ptr(self.tag_static_base_pointer(ptr)),
Scalar::Raw { data, size } => Scalar::Raw { data, size },
pub fn read_discriminant(
&self,
rval: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, (u128, VariantIdx)> {
+ ) -> InterpResult<'tcx, (u128, VariantIdx)> {
trace!("read_discriminant_value {:#?}", rval.layout);
let (discr_kind, discr_index) = match rval.layout.variants {
use rustc::mir;
-use rustc::ty::{self, layout::{Size, TyLayout}};
+use rustc::ty::{self, layout::TyLayout};
use syntax::ast::FloatTy;
-use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
-use rustc::mir::interpret::{EvalResult, Scalar};
+use rustc::mir::interpret::{InterpResult, Scalar};
use super::{InterpretCx, PlaceTy, Immediate, Machine, ImmTy};
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (val, overflowed) = self.binary_op(op, left, right)?;
let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
self.write_immediate(val, dest)
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (val, _overflowed) = self.binary_op(op, left, right)?;
self.write_scalar(val, dest)
}
bin_op: mir::BinOp,
l: char,
r: char,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;
let res = match bin_op {
Ge => l >= r,
_ => bug!("Invalid operation on char: {:?}", bin_op),
};
- return Ok((Scalar::from_bool(res), false));
+ return (Scalar::from_bool(res), false);
}
fn binary_bool_op(
bin_op: mir::BinOp,
l: bool,
r: bool,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;
let res = match bin_op {
BitXor => l ^ r,
_ => bug!("Invalid operation on bool: {:?}", bin_op),
};
- return Ok((Scalar::from_bool(res), false));
+ return (Scalar::from_bool(res), false);
}
- fn binary_float_op(
+ fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
&self,
bin_op: mir::BinOp,
- fty: FloatTy,
- // passing in raw bits
- l: u128,
- r: u128,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ l: F,
+ r: F,
+ ) -> (Scalar<M::PointerTag>, bool) {
use rustc::mir::BinOp::*;
- macro_rules! float_math {
- ($ty:path, $size:expr) => {{
- let l = <$ty>::from_bits(l);
- let r = <$ty>::from_bits(r);
- let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>|
- Scalar::from_uint(res.value.to_bits(), Size::from_bytes($size));
- let val = match bin_op {
- Eq => Scalar::from_bool(l == r),
- Ne => Scalar::from_bool(l != r),
- Lt => Scalar::from_bool(l < r),
- Le => Scalar::from_bool(l <= r),
- Gt => Scalar::from_bool(l > r),
- Ge => Scalar::from_bool(l >= r),
- Add => bitify(l + r),
- Sub => bitify(l - r),
- Mul => bitify(l * r),
- Div => bitify(l / r),
- Rem => bitify(l % r),
- _ => bug!("invalid float op: `{:?}`", bin_op),
- };
- return Ok((val, false));
- }};
- }
- match fty {
- FloatTy::F32 => float_math!(Single, 4),
- FloatTy::F64 => float_math!(Double, 8),
- }
+ let val = match bin_op {
+ Eq => Scalar::from_bool(l == r),
+ Ne => Scalar::from_bool(l != r),
+ Lt => Scalar::from_bool(l < r),
+ Le => Scalar::from_bool(l <= r),
+ Gt => Scalar::from_bool(l > r),
+ Ge => Scalar::from_bool(l >= r),
+ Add => (l + r).value.into(),
+ Sub => (l - r).value.into(),
+ Mul => (l * r).value.into(),
+ Div => (l / r).value.into(),
+ Rem => (l % r).value.into(),
+ _ => bug!("invalid float op: `{:?}`", bin_op),
+ };
+ return (val, false);
}
fn binary_int_op(
left_layout: TyLayout<'tcx>,
r: u128,
right_layout: TyLayout<'tcx>,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
use rustc::mir::BinOp::*;
// Shift ops can have an RHS with a different numeric type.
bin_op: mir::BinOp,
left: ImmTy<'tcx, M::PointerTag>,
right: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, (Scalar<M::PointerTag>, bool)> {
+ ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool)> {
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, *left, left.layout.ty, *right, right.layout.ty);
match left.layout.ty.sty {
ty::Char => {
assert_eq!(left.layout.ty, right.layout.ty);
- let left = left.to_scalar()?.to_char()?;
- let right = right.to_scalar()?.to_char()?;
- self.binary_char_op(bin_op, left, right)
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
}
ty::Bool => {
assert_eq!(left.layout.ty, right.layout.ty);
- let left = left.to_scalar()?.to_bool()?;
- let right = right.to_scalar()?.to_bool()?;
- self.binary_bool_op(bin_op, left, right)
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
- let left = left.to_bits()?;
- let right = right.to_bits()?;
- self.binary_float_op(bin_op, fty, left, right)
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(match fty {
+ FloatTy::F32 => self.binary_float_op(bin_op, left.to_f32()?, right.to_f32()?),
+ FloatTy::F64 => self.binary_float_op(bin_op, left.to_f64()?, right.to_f64()?),
+ })
}
_ => {
// Must be integer(-like) types. Don't forget about == on fn pointers.
- assert!(left.layout.ty.is_integral() || left.layout.ty.is_unsafe_ptr() ||
- left.layout.ty.is_fn());
- assert!(right.layout.ty.is_integral() || right.layout.ty.is_unsafe_ptr() ||
- right.layout.ty.is_fn());
+ assert!(
+ left.layout.ty.is_integral() ||
+ left.layout.ty.is_unsafe_ptr() || left.layout.ty.is_fn_ptr(),
+ "Unexpected LHS type {:?} for BinOp {:?}", left.layout.ty, bin_op);
+ assert!(
+ right.layout.ty.is_integral() ||
+ right.layout.ty.is_unsafe_ptr() || right.layout.ty.is_fn_ptr(),
+ "Unexpected RHS type {:?} for BinOp {:?}", right.layout.ty, bin_op);
// Handle operations that support pointer values
if left.to_scalar_ptr()?.is_ptr() ||
&self,
un_op: mir::UnOp,
val: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, Scalar<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
use rustc::mir::UnOp::*;
let layout = val.layout;
Ok(Scalar::from_bool(res))
}
ty::Float(fty) => {
- let val = val.to_bits(layout.size)?;
let res = match (un_op, fty) {
- (Neg, FloatTy::F32) => Single::to_bits(-Single::from_bits(val)),
- (Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)),
+ (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+ (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => bug!("Invalid float op {:?}", un_op)
};
- Ok(Scalar::from_uint(res, layout.size))
+ Ok(res)
}
_ => {
assert!(layout.ty.is_integral());
use rustc::ty::TypeFoldable;
use super::{
- GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
+ GlobalId, AllocId, Allocation, Scalar, InterpResult, Pointer, PointerArithmetic,
InterpretCx, Machine, AllocMap, AllocationExtra,
RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind, LocalValue
};
/// Extract the ptr part of the mplace
#[inline(always)]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
// At this point, we forget about the alignment information --
// the place has been turned into a reference, and no matter where it came from,
// it now must be aligned.
offset: Size,
meta: Option<Scalar<Tag>>,
cx: &impl HasDataLayout,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
Ok(MemPlace {
ptr: self.ptr.ptr_offset(offset, cx)?,
align: self.align.restrict_for_offset(offset),
meta: Option<Scalar<Tag>>,
layout: TyLayout<'tcx>,
cx: &impl HasDataLayout,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
mplace: self.mplace.offset(offset, meta, cx)?,
layout,
}
#[inline]
- pub(super) fn len(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> {
+ pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
if self.layout.is_unsized() {
// We need to consult `meta` metadata
match self.layout.ty.sty {
}
#[inline]
- pub(super) fn vtable(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub(super) fn vtable(self) -> InterpResult<'tcx, Pointer<Tag>> {
match self.layout.ty.sty {
ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(),
_ => bug!("vtable not supported on type {:?}", self.layout.ty),
}
#[inline]
- pub fn to_ptr(self) -> EvalResult<'tcx, Pointer<Tag>> {
+ pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
self.to_mem_place().to_ptr()
}
}
pub fn ref_to_mplace(
&self,
val: ImmTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty;
let layout = self.layout_of(pointee_type)?;
pub fn deref_operand(
&self,
src: OpTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val);
self.ref_to_mplace(val)
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Not using the layout method because we want to compute on u64
let offset = match base.layout.fields {
layout::FieldPlacement::Arbitrary { ref offsets, .. } =>
&self,
base: MPlaceTy<'tcx, Tag>,
) ->
- EvalResult<'tcx, impl Iterator<Item=EvalResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
+ InterpResult<'tcx, impl Iterator<Item=InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
{
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let stride = match base.layout.fields {
base: MPlaceTy<'tcx, M::PointerTag>,
from: u64,
to: u64,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
assert!(from <= len - to);
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
assert!(base.meta.is_none());
Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
&self,
base: MPlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::PlaceElem<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.mplace_field(base, field.index() as u64)?,
&mut self,
base: PlaceTy<'tcx, M::PointerTag>,
field: u64,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// FIXME: We could try to be smarter and avoid allocation for fields that span the
// entire place.
let mplace = self.force_allocation(base)?;
&self,
base: PlaceTy<'tcx, M::PointerTag>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
// Downcast just changes the layout
Ok(match base.place {
Place::Ptr(mplace) =>
&mut self,
base: PlaceTy<'tcx, M::PointerTag>,
proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
Field(field, _) => self.place_field(base, field.index() as u64)?,
pub(super) fn eval_static_to_mplace(
&self,
place_static: &mir::Static<'tcx>
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::StaticKind;
Ok(match place_static.kind {
pub fn eval_place(
&mut self,
mir_place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::PlaceBase;
mir_place.iterate(|place_base, place_projection| {
PlaceTy {
place: *return_place,
layout: self
- .layout_of(self.monomorphize(self.frame().mir.return_ty())?)?,
+ .layout_of(self.monomorphize(self.frame().body.return_ty())?)?,
}
}
None => return err!(InvalidNullPointerUsage),
&mut self,
val: impl Into<ScalarMaybeUndef<M::PointerTag>>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.write_immediate(Immediate::Scalar(val.into()), dest)
}
&mut self,
src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.write_immediate_no_validate(src, dest)?;
if M::enforce_validity(self) {
Ok(())
}
+ /// Write an `Immediate` to memory.
+ #[inline(always)]
+ pub fn write_immediate_to_mplace(
+ &mut self,
+ src: Immediate<M::PointerTag>,
+ dest: MPlaceTy<'tcx, M::PointerTag>,
+ ) -> InterpResult<'tcx> {
+ self.write_immediate_to_mplace_no_validate(src, dest)?;
+
+ if M::enforce_validity(self) {
+ // Data got changed, better make sure it matches the type!
+ self.validate_operand(dest.into(), vec![], None, /*const_mode*/ false)?;
+ }
+
+ Ok(())
+ }
+
/// Write an immediate to a place.
/// If you use this you are responsible for validating that things got copied at the
/// right type.
&mut self,
src: Immediate<M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if cfg!(debug_assertions) {
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
&mut self,
value: Immediate<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let (ptr, ptr_align) = dest.to_scalar_ptr_align();
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.copy_op_no_validate(src, dest)?;
if M::enforce_validity(self) {
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
assert!(src.layout.details == dest.layout.details,
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if src.layout.details == dest.layout.details {
// Fast path: Just use normal `copy_op`
return self.copy_op(src, dest);
&mut self,
place: PlaceTy<'tcx, M::PointerTag>,
meta: Option<Scalar<M::PointerTag>>,
- ) -> EvalResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
let (mplace, size) = match place.place {
Place::Local { frame, local } => {
match self.stack[frame].locals[local].access_mut()? {
pub fn force_allocation(
&mut self,
place: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
Ok(self.force_allocation_maybe_sized(place, None)?.0)
}
&mut self,
variant_index: VariantIdx,
dest: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
match dest.layout.variants {
layout::Variants::Single { index } => {
assert_eq!(index, variant_index);
pub fn raw_const_to_mplace(
&self,
raw: RawConst<'tcx>,
- ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
// This must be an allocation in `tcx`
assert!(self.tcx.alloc_map.lock().get(raw.alloc_id).is_some());
let ptr = self.tag_static_base_pointer(Pointer::from(raw.alloc_id));
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
/// Also return some more information so drop doesn't have to run the same code twice.
pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
- -> EvalResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
+ -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable()?; // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;
use rustc::mir::interpret::{
AllocId, Pointer, Scalar,
Relocations, Allocation, UndefMask,
- EvalResult, InterpError,
+ InterpResult, InterpError,
};
use rustc::ty::{self, TyCtxt};
#[derive(Default)]
pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir> {
- /// The set of all `EvalSnapshot` *hashes* observed by this detector.
+ /// The set of all `InterpSnapshot` *hashes* observed by this detector.
///
/// When a collision occurs in this table, we store the full snapshot in
/// `snapshots`.
hashes: FxHashSet<u64>,
- /// The set of all `EvalSnapshot`s observed by this detector.
+ /// The set of all `InterpSnapshot`s observed by this detector.
///
- /// An `EvalSnapshot` will only be fully cloned once it has caused a
+ /// An `InterpSnapshot` will only be fully cloned once it has caused a
/// collision in `hashes`. As a result, the detector must observe at least
/// *two* full cycles of an infinite loop before it triggers.
- snapshots: FxHashSet<EvalSnapshot<'a, 'mir, 'tcx>>,
+ snapshots: FxHashSet<InterpSnapshot<'a, 'mir, 'tcx>>,
}
impl<'a, 'mir, 'tcx> InfiniteLoopDetector<'a, 'mir, 'tcx>
span: Span,
memory: &Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
stack: &[Frame<'mir, 'tcx>],
- ) -> EvalResult<'tcx, ()> {
+ ) -> InterpResult<'tcx, ()> {
// Compute stack's hash before copying anything
let mut hcx = tcx.get_stable_hashing_context();
let mut hasher = StableHasher::<u64>::new();
// We need to make a full copy. NOW things that to get really expensive.
info!("snapshotting the state of the interpreter");
- if self.snapshots.insert(EvalSnapshot::new(memory, stack)) {
+ if self.snapshots.insert(InterpSnapshot::new(memory, stack)) {
// Spurious collision or first cycle
return Ok(())
}
stmt: usize,
}
-impl_stable_hash_for!(impl<'mir, 'tcx: 'mir> for struct Frame<'mir, 'tcx> {
- mir,
+impl_stable_hash_for!(impl<> for struct Frame<'mir, 'tcx> {
+ body,
instance,
span,
return_to_block,
fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
let Frame {
- mir: _,
+ body: _,
instance,
span,
return_to_block,
/// The virtual machine state during const-evaluation at a given point in time.
/// We assume the `CompileTimeInterpreter` has no interesting extra state that
/// is worth considering here.
-struct EvalSnapshot<'a, 'mir, 'tcx: 'a + 'mir> {
+struct InterpSnapshot<'a, 'mir, 'tcx: 'a + 'mir> {
memory: Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
stack: Vec<Frame<'mir, 'tcx>>,
}
-impl<'a, 'mir, 'tcx: 'a + 'mir> EvalSnapshot<'a, 'mir, 'tcx>
+impl<'a, 'mir, 'tcx: 'a + 'mir> InterpSnapshot<'a, 'mir, 'tcx>
{
fn new(
memory: &Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>,
stack: &[Frame<'mir, 'tcx>]
) -> Self {
- EvalSnapshot {
+ InterpSnapshot {
memory: memory.clone(),
stack: stack.into(),
}
}
-impl<'a, 'mir, 'tcx> Hash for EvalSnapshot<'a, 'mir, 'tcx>
+impl<'a, 'mir, 'tcx> Hash for InterpSnapshot<'a, 'mir, 'tcx>
{
fn hash<H: Hasher>(&self, state: &mut H) {
// Implement in terms of hash stable, so that k1 == k2 -> hash(k1) == hash(k2)
}
}
-impl_stable_hash_for!(impl<'tcx, 'b, 'mir> for struct EvalSnapshot<'b, 'mir, 'tcx> {
+impl_stable_hash_for!(impl<> for struct InterpSnapshot<'_, 'mir, 'tcx> {
// Not hashing memory: Avoid hashing memory all the time during execution
memory -> _,
stack,
});
-impl<'a, 'mir, 'tcx> Eq for EvalSnapshot<'a, 'mir, 'tcx>
+impl<'a, 'mir, 'tcx> Eq for InterpSnapshot<'a, 'mir, 'tcx>
{}
-impl<'a, 'mir, 'tcx> PartialEq for EvalSnapshot<'a, 'mir, 'tcx>
+impl<'a, 'mir, 'tcx> PartialEq for InterpSnapshot<'a, 'mir, 'tcx>
{
fn eq(&self, other: &Self) -> bool {
// FIXME: This looks to be a *ridiculously expensive* comparison operation.
use rustc::mir;
use rustc::ty::layout::LayoutOf;
-use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
+use rustc::mir::interpret::{InterpResult, Scalar, PointerArithmetic};
use super::{InterpretCx, Machine};
}
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
- pub fn run(&mut self) -> EvalResult<'tcx> {
+ pub fn run(&mut self) -> InterpResult<'tcx> {
while self.step()? {}
Ok(())
}
/// Returns `true` as long as there are more things to do.
///
/// This is used by [priroda](https://github.com/oli-obk/priroda)
- pub fn step(&mut self) -> EvalResult<'tcx, bool> {
+ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
if self.stack.is_empty() {
return Ok(false);
}
let block = self.frame().block;
let stmt_id = self.frame().stmt;
- let mir = self.mir();
- let basic_block = &mir.basic_blocks()[block];
+ let body = self.body();
+ let basic_block = &body.basic_blocks()[block];
let old_frames = self.cur_frame();
Ok(true)
}
- fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
+ fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", stmt);
use rustc::mir::StatementKind::*;
&mut self,
rvalue: &mir::Rvalue<'tcx>,
place: &mir::Place<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let dest = self.eval_place(place)?;
use rustc::mir::Rvalue::*;
Ok(())
}
- fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
+ fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", terminator.kind);
self.tcx.span = terminator.source_info.span;
self.memory.tcx.span = terminator.source_info.span;
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
-use rustc::mir::interpret::{EvalResult, PointerArithmetic, InterpError, Scalar};
+use rustc::mir::interpret::{InterpResult, PointerArithmetic, InterpError, Scalar};
use super::{
InterpretCx, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup
};
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
#[inline]
- pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> EvalResult<'tcx> {
+ pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
if let Some(target) = target {
self.frame_mut().block = target;
self.frame_mut().stmt = 0;
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
rust_abi: bool,
caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>,
callee_arg: PlaceTy<'tcx, M::PointerTag>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
if rust_abi && callee_arg.layout.is_zst() {
// Nothing to do.
trace!("Skipping callee ZST");
args: &[OpTy<'tcx, M::PointerTag>],
dest: Option<PlaceTy<'tcx, M::PointerTag>>,
ret: Option<mir::BasicBlock>,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
match instance.def {
}
// We need MIR for this fn
- let mir = match M::find_fn(self, instance, args, dest, ret)? {
- Some(mir) => mir,
+ let body = match M::find_fn(self, instance, args, dest, ret)? {
+ Some(body) => body,
None => return Ok(()),
};
self.push_stack_frame(
instance,
span,
- mir,
+ body,
dest,
StackPopCleanup::Goto(ret),
)?;
);
trace!(
"spread_arg: {:?}, locals: {:#?}",
- mir.spread_arg,
- mir.args_iter()
+ body.spread_arg,
+ body.args_iter()
.map(|local|
(local, self.layout_of_local(self.frame(), local, None).unwrap().ty)
)
.chain((0..untuple_arg.layout.fields.count()).into_iter()
.map(|i| self.operand_field(untuple_arg, i as u64))
)
- .collect::<EvalResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
+ .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
} else {
// Plain arg passing
Cow::from(args)
// this is a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
// not advance `caller_iter` for ZSTs.
- let mut locals_iter = mir.args_iter();
+ let mut locals_iter = body.args_iter();
while let Some(local) = locals_iter.next() {
let dest = self.eval_place(
&mir::Place::Base(mir::PlaceBase::Local(local))
)?;
- if Some(local) == mir.spread_arg {
+ if Some(local) == body.spread_arg {
// Must be a tuple
for i in 0..dest.layout.fields.count() {
let dest = self.place_field(dest, i as u64)?;
instance: ty::Instance<'tcx>,
span: Span,
target: mir::BasicBlock,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout::{Size, Align, LayoutOf};
-use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
+use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic};
use super::{InterpretCx, InterpError, Machine, MemoryKind};
&mut self,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
- ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
+ ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
pub fn read_drop_type_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
- ) -> EvalResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let drop_fn = self.memory
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
- ) -> EvalResult<'tcx, (Size, Align)> {
+ ) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
let alloc = self.memory.get(vtable.alloc_id)?;
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
- Scalar, GlobalAlloc, EvalResult, InterpError, CheckInAllocMsg,
+ Scalar, GlobalAlloc, InterpResult, InterpError, CheckInAllocMsg,
};
use super::{
pub todo: Vec<(T, Vec<PathElem>)>,
}
-impl<'tcx, T: Copy + Eq + Hash> RefTracking<T> {
+impl<T: Copy + Eq + Hash> RefTracking<T> {
pub fn new(op: T) -> Self {
let mut ref_tracking = RefTracking {
seen: FxHashSet::default(),
&mut self,
new_op: OpTy<'tcx, M::PointerTag>,
elem: PathElem,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
// Remember the old state
let path_len = self.path.len();
// Perform operation
old_op: OpTy<'tcx, M::PointerTag>,
field: usize,
new_op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let elem = self.aggregate_field_path_elem(old_op.layout, field);
self.visit_elem(new_op, elem)
}
old_op: OpTy<'tcx, M::PointerTag>,
variant_id: VariantIdx,
new_op: OpTy<'tcx, M::PointerTag>
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.sty {
ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
// Generators also have variants
}
#[inline]
- fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx>
+ fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{
trace!("visit_value: {:?}, {:?}", *op, op.layout);
// Translate some possible errors to something nicer.
}
}
- fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx>
+ fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx>
{
let value = self.ecx.read_immediate(value)?;
// Go over all the primitive types
Ok(())
}
- fn visit_uninhabited(&mut self) -> EvalResult<'tcx>
+ fn visit_uninhabited(&mut self) -> InterpResult<'tcx>
{
validation_failure!("a value of an uninhabited type", self.path)
}
&mut self,
op: OpTy<'tcx, M::PointerTag>,
layout: &layout::Scalar,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
let value = self.ecx.read_scalar(op)?;
// Determine the allowed range
let (lo, hi) = layout.valid_range.clone().into_inner();
fn visit_aggregate(
&mut self,
op: OpTy<'tcx, M::PointerTag>,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
match op.layout.ty.sty {
ty::Str => {
let mplace = op.to_mem_place(); // strings are never immediate
path: Vec<PathElem>,
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>>>,
const_mode: bool,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
trace!("validate_operand: {:?}, {:?}", *op, op.layout.ty);
// Construct a visitor
use rustc::ty::layout::{self, TyLayout, VariantIdx};
use rustc::ty;
use rustc::mir::interpret::{
- EvalResult,
+ InterpResult,
};
use super::{
fn to_op(
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>>;
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
/// Creates this from an `MPlaceTy`.
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self>;
+ ) -> InterpResult<'tcx, Self>;
/// Projects to the n-th field.
fn project_field(
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self>;
+ ) -> InterpResult<'tcx, Self>;
}
// Operands and memory-places are both values.
fn to_op(
self,
_ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
Ok(self)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.operand_downcast(self, variant)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.operand_field(self, field)
}
}
fn to_op(
self,
_ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
- ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
Ok(self.into())
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
variant: VariantIdx,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.mplace_downcast(self, variant)
}
self,
ecx: &InterpretCx<'a, 'mir, 'tcx, M>,
field: u64,
- ) -> EvalResult<'tcx, Self> {
+ ) -> InterpResult<'tcx, Self> {
ecx.mplace_field(self, field)
}
}
// Recursive actions, ready to be overloaded.
/// Visits the given value, dispatching as appropriate to more specialized visitors.
#[inline(always)]
- fn visit_value(&mut self, v: Self::V) -> EvalResult<'tcx>
+ fn visit_value(&mut self, v: Self::V) -> InterpResult<'tcx>
{
self.walk_value(v)
}
/// Visits the given value as a union. No automatic recursion can happen here.
#[inline(always)]
- fn visit_union(&mut self, _v: Self::V) -> EvalResult<'tcx>
+ fn visit_union(&mut self, _v: Self::V) -> InterpResult<'tcx>
{
Ok(())
}
/// Visits this value as an aggregate, you are getting an iterator yielding
- /// all the fields (still in an `EvalResult`, you have to do error handling yourself).
+ /// all the fields (still in an `InterpResult`, you have to do error handling yourself).
/// Recurses into the fields.
#[inline(always)]
fn visit_aggregate(
&mut self,
v: Self::V,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
self.walk_aggregate(v, fields)
}
_old_val: Self::V,
_field: usize,
new_val: Self::V,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
_old_val: Self::V,
_variant: VariantIdx,
new_val: Self::V,
- ) -> EvalResult<'tcx> {
+ ) -> InterpResult<'tcx> {
self.visit_value(new_val)
}
/// it is meant to provide the chance for additional checks when a value of uninhabited
/// layout is detected.
#[inline(always)]
- fn visit_uninhabited(&mut self) -> EvalResult<'tcx>
+ fn visit_uninhabited(&mut self) -> InterpResult<'tcx>
{ Ok(()) }
/// Called whenever we reach a value with scalar layout.
/// We do NOT provide a `ScalarMaybeUndef` here to avoid accessing memory if the
/// it is meant to provide the chance for additional checks when a value of scalar
/// layout is detected.
#[inline(always)]
- fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> EvalResult<'tcx>
+ fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> InterpResult<'tcx>
{ Ok(()) }
/// Called whenever we reach a value of primitive type. There can be no recursion
/// We do *not* provide an `ImmTy` here because some implementations might want
/// to write to the place this primitive lives in.
#[inline(always)]
- fn visit_primitive(&mut self, _v: Self::V) -> EvalResult<'tcx>
+ fn visit_primitive(&mut self, _v: Self::V) -> InterpResult<'tcx>
{ Ok(()) }
// Default recursors. Not meant to be overloaded.
fn walk_aggregate(
&mut self,
v: Self::V,
- fields: impl Iterator<Item=EvalResult<'tcx, Self::V>>,
- ) -> EvalResult<'tcx> {
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
// Now iterate over it.
for (idx, field_val) in fields.enumerate() {
self.visit_field(v, idx, field_val?)?;
}
Ok(())
}
- fn walk_value(&mut self, v: Self::V) -> EvalResult<'tcx>
+ fn walk_value(&mut self, v: Self::V) -> InterpResult<'tcx>
{
trace!("walk_value: type: {}", v.layout().ty);
// If this is a multi-variant layout, we have to find the right one and proceed with
layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
// FIXME: We collect in a vec because otherwise there are lifetime
// errors: Projecting to a field needs access to `ecx`.
- let fields: Vec<EvalResult<'tcx, Self::V>> =
+ let fields: Vec<InterpResult<'tcx, Self::V>> =
(0..offsets.len()).map(|i| {
v.project_field(self.ecx(), i as u64)
})
#![feature(unicode_internals)]
#![feature(step_trait)]
#![feature(slice_concat_ext)]
+#![feature(trusted_len)]
#![feature(try_blocks)]
#![recursion_limit="256"]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(explicit_outlives_requirements)]
#[macro_use] extern crate log;
use rustc::ty::subst::InternalSubsts;
pub fn check(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
def_id: DefId) {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
if let Some(fn_like_node) = FnLikeNode::from_node(tcx.hir().get_by_hir_id(hir_id)) {
- check_fn_for_unconditional_recursion(tcx, fn_like_node.kind(), mir, def_id);
+ check_fn_for_unconditional_recursion(tcx, fn_like_node.kind(), body, def_id);
}
}
fn check_fn_for_unconditional_recursion(tcx: TyCtxt<'a, 'tcx, 'tcx>,
fn_kind: FnKind<'_>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
def_id: DefId) {
if let FnKind::Closure(_) = fn_kind {
// closures can't recur, so they don't matter.
// to have behaviour like the above, rather than
// e.g., accidentally recursing after an assert.
- let basic_blocks = mir.basic_blocks();
+ let basic_blocks = body.basic_blocks();
let mut reachable_without_self_call_queue = vec![mir::START_BLOCK];
let mut reached_exit_without_self_call = false;
let mut self_call_locations = vec![];
if let Some(ref terminator) = block.terminator {
match terminator.kind {
TerminatorKind::Call { ref func, .. } => {
- let func_ty = func.ty(mir, tcx);
+ let func_ty = func.ty(body, tcx);
if let ty::FnDef(fn_def_id, substs) = func_ty.sty {
let (call_fn_id, call_substs) =
use rustc::ty::print::obsolete::DefPathBasedNames;
use rustc::ty::adjustment::{CustomCoerceUnsized, PointerCast};
use rustc::session::config::EntryFnType;
-use rustc::mir::{self, Location, Place, PlaceBase, Promoted, Static, StaticKind};
+use rustc::mir::{self, Location, PlaceBase, Promoted, Static, StaticKind};
use rustc::mir::visit::Visitor as MirVisitor;
use rustc::mir::mono::{MonoItem, InstantiationMode};
use rustc::mir::interpret::{Scalar, GlobalId, GlobalAlloc, ErrorHandled};
struct MirNeighborCollector<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &'a mir::Body<'tcx>,
+ body: &'a mir::Body<'tcx>,
output: &'a mut Vec<MonoItem<'tcx>>,
param_substs: SubstsRef<'tcx>,
}
ty::ParamEnv::reveal_all(),
&target_ty,
);
- let source_ty = operand.ty(self.mir, self.tcx);
+ let source_ty = operand.ty(self.body, self.tcx);
let source_ty = self.tcx.subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
mir::Rvalue::Cast(
mir::CastKind::Pointer(PointerCast::ReifyFnPointer), ref operand, _
) => {
- let fn_ty = operand.ty(self.mir, self.tcx);
+ let fn_ty = operand.ty(self.body, self.tcx);
let fn_ty = self.tcx.subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
mir::Rvalue::Cast(
mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)), ref operand, _
) => {
- let source_ty = operand.ty(self.mir, self.tcx);
+ let source_ty = operand.ty(self.body, self.tcx);
let source_ty = self.tcx.subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
let tcx = self.tcx;
match *kind {
mir::TerminatorKind::Call { ref func, .. } => {
- let callee_ty = func.ty(self.mir, tcx);
+ let callee_ty = func.ty(self.body, tcx);
let callee_ty = tcx.subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
}
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
- let ty = location.ty(self.mir, self.tcx).ty;
+ let ty = location.ty(self.body, self.tcx).ty;
let ty = tcx.subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
self.super_terminator_kind(kind, location);
}
- fn visit_place(&mut self,
- place: &mir::Place<'tcx>,
- context: mir::visit::PlaceContext,
- location: Location) {
- match place {
- Place::Base(
- PlaceBase::Static(box Static{ kind:StaticKind::Static(def_id), .. })
- ) => {
+ fn visit_place_base(&mut self,
+ place_base: &mir::PlaceBase<'tcx>,
+ _context: mir::visit::PlaceContext,
+ location: Location) {
+ match place_base {
+ PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), .. }) => {
debug!("visiting static {:?} @ {:?}", def_id, location);
let tcx = self.tcx;
self.output.push(MonoItem::Static(*def_id));
}
}
- _ => {}
+ PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. }) => {
+ // FIXME: should we handle promoteds here instead of eagerly in collect_neighbours?
+ }
+ PlaceBase::Local(_) => {
+ // Locals have no relevance for collector
+ }
}
-
- self.super_place(place, context, location);
}
}
}
}
-fn create_fn_mono_item<'a, 'tcx>(instance: Instance<'tcx>) -> MonoItem<'tcx> {
+fn create_fn_mono_item<'tcx>(instance: Instance<'tcx>) -> MonoItem<'tcx> {
debug!("create_fn_mono_item(instance={})", instance);
MonoItem::Fn(instance)
}
instance: Instance<'tcx>,
output: &mut Vec<MonoItem<'tcx>>)
{
- let mir = tcx.instance_mir(instance.def);
+ let body = tcx.instance_mir(instance.def);
MirNeighborCollector {
tcx,
- mir: &mir,
+ body: &body,
output,
param_substs: instance.substs,
- }.visit_body(&mir);
+ }.visit_body(&body);
let param_env = ty::ParamEnv::reveal_all();
- for i in 0..mir.promoted.len() {
+ for i in 0..body.promoted.len() {
use rustc_data_structures::indexed_vec::Idx;
let i = Promoted::new(i);
let cid = GlobalId {
Ok(val) => collect_const(tcx, val, instance.substs, output),
Err(ErrorHandled::Reported) => {},
Err(ErrorHandled::TooGeneric) => span_bug!(
- mir.promoted[i].span, "collection encountered polymorphic constant",
+ body.promoted[i].span, "collection encountered polymorphic constant",
),
}
}
use rustc::hir;
use rustc::hir::def_id::DefId;
-use rustc::infer;
use rustc::mir::*;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::VariantIdx;
};
use crate::util::elaborate_drops::{self, DropElaborator, DropStyle, DropFlagMode};
use crate::util::patch::MirPatch;
+use crate::util::expand_aggregate;
pub fn provide(providers: &mut Providers<'_>) {
providers.mir_shims = make_shim;
// Check if this is a generator, if so, return the drop glue for it
if let Some(&ty::TyS { sty: ty::Generator(gen_def_id, substs, _), .. }) = ty {
- let mir = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
- return mir.subst(tcx, substs.substs);
+ let body = &**tcx.optimized_mir(gen_def_id).generator_drop.as_ref().unwrap();
+ return body.subst(tcx, substs.substs);
}
let substs = if let Some(ty) = ty {
block(&mut blocks, TerminatorKind::Goto { target: return_block });
block(&mut blocks, TerminatorKind::Return);
- let mut mir = Body::new(
+ let mut body = Body::new(
blocks,
IndexVec::from_elem_n(
SourceScopeData { span: span, parent_scope: None }, 1
let dropee_ptr = Place::Base(PlaceBase::Local(Local::new(1+0)));
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Function arguments should be retagged, and we make this one raw.
- mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement {
+ body.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, dropee_ptr.clone()),
});
let patch = {
let param_env = tcx.param_env(def_id).with_reveal_all();
let mut elaborator = DropShimElaborator {
- mir: &mir,
- patch: MirPatch::new(&mir),
+ body: &body,
+ patch: MirPatch::new(&body),
tcx,
param_env
};
);
elaborator.patch
};
- patch.apply(&mut mir);
+ patch.apply(&mut body);
}
- mir
+ body
}
pub struct DropShimElaborator<'a, 'tcx: 'a> {
- pub mir: &'a Body<'tcx>,
+ pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub param_env: ty::ParamEnv<'tcx>,
type Path = ();
fn patch(&mut self) -> &mut MirPatch<'tcx> { &mut self.patch }
- fn mir(&self) -> &'a Body<'tcx> { self.mir }
+ fn body(&self) -> &'a Body<'tcx> { self.body }
fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx }
fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env }
block(&mut blocks, vec![], TerminatorKind::Resume, true);
}
- let mut mir = Body::new(
+ let mut body = Body::new(
blocks,
IndexVec::from_elem_n(
SourceScopeData { span: span, parent_scope: None }, 1
vec![],
);
if let Abi::RustCall = sig.abi {
- mir.spread_arg = Some(Local::new(sig.inputs().len()));
+ body.spread_arg = Some(Local::new(sig.inputs().len()));
}
- mir
+ body
}
-pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>,
- ctor_id: hir::HirId,
- fields: &[hir::StructField],
- span: Span)
- -> Body<'tcx>
-{
- let tcx = infcx.tcx;
- let gcx = tcx.global_tcx();
- let def_id = tcx.hir().local_def_id_from_hir_id(ctor_id);
- let param_env = gcx.param_env(def_id);
+pub fn build_adt_ctor<'gcx>(tcx: TyCtxt<'_, 'gcx, 'gcx>, ctor_id: DefId) -> &'gcx Body<'gcx> {
+ debug_assert!(tcx.is_constructor(ctor_id));
+
+ let span = tcx.hir().span_if_local(ctor_id)
+ .unwrap_or_else(|| bug!("no span for ctor {:?}", ctor_id));
+
+ let param_env = tcx.param_env(ctor_id);
// Normalize the sig.
- let sig = gcx.fn_sig(def_id)
+ let sig = tcx.fn_sig(ctor_id)
.no_bound_vars()
.expect("LBR in ADT constructor signature");
- let sig = gcx.normalize_erasing_regions(param_env, sig);
+ let sig = tcx.normalize_erasing_regions(param_env, sig);
let (adt_def, substs) = match sig.output().sty {
ty::Adt(adt_def, substs) => (adt_def, substs),
_ => bug!("unexpected type for ADT ctor {:?}", sig.output())
};
- debug!("build_ctor: def_id={:?} sig={:?} fields={:?}", def_id, sig, fields);
+ debug!("build_ctor: ctor_id={:?} sig={:?}", ctor_id, sig);
let local_decls = local_decls_for_sig(&sig, span);
scope: OUTERMOST_SOURCE_SCOPE
};
- let variant_no = if adt_def.is_enum() {
- adt_def.variant_index_with_ctor_id(def_id)
+ let variant_index = if adt_def.is_enum() {
+ adt_def.variant_index_with_ctor_id(ctor_id)
} else {
VariantIdx::new(0)
};
- // return = ADT(arg0, arg1, ...); return
+ // Generate the following MIR:
+ //
+ // (return as Variant).field0 = arg0;
+ // (return as Variant).field1 = arg1;
+ //
+ // return;
+ debug!("build_ctor: variant_index={:?}", variant_index);
+
+ let statements = expand_aggregate(
+ Place::RETURN_PLACE,
+ adt_def
+ .variants[variant_index]
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(idx, field_def)| (
+ Operand::Move(Place::Base(PlaceBase::Local(Local::new(idx + 1)))),
+ field_def.ty(tcx, substs),
+ )),
+ AggregateKind::Adt(adt_def, variant_index, substs, None, None),
+ source_info,
+ ).collect();
+
let start_block = BasicBlockData {
- statements: vec![Statement {
- source_info,
- kind: StatementKind::Assign(
- Place::RETURN_PLACE,
- box Rvalue::Aggregate(
- box AggregateKind::Adt(adt_def, variant_no, substs, None, None),
- (1..sig.inputs().len()+1).map(|i| {
- Operand::Move(Place::Base(PlaceBase::Local(Local::new(i))))
- }).collect()
- )
- )
- }],
+ statements,
terminator: Some(Terminator {
source_info,
kind: TerminatorKind::Return,
is_cleanup: false
};
- Body::new(
+ let body = Body::new(
IndexVec::from_elem_n(start_block, 1),
IndexVec::from_elem_n(
SourceScopeData { span: span, parent_scope: None }, 1
vec![],
span,
vec![],
- )
+ );
+
+ crate::util::dump_mir(
+ tcx,
+ None,
+ "mir_map",
+ &0,
+ crate::transform::MirSource::item(ctor_id),
+ &body,
+ |_, _| Ok(()),
+ );
+
+ tcx.arena.alloc(body)
}
fn run_pass<'a, 'tcx>(&self,
_tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- self.add_call_guards(mir);
+ body: &mut Body<'tcx>) {
+ self.add_call_guards(body);
}
}
impl AddCallGuards {
- pub fn add_call_guards(&self, mir: &mut Body<'_>) {
+ pub fn add_call_guards(&self, body: &mut Body<'_>) {
let pred_count: IndexVec<_, _> =
- mir.predecessors().iter().map(|ps| ps.len()).collect();
+ body.predecessors().iter().map(|ps| ps.len()).collect();
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
- let cur_len = mir.basic_blocks().len();
+ let cur_len = body.basic_blocks().len();
- for block in mir.basic_blocks_mut() {
+ for block in body.basic_blocks_mut() {
match block.terminator {
Some(Terminator {
kind: TerminatorKind::Call {
debug!("Broke {} N edges", new_blocks.len());
- mir.basic_blocks_mut().extend(new_blocks);
+ body.basic_blocks_mut().extend(new_blocks);
}
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &mut Body<'tcx>)
+ body: &mut Body<'tcx>)
{
- debug!("add_moves_for_packed_drops({:?} @ {:?})", src, mir.span);
- add_moves_for_packed_drops(tcx, mir, src.def_id());
+ debug!("add_moves_for_packed_drops({:?} @ {:?})", src, body.span);
+ add_moves_for_packed_drops(tcx, body, src.def_id());
}
}
pub fn add_moves_for_packed_drops<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
def_id: DefId)
{
- let patch = add_moves_for_packed_drops_patch(tcx, mir, def_id);
- patch.apply(mir);
+ let patch = add_moves_for_packed_drops_patch(tcx, body, def_id);
+ patch.apply(body);
}
fn add_moves_for_packed_drops_patch<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
def_id: DefId)
-> MirPatch<'tcx>
{
- let mut patch = MirPatch::new(mir);
+ let mut patch = MirPatch::new(body);
let param_env = tcx.param_env(def_id);
- for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
match terminator.kind {
TerminatorKind::Drop { ref location, .. }
- if util::is_disaligned(tcx, mir, param_env, location) =>
+ if util::is_disaligned(tcx, body, param_env, location) =>
{
- add_move_for_packed_drop(tcx, mir, &mut patch, terminator,
+ add_move_for_packed_drop(tcx, body, &mut patch, terminator,
loc, data.is_cleanup);
}
TerminatorKind::DropAndReplace { .. } => {
fn add_move_for_packed_drop<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
patch: &mut MirPatch<'tcx>,
terminator: &Terminator<'tcx>,
loc: Location,
};
let source_info = terminator.source_info;
- let ty = location.ty(mir, tcx).ty;
+ let ty = location.ty(body, tcx).ty;
let temp = patch.new_temp(ty, terminator.source_info.span);
let storage_dead_block = patch.new_block(BasicBlockData {
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>)
+ body: &mut Body<'tcx>)
{
if !tcx.sess.opts.debugging_opts.mir_emit_retag {
return;
}
- let (span, arg_count) = (mir.span, mir.arg_count);
- let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut();
+ let (span, arg_count) = (body.span, body.arg_count);
+ let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let needs_retag = |place: &Place<'tcx>| {
// FIXME: Instead of giving up for unstable places, we should introduce
// a temporary and retag on that.
use crate::util;
pub struct UnsafetyChecker<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
const_context: bool,
min_const_fn: bool,
source_scope_local_data: &'a IndexVec<SourceScope, SourceScopeLocalData>,
inherited_blocks: Vec<(hir::HirId, bool)>,
}
-impl<'a, 'gcx, 'tcx> UnsafetyChecker<'a, 'tcx> {
+impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
fn new(
const_context: bool,
min_const_fn: bool,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
source_scope_local_data: &'a IndexVec<SourceScope, SourceScopeLocalData>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
assert!(const_context);
}
Self {
- mir,
+ body,
const_context,
min_const_fn,
source_scope_local_data,
violations: vec![],
source_info: SourceInfo {
- span: mir.span,
+ span: body.span,
scope: OUTERMOST_SOURCE_SCOPE
},
tcx,
}
TerminatorKind::Call { ref func, .. } => {
- let func_ty = func.ty(self.mir, self.tcx);
+ let func_ty = func.ty(self.body, self.tcx);
let sig = func_ty.fn_sig(self.tcx);
if let hir::Unsafety::Unsafe = sig.unsafety() {
self.require_unsafe("call to unsafe function",
// pointers during const evaluation have no integral address, only an abstract one
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty)
if self.const_context && self.tcx.features().const_raw_ptr_to_usize_cast => {
- let operand_ty = operand.ty(self.mir, self.tcx);
+ let operand_ty = operand.ty(self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
// result of a comparison of addresses would differ between runtime and compile-time.
Rvalue::BinaryOp(_, ref lhs, _)
if self.const_context && self.tcx.features().const_compare_raw_pointers => {
- if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.mir, self.tcx).sty {
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).sty {
self.register_violations(&[UnsafetyViolation {
source_info: self.source_info,
description: InternedString::intern("pointer operation"),
fn visit_place(&mut self,
place: &Place<'tcx>,
context: PlaceContext,
- location: Location) {
- match place {
- &Place::Projection(box Projection {
- ref base, ref elem
- }) => {
+ _location: Location) {
+ place.iterate(|place_base, place_projections| {
+ match place_base {
+ PlaceBase::Local(..) => {
+ // Locals are safe.
+ }
+ PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. }) => {
+ bug!("unsafety checking should happen before promotion")
+ }
+ PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), .. }) => {
+ if self.tcx.is_mutable_static(*def_id) {
+ self.require_unsafe("use of mutable static",
+ "mutable statics can be mutated by multiple threads: aliasing \
+ violations or data races will cause undefined behavior",
+ UnsafetyViolationKind::General);
+ } else if self.tcx.is_foreign_item(*def_id) {
+ let source_info = self.source_info;
+ let lint_root =
+ self.source_scope_local_data[source_info.scope].lint_root;
+ self.register_violations(&[UnsafetyViolation {
+ source_info,
+ description: InternedString::intern("use of extern static"),
+ details: InternedString::intern(
+ "extern statics are not controlled by the Rust type system: \
+ invalid data, aliasing violations or data races will cause \
+ undefined behavior"),
+ kind: UnsafetyViolationKind::ExternStatic(lint_root)
+ }], &[]);
+ }
+ }
+ }
+
+ for proj in place_projections {
if context.is_borrow() {
- if util::is_disaligned(self.tcx, self.mir, self.param_env, place) {
+ if util::is_disaligned(self.tcx, self.body, self.param_env, place) {
let source_info = self.source_info;
let lint_root =
self.source_scope_local_data[source_info.scope].lint_root;
}], &[]);
}
}
- let is_borrow_of_interior_mut = context.is_borrow() && !base
- .ty(self.mir, self.tcx)
+ let is_borrow_of_interior_mut = context.is_borrow() && !proj.base
+ .ty(self.body, self.tcx)
.ty
.is_freeze(self.tcx, self.param_env, self.source_info.span);
// prevent
);
}
let old_source_info = self.source_info;
- if let &Place::Base(PlaceBase::Local(local)) = base {
- if self.mir.local_decls[local].internal {
+ if let Place::Base(PlaceBase::Local(local)) = proj.base {
+ if self.body.local_decls[local].internal {
// Internal locals are used in the `move_val_init` desugaring.
// We want to check unsafety against the source info of the
// desugaring, rather than the source info of the RHS.
- self.source_info = self.mir.local_decls[local].source_info;
+ self.source_info = self.body.local_decls[local].source_info;
}
}
- let base_ty = base.ty(self.mir, self.tcx).ty;
+ let base_ty = proj.base.ty(self.body, self.tcx).ty;
match base_ty.sty {
ty::RawPtr(..) => {
self.require_unsafe("dereference of raw pointer",
MutatingUseContext::AsmOutput
)
{
- let elem_ty = match elem {
- &ProjectionElem::Field(_, ty) => ty,
+ let elem_ty = match proj.elem {
+ ProjectionElem::Field(_, ty) => ty,
_ => span_bug!(
self.source_info.span,
"non-field projection {:?} from union?",
}
self.source_info = old_source_info;
}
- &Place::Base(PlaceBase::Local(..)) => {
- // locals are safe
- }
- &Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. })) => {
- bug!("unsafety checking should happen before promotion")
- }
- &Place::Base(
- PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), .. })
- ) => {
- if self.tcx.is_mutable_static(def_id) {
- self.require_unsafe("use of mutable static",
- "mutable statics can be mutated by multiple threads: aliasing violations \
- or data races will cause undefined behavior",
- UnsafetyViolationKind::General);
- } else if self.tcx.is_foreign_item(def_id) {
- let source_info = self.source_info;
- let lint_root =
- self.source_scope_local_data[source_info.scope].lint_root;
- self.register_violations(&[UnsafetyViolation {
- source_info,
- description: InternedString::intern("use of extern static"),
- details: InternedString::intern(
- "extern statics are not controlled by the Rust type system: invalid \
- data, aliasing violations or data races will cause undefined behavior"),
- kind: UnsafetyViolationKind::ExternStatic(lint_root)
- }], &[]);
- }
- }
- };
- self.super_place(place, context, location);
+ });
}
}
}) = place {
match *elem {
ProjectionElem::Field(..) => {
- let ty = base.ty(&self.mir.local_decls, self.tcx).ty;
+ let ty = base.ty(&self.body.local_decls, self.tcx).ty;
match ty.sty {
ty::Adt(def, _) => match self.tcx.layout_scalar_valid_range(def.did) {
(Bound::Unbounded, Bound::Unbounded) => {},
// N.B., this borrow is valid because all the consumers of
// `mir_built` force this.
- let mir = &tcx.mir_built(def_id).borrow();
+ let body = &tcx.mir_built(def_id).borrow();
- let source_scope_local_data = match mir.source_scope_local_data {
+ let source_scope_local_data = match body.source_scope_local_data {
ClearCrossCrate::Set(ref data) => data,
ClearCrossCrate::Clear => {
debug!("unsafety_violations: {:?} - remote, skipping", def_id);
};
let mut checker = UnsafetyChecker::new(
const_context, min_const_fn,
- mir, source_scope_local_data, tcx, param_env);
- checker.visit_body(mir);
+ body, source_scope_local_data, tcx, param_env);
+ checker.visit_body(body);
check_unused_unsafe(tcx, def_id, &checker.used_unsafe, &mut checker.inherited_blocks);
UnsafetyCheckResult {
fn run_pass<'a, 'tcx>(&self,
_tcx: TyCtxt<'a, 'tcx, 'tcx>,
_source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
let mut delete = DeleteNonCodegenStatements;
- delete.visit_body(mir);
+ delete.visit_body(body);
}
}
use rustc::mir::visit::{
Visitor, PlaceContext, MutatingUseContext, MutVisitor, NonMutatingUseContext,
};
-use rustc::mir::interpret::{InterpError, Scalar, GlobalId, EvalResult};
+use rustc::mir::interpret::{InterpError, Scalar, GlobalId, InterpResult};
use rustc::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use syntax_pos::{Span, DUMMY_SP};
use rustc::ty::subst::InternalSubsts;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc::ty::layout::{
- LayoutOf, TyLayout, LayoutError,
- HasTyCtxt, TargetDataLayout, HasDataLayout,
+ LayoutOf, TyLayout, LayoutError, HasTyCtxt, TargetDataLayout, HasDataLayout, Size,
};
use crate::interpret::{
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
// will be evaluated by miri and produce its errors there
if source.promoted.is_some() {
return;
// constants, instead of just checking for const-folding succeeding.
// That would require an uniform one-def no-mutation analysis
// and RPO (or recursing when needing the value of a local).
- let mut optimization_finder = ConstPropagator::new(mir, tcx, source);
- optimization_finder.visit_body(mir);
+ let mut optimization_finder = ConstPropagator::new(body, tcx, source);
+ optimization_finder.visit_body(body);
// put back the data we stole from `mir`
std::mem::replace(
- &mut mir.source_scope_local_data,
+ &mut body.source_scope_local_data,
optimization_finder.source_scope_local_data
);
std::mem::replace(
- &mut mir.promoted,
+ &mut body.promoted,
optimization_finder.promoted
);
impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> {
fn new(
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: MirSource<'tcx>,
) -> ConstPropagator<'a, 'mir, 'tcx> {
let param_env = tcx.param_env(source.def_id());
let ecx = mk_eval_cx(tcx, tcx.def_span(source.def_id()), param_env);
- let can_const_prop = CanConstProp::check(mir);
+ let can_const_prop = CanConstProp::check(body);
let source_scope_local_data = std::mem::replace(
- &mut mir.source_scope_local_data,
+ &mut body.source_scope_local_data,
ClearCrossCrate::Clear
);
let promoted = std::mem::replace(
- &mut mir.promoted,
+ &mut body.promoted,
IndexVec::new()
);
source,
param_env,
can_const_prop,
- places: IndexVec::from_elem(None, &mir.local_decls),
+ places: IndexVec::from_elem(None, &body.local_decls),
source_scope_local_data,
//FIXME(wesleywiser) we can't steal this because `Visitor::super_visit_body()` needs it
- local_decls: mir.local_decls.clone(),
+ local_decls: body.local_decls.clone(),
promoted,
}
}
f: F
) -> Option<T>
where
- F: FnOnce(&mut Self) -> EvalResult<'tcx, T>,
+ F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
{
self.ecx.tcx.span = source_info.span;
let lint_root = match self.source_scope_local_data {
// cannot use `const_eval` here, because that would require having the MIR
// for the current function available, but we're producing said MIR right now
let res = self.use_ecx(source_info, |this| {
- let mir = &this.promoted[*promoted];
- eval_promoted(this.tcx, cid, mir, this.param_env)
+ let body = &this.promoted[*promoted];
+ eval_promoted(this.tcx, cid, body, this.param_env)
})?;
trace!("evaluated promoted {:?} to {:?}", promoted, res);
res.into()
this.ecx.operand_field(eval, field.index() as u64)
})?;
},
+ ProjectionElem::Deref => {
+ trace!("processing deref");
+ eval = self.use_ecx(source_info, |this| {
+ this.ecx.deref_operand(eval)
+ })?.into();
+ }
// We could get more projections by using e.g., `operand_projection`,
// but we do not even have the stack frame set up properly so
// an `Index` projection would throw us off-track.
Rvalue::Use(ref op) => {
self.eval_operand(op, source_info)
},
+ Rvalue::Ref(_, _, ref place) => {
+ let src = self.eval_place(place, source_info)?;
+ let mplace = src.try_as_mplace().ok()?;
+ Some(ImmTy::from_scalar(mplace.ptr.into(), place_layout).into())
+ },
Rvalue::Repeat(..) |
- Rvalue::Ref(..) |
Rvalue::Aggregate(..) |
Rvalue::NullaryOp(NullOp::Box, _) |
Rvalue::Discriminant(..) => None,
this.ecx.cast(op, kind, dest.into())?;
Ok(dest.into())
})
- }
+ },
+ Rvalue::Len(ref place) => {
+ let place = self.eval_place(&place, source_info)?;
+ let mplace = place.try_as_mplace().ok()?;
+
+ if let ty::Slice(_) = mplace.layout.ty.sty {
+ let len = mplace.meta.unwrap().to_usize(&self.ecx).unwrap();
- // FIXME(oli-obk): evaluate static/constant slice lengths
- Rvalue::Len(_) => None,
+ Some(ImmTy {
+ imm: Immediate::Scalar(
+ Scalar::from_uint(
+ len,
+ Size::from_bits(
+ self.tcx.sess.target.usize_ty.bit_width().unwrap() as u64
+ )
+ ).into(),
+ ),
+ layout: self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?,
+ }.into())
+ } else {
+ trace!("not slice: {:?}", mplace.layout.ty.sty);
+ None
+ }
+ },
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(
ImmTy {
source_info: SourceInfo,
) {
trace!("attepting to replace {:?} with {:?}", rval, value);
- self.ecx.validate_operand(
- value,
- vec![],
- None,
- true,
- ).expect("value should already be a valid const");
+ if let Err(e) = self.ecx.validate_operand(value, vec![], None, true) {
+ trace!("validation error, attempt failed: {:?}", e);
+ return;
+ }
// FIXME> figure out what tho do when try_read_immediate fails
let imm = self.use_ecx(source_info, |this| {
impl CanConstProp {
/// returns true if `local` can be propagated
- fn check(mir: &Body<'_>) -> IndexVec<Local, bool> {
+ fn check(body: &Body<'_>) -> IndexVec<Local, bool> {
let mut cpv = CanConstProp {
- can_const_prop: IndexVec::from_elem(true, &mir.local_decls),
- found_assignment: IndexVec::from_elem(false, &mir.local_decls),
+ can_const_prop: IndexVec::from_elem(true, &body.local_decls),
+ found_assignment: IndexVec::from_elem(false, &body.local_decls),
};
for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
// cannot use args at all
// lint for x != y
// FIXME(oli-obk): lint variables until they are used in a condition
// FIXME(oli-obk): lint if return value is constant
- *val = mir.local_kind(local) == LocalKind::Temp;
+ *val = body.local_kind(local) == LocalKind::Temp;
if !*val {
trace!("local {:?} can't be propagated because it's not a temporary", local);
}
}
- cpv.visit_body(mir);
+ cpv.visit_body(body);
cpv.can_const_prop
}
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
// We only run when the MIR optimization level is > 1.
// This avoids a slow pass, and messing up debug info.
if tcx.sess.opts.debugging_opts.mir_opt_level <= 1 {
return;
}
- let mut def_use_analysis = DefUseAnalysis::new(mir);
+ let mut def_use_analysis = DefUseAnalysis::new(body);
loop {
- def_use_analysis.analyze(mir);
+ def_use_analysis.analyze(body);
- if eliminate_self_assignments(mir, &def_use_analysis) {
- def_use_analysis.analyze(mir);
+ if eliminate_self_assignments(body, &def_use_analysis) {
+ def_use_analysis.analyze(body);
}
let mut changed = false;
- for dest_local in mir.local_decls.indices() {
+ for dest_local in body.local_decls.indices() {
debug!("Considering destination local: {:?}", dest_local);
let action;
}
// Conservatively gives up if the dest is an argument,
// because there may be uses of the original argument value.
- if mir.local_kind(dest_local) == LocalKind::Arg {
+ if body.local_kind(dest_local) == LocalKind::Arg {
debug!(" Can't copy-propagate local: dest {:?} (argument)",
dest_local);
continue;
let dest_place_def = dest_use_info.defs_not_including_drop().next().unwrap();
location = dest_place_def.location;
- let basic_block = &mir[location.block];
+ let basic_block = &body[location.block];
let statement_index = location.statement_index;
let statement = match basic_block.statements.get(statement_index) {
Some(statement) => statement,
let maybe_action = match *operand {
Operand::Copy(ref src_place) |
Operand::Move(ref src_place) => {
- Action::local_copy(&mir, &def_use_analysis, src_place)
+ Action::local_copy(&body, &def_use_analysis, src_place)
}
Operand::Constant(ref src_constant) => {
Action::constant(src_constant)
}
}
- changed = action.perform(mir, &def_use_analysis, dest_local, location) || changed;
+ changed = action.perform(body, &def_use_analysis, dest_local, location) || changed;
// FIXME(pcwalton): Update the use-def chains to delete the instructions instead of
// regenerating the chains.
break
}
fn eliminate_self_assignments(
- mir: &mut Body<'_>,
+ body: &mut Body<'_>,
def_use_analysis: &DefUseAnalysis,
) -> bool {
let mut changed = false;
- for dest_local in mir.local_decls.indices() {
+ for dest_local in body.local_decls.indices() {
let dest_use_info = def_use_analysis.local_info(dest_local);
for def in dest_use_info.defs_not_including_drop() {
let location = def.location;
- if let Some(stmt) = mir[location.block].statements.get(location.statement_index) {
+ if let Some(stmt) = body[location.block].statements.get(location.statement_index) {
match stmt.kind {
StatementKind::Assign(
Place::Base(PlaceBase::Local(local)),
continue;
}
debug!("Deleting a self-assignment for {:?}", dest_local);
- mir.make_statement_nop(location);
+ body.make_statement_nop(location);
changed = true;
}
}
}
impl<'tcx> Action<'tcx> {
- fn local_copy(mir: &Body<'tcx>, def_use_analysis: &DefUseAnalysis, src_place: &Place<'tcx>)
+ fn local_copy(body: &Body<'tcx>, def_use_analysis: &DefUseAnalysis, src_place: &Place<'tcx>)
-> Option<Action<'tcx>> {
// The source must be a local.
let src_local = if let Place::Base(PlaceBase::Local(local)) = *src_place {
// USE(SRC);
let src_def_count = src_use_info.def_count_not_including_drop();
// allow function arguments to be propagated
- let is_arg = mir.local_kind(src_local) == LocalKind::Arg;
+ let is_arg = body.local_kind(src_local) == LocalKind::Arg;
if (is_arg && src_def_count != 0) || (!is_arg && src_def_count != 1) {
debug!(
" Can't copy-propagate local: {} defs of src{}",
}
fn perform(self,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
def_use_analysis: &DefUseAnalysis,
dest_local: Local,
location: Location)
src_local);
for place_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
if place_use.context.is_storage_marker() {
- mir.make_statement_nop(place_use.location)
+ body.make_statement_nop(place_use.location)
}
}
for place_use in &def_use_analysis.local_info(src_local).defs_and_uses {
if place_use.context.is_storage_marker() {
- mir.make_statement_nop(place_use.location)
+ body.make_statement_nop(place_use.location)
}
}
// Replace all uses of the destination local with the source local.
- def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_local);
+ def_use_analysis.replace_all_defs_and_uses_with(dest_local, body, src_local);
// Finally, zap the now-useless assignment instruction.
debug!(" Deleting assignment");
- mir.make_statement_nop(location);
+ body.make_statement_nop(location);
true
}
let dest_local_info = def_use_analysis.local_info(dest_local);
for place_use in &dest_local_info.defs_and_uses {
if place_use.context.is_storage_marker() {
- mir.make_statement_nop(place_use.location)
+ body.make_statement_nop(place_use.location)
}
}
let mut visitor = ConstantPropagationVisitor::new(dest_local,
src_constant);
for dest_place_use in &dest_local_info.defs_and_uses {
- visitor.visit_location(mir, dest_place_use.location)
+ visitor.visit_location(body, dest_place_use.location)
}
// Zap the assignment instruction if we eliminated all the uses. We won't have been
debug!(" {} of {} use(s) replaced; deleting assignment",
visitor.uses_replaced,
use_count);
- mir.make_statement_nop(location);
+ body.make_statement_nop(location);
true
} else if visitor.uses_replaced == 0 {
debug!(" No uses replaced; not deleting assignment");
use rustc::mir::*;
use rustc::ty::TyCtxt;
-use rustc::ty::layout::VariantIdx;
-use rustc_data_structures::indexed_vec::Idx;
use crate::transform::{MirPass, MirSource};
+use crate::util::expand_aggregate;
pub struct Deaggregator;
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut();
+ body: &mut Body<'tcx>) {
+ let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let local_decls = &*local_decls;
for bb in basic_blocks {
bb.expand_statements(|stmt| {
let stmt = stmt.replace_nop();
let source_info = stmt.source_info;
- let (mut lhs, kind, operands) = match stmt.kind {
+ let (lhs, kind, operands) = match stmt.kind {
StatementKind::Assign(lhs, box rvalue) => {
match rvalue {
Rvalue::Aggregate(kind, operands) => (lhs, kind, operands),
_ => bug!()
};
- let mut set_discriminant = None;
- let active_field_index = match *kind {
- AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
- if adt_def.is_enum() {
- set_discriminant = Some(Statement {
- kind: StatementKind::SetDiscriminant {
- place: lhs.clone(),
- variant_index,
- },
- source_info,
- });
- lhs = lhs.downcast(adt_def, variant_index);
- }
- active_field_index
- }
- AggregateKind::Generator(..) => {
- // Right now we only support initializing generators to
- // variant 0 (Unresumed).
- let variant_index = VariantIdx::new(0);
- set_discriminant = Some(Statement {
- kind: StatementKind::SetDiscriminant {
- place: lhs.clone(),
- variant_index,
- },
- source_info,
- });
-
- // Operands are upvars stored on the base place, so no
- // downcast is necessary.
-
- None
- }
- _ => None
- };
-
- Some(operands.into_iter().enumerate().map(move |(i, op)| {
- let lhs_field = if let AggregateKind::Array(_) = *kind {
- // FIXME(eddyb) `offset` should be u64.
- let offset = i as u32;
- assert_eq!(offset as usize, i);
- lhs.clone().elem(ProjectionElem::ConstantIndex {
- offset,
- // FIXME(eddyb) `min_length` doesn't appear to be used.
- min_length: offset + 1,
- from_end: false
- })
- } else {
+ Some(expand_aggregate(
+ lhs,
+ operands.into_iter().map(|op| {
let ty = op.ty(local_decls, tcx);
- let field = Field::new(active_field_index.unwrap_or(i));
- lhs.clone().field(field, ty)
- };
- Statement {
- source_info,
- kind: StatementKind::Assign(lhs_field, box Rvalue::Use(op)),
- }
- }).chain(set_discriminant))
+ (op, ty)
+ }),
+ *kind,
+ source_info,
+ ))
});
}
}
fn run_pass<'a, 'tcx>(&self,
_tcx: TyCtxt<'a, 'tcx, 'tcx>,
_source: MirSource<'tcx>,
- _mir: &mut Body<'tcx>)
+ _body: &mut Body<'tcx>)
{
}
}
pass_num: &dyn fmt::Display,
pass_name: &str,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
is_after: bool) {
if mir_util::dump_enabled(tcx, pass_name, source) {
mir_util::dump_mir(tcx,
pass_name,
&Disambiguator { is_after },
source,
- mir,
+ body,
|_, _| Ok(()) );
}
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &mut Body<'tcx>)
+ body: &mut Body<'tcx>)
{
- debug!("elaborate_drops({:?} @ {:?})", src, mir.span);
+ debug!("elaborate_drops({:?} @ {:?})", src, body.span);
let def_id = src.def_id();
let param_env = tcx.param_env(src.def_id()).with_reveal_all();
- let move_data = match MoveData::gather_moves(mir, tcx) {
+ let move_data = match MoveData::gather_moves(body, tcx) {
Ok(move_data) => move_data,
Err((move_data, _move_errors)) => {
// The only way we should be allowing any move_errors
}
};
let elaborate_patch = {
- let mir = &*mir;
+ let body = &*body;
let env = MoveDataParamEnv {
move_data,
param_env,
};
- let dead_unwinds = find_dead_unwinds(tcx, mir, def_id, &env);
+ let dead_unwinds = find_dead_unwinds(tcx, body, def_id, &env);
let flow_inits =
- do_dataflow(tcx, mir, def_id, &[], &dead_unwinds,
- MaybeInitializedPlaces::new(tcx, mir, &env),
+ do_dataflow(tcx, body, def_id, &[], &dead_unwinds,
+ MaybeInitializedPlaces::new(tcx, body, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
let flow_uninits =
- do_dataflow(tcx, mir, def_id, &[], &dead_unwinds,
- MaybeUninitializedPlaces::new(tcx, mir, &env),
+ do_dataflow(tcx, body, def_id, &[], &dead_unwinds,
+ MaybeUninitializedPlaces::new(tcx, body, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
ElaborateDropsCtxt {
tcx,
- mir,
+ body,
env: &env,
flow_inits,
flow_uninits,
drop_flags: Default::default(),
- patch: MirPatch::new(mir),
+ patch: MirPatch::new(body),
}.elaborate()
};
- elaborate_patch.apply(mir);
+ elaborate_patch.apply(body);
}
}
/// that can't drop anything.
fn find_dead_unwinds<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
def_id: hir::def_id::DefId,
env: &MoveDataParamEnv<'tcx, 'tcx>)
-> BitSet<BasicBlock>
{
- debug!("find_dead_unwinds({:?})", mir.span);
+ debug!("find_dead_unwinds({:?})", body.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
- let mut dead_unwinds = BitSet::new_empty(mir.basic_blocks().len());
+ let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let flow_inits =
- do_dataflow(tcx, mir, def_id, &[], &dead_unwinds,
- MaybeInitializedPlaces::new(tcx, mir, &env),
+ do_dataflow(tcx, body, def_id, &[], &dead_unwinds,
+ MaybeInitializedPlaces::new(tcx, body, &env),
|bd, p| DebugFormatted::new(&bd.move_data().move_paths[p]));
- for (bb, bb_data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
let location = match bb_data.terminator().kind {
TerminatorKind::Drop { ref location, unwind: Some(_), .. } |
TerminatorKind::DropAndReplace { ref location, unwind: Some(_), .. } => location,
bb, bb_data, init_data.live);
for stmt in 0..bb_data.statements.len() {
let loc = Location { block: bb, statement_index: stmt };
- init_data.apply_location(tcx, mir, env, loc);
+ init_data.apply_location(tcx, body, env, loc);
}
let path = match env.move_data.rev_lookup.find(location) {
debug!("find_dead_unwinds @ {:?}: path({:?})={:?}", bb, location, path);
let mut maybe_live = false;
- on_all_drop_children_bits(tcx, mir, &env, path, |child| {
+ on_all_drop_children_bits(tcx, body, &env, path, |child| {
let (child_maybe_live, _) = init_data.state(child);
maybe_live |= child_maybe_live;
});
impl InitializationData {
fn apply_location<'a,'tcx>(&mut self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
env: &MoveDataParamEnv<'tcx, 'tcx>,
loc: Location)
{
- drop_flag_effects_for_location(tcx, mir, env, loc, |path, df| {
+ drop_flag_effects_for_location(tcx, body, env, loc, |path, df| {
debug!("at location {:?}: setting {:?} to {:?}",
loc, path, df);
match df {
&mut self.ctxt.patch
}
- fn mir(&self) -> &'a Body<'tcx> {
- self.ctxt.mir
+ fn body(&self) -> &'a Body<'tcx> {
+ self.ctxt.body
}
fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
let mut some_dead = false;
let mut children_count = 0;
on_all_drop_children_bits(
- self.tcx(), self.mir(), self.ctxt.env, path, |child| {
+ self.tcx(), self.body(), self.ctxt.env, path, |child| {
let (live, dead) = self.init_data.state(child);
debug!("elaborate_drop: state({:?}) = {:?}",
child, (live, dead));
}
DropFlagMode::Deep => {
on_all_children_bits(
- self.tcx(), self.mir(), self.ctxt.move_data(), path,
+ self.tcx(), self.body(), self.ctxt.move_data(), path,
|child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent)
);
}
struct ElaborateDropsCtxt<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
env: &'a MoveDataParamEnv<'tcx, 'tcx>,
flow_inits: DataflowResults<'tcx, MaybeInitializedPlaces<'a, 'tcx, 'tcx>>,
flow_uninits: DataflowResults<'tcx, MaybeUninitializedPlaces<'a, 'tcx, 'tcx>>,
.to_owned(),
};
for stmt in 0..loc.statement_index {
- data.apply_location(self.tcx, self.mir, self.env,
+ data.apply_location(self.tcx, self.body, self.env,
Location { block: loc.block, statement_index: stmt });
}
data
fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
let tcx = self.tcx;
let patch = &mut self.patch;
- debug!("create_drop_flag({:?})", self.mir.span);
+ debug!("create_drop_flag({:?})", self.body.span);
self.drop_flags.entry(index).or_insert_with(|| {
patch.new_internal(tcx.types.bool, span)
});
fn collect_drop_flags(&mut self)
{
- for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
let terminator = data.terminator();
let location = match terminator.kind {
TerminatorKind::Drop { ref location, .. } |
}
};
- on_all_drop_children_bits(self.tcx, self.mir, self.env, path, |child| {
+ on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
let (maybe_live, maybe_dead) = init_data.state(child);
debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
child, location, path, (maybe_live, maybe_dead));
fn elaborate_drops(&mut self)
{
- for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
unwind: Option<BasicBlock>)
{
let bb = loc.block;
- let data = &self.mir[bb];
+ let data = &self.body[bb];
let terminator = data.terminator();
assert!(!data.is_cleanup, "DropAndReplace in unwind path not supported");
target,
Unwind::To(unwind),
bb);
- on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| {
+ on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
self.set_drop_flag(Location { block: target, statement_index: 0 },
child, DropFlagState::Present);
self.set_drop_flag(Location { block: unwind, statement_index: 0 },
fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
if let Some(&flag) = self.drop_flags.get(&path) {
- let span = self.patch.source_info_for_location(self.mir, loc).span;
+ let span = self.patch.source_info_for_location(self.body, loc).span;
let val = self.constant_bool(span, val.value());
self.patch.add_assign(loc, Place::Base(PlaceBase::Local(flag)), val);
}
fn drop_flags_on_init(&mut self) {
let loc = Location::START;
- let span = self.patch.source_info_for_location(self.mir, loc).span;
+ let span = self.patch.source_info_for_location(self.body, loc).span;
let false_ = self.constant_bool(span, false);
for flag in self.drop_flags.values() {
self.patch.add_assign(loc, Place::Base(PlaceBase::Local(*flag)), false_.clone());
}
fn drop_flags_for_fn_rets(&mut self) {
- for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
if let TerminatorKind::Call {
destination: Some((ref place, tgt)), cleanup: Some(_), ..
} = data.terminator().kind {
let loc = Location { block: tgt, statement_index: 0 };
let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
- self.tcx, self.mir, self.move_data(), path,
+ self.tcx, self.body, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)
);
}
fn drop_flags_for_args(&mut self) {
let loc = Location::START;
dataflow::drop_flag_effects_for_function_entry(
- self.tcx, self.mir, self.env, |path, ds| {
+ self.tcx, self.body, self.env, |path, ds| {
self.set_drop_flag(loc, path, ds);
}
)
// drop flags by themselves, to avoid the drop flags being
// clobbered before they are read.
- for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
debug!("drop_flags_for_locs({:?})", data);
for i in 0..(data.statements.len()+1) {
debug!("drop_flag_for_locs: stmt {}", i);
}
let loc = Location { block: bb, statement_index: i };
dataflow::drop_flag_effects_for_location(
- self.tcx, self.mir, self.env, loc, |path, ds| {
+ self.tcx, self.body, self.env, loc, |path, ds| {
if ds == DropFlagState::Absent || allow_initializations {
self.set_drop_flag(loc, path, ds)
}
let loc = Location { block: bb, statement_index: data.statements.len() };
let path = self.move_data().rev_lookup.find(place);
on_lookup_result_bits(
- self.tcx, self.mir, self.move_data(), path,
+ self.tcx, self.body, self.move_data(), path,
|child| self.set_drop_flag(loc, child, DropFlagState::Present)
);
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- EraseRegionsVisitor::new(tcx).visit_body(mir);
+ body: &mut Body<'tcx>) {
+ EraseRegionsVisitor::new(tcx).visit_body(body);
}
}
use rustc::ty::subst::SubstsRef;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc_data_structures::bit_set::BitSet;
+use rustc_data_structures::bit_set::{BitSet, BitMatrix};
use std::borrow::Cow;
use std::iter;
use std::mem;
use crate::transform::{MirPass, MirSource};
use crate::transform::simplify;
use crate::transform::no_landing_pads::no_landing_pads;
+use crate::dataflow::{DataflowResults, DataflowResultsConsumer, FlowAtLocation};
use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location};
use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals};
use crate::util::dump_mir;
place: &mut Place<'tcx>,
context: PlaceContext,
location: Location) {
- if *place == Place::Base(PlaceBase::Local(self_arg())) {
- *place = Place::Projection(Box::new(Projection {
- base: place.clone(),
+ if place.base_local() == Some(self_arg()) {
+ replace_base(place, Place::Projection(Box::new(Projection {
+ base: Place::Base(PlaceBase::Local(self_arg())),
elem: ProjectionElem::Deref,
- }));
+ })));
} else {
self.super_place(place, context, location);
}
place: &mut Place<'tcx>,
context: PlaceContext,
location: Location) {
- if *place == Place::Base(PlaceBase::Local(self_arg())) {
- *place = Place::Projection(Box::new(Projection {
- base: place.clone(),
+ if place.base_local() == Some(self_arg()) {
+ replace_base(place, Place::Projection(Box::new(Projection {
+ base: Place::Base(PlaceBase::Local(self_arg())),
elem: ProjectionElem::Field(Field::new(0), self.ref_gen_ty),
- }));
+ })));
} else {
self.super_place(place, context, location);
}
}
}
+fn replace_base(place: &mut Place<'tcx>, new_base: Place<'tcx>) {
+ if let Place::Projection(proj) = place {
+ replace_base(&mut proj.base, new_base);
+ } else {
+ *place = new_base;
+ }
+}
+
fn self_arg() -> Local {
Local::new(1)
}
}
// Create a statement which reads the discriminant into a temporary
- fn get_discr(&self, mir: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
- let temp_decl = LocalDecl::new_internal(self.tcx.types.isize, mir.span);
- let local_decls_len = mir.local_decls.push(temp_decl);
+ fn get_discr(&self, body: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
+ let temp_decl = LocalDecl::new_internal(self.tcx.types.isize, body.span);
+ let local_decls_len = body.local_decls.push(temp_decl);
let temp = Place::Base(PlaceBase::Local(local_decls_len));
let self_place = Place::Base(PlaceBase::Local(self_arg()));
let assign = Statement {
- source_info: source_info(mir),
+ source_info: source_info(body),
kind: StatementKind::Assign(temp.clone(), box Rvalue::Discriminant(self_place)),
};
(assign, temp)
place: &mut Place<'tcx>,
context: PlaceContext,
location: Location) {
- if let Place::Base(PlaceBase::Local(l)) = *place {
+ if let Some(l) = place.base_local() {
// Replace an Local in the remap with a generator struct access
if let Some(&(ty, variant_index, idx)) = self.remap.get(&l) {
- *place = self.make_field(variant_index, idx, ty);
+ replace_base(place, self.make_field(variant_index, idx, ty));
}
} else {
self.super_place(place, context, location);
fn make_generator_state_argument_indirect<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- mir: &mut Body<'tcx>) {
- let gen_ty = mir.local_decls.raw[1].ty;
+ body: &mut Body<'tcx>) {
+ let gen_ty = body.local_decls.raw[1].ty;
let region = ty::ReFree(ty::FreeRegion {
scope: def_id,
});
// Replace the by value generator argument
- mir.local_decls.raw[1].ty = ref_gen_ty;
+ body.local_decls.raw[1].ty = ref_gen_ty;
// Add a deref to accesses of the generator state
- DerefArgVisitor.visit_body(mir);
+ DerefArgVisitor.visit_body(body);
}
fn make_generator_state_argument_pinned<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &mut Body<'tcx>) {
- let ref_gen_ty = mir.local_decls.raw[1].ty;
+ body: &mut Body<'tcx>) {
+ let ref_gen_ty = body.local_decls.raw[1].ty;
let pin_did = tcx.lang_items().pin_type().unwrap();
let pin_adt_ref = tcx.adt_def(pin_did);
let pin_ref_gen_ty = tcx.mk_adt(pin_adt_ref, substs);
// Replace the by ref generator argument
- mir.local_decls.raw[1].ty = pin_ref_gen_ty;
+ body.local_decls.raw[1].ty = pin_ref_gen_ty;
// Add the Pin field access to accesses of the generator state
- PinArgVisitor { ref_gen_ty }.visit_body(mir);
+ PinArgVisitor { ref_gen_ty }.visit_body(body);
}
fn replace_result_variable<'tcx>(
ret_ty: Ty<'tcx>,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
) -> Local {
- let source_info = source_info(mir);
+ let source_info = source_info(body);
let new_ret = LocalDecl {
mutability: Mutability::Mut,
ty: ret_ty,
is_block_tail: None,
is_user_variable: None,
};
- let new_ret_local = Local::new(mir.local_decls.len());
- mir.local_decls.push(new_ret);
- mir.local_decls.swap(RETURN_PLACE, new_ret_local);
+ let new_ret_local = Local::new(body.local_decls.len());
+ body.local_decls.push(new_ret);
+ body.local_decls.swap(RETURN_PLACE, new_ret_local);
RenameLocalVisitor {
from: RETURN_PLACE,
to: new_ret_local,
- }.visit_body(mir);
+ }.visit_body(body);
new_ret_local
}
}
}
+struct LivenessInfo {
+ /// Which locals are live across any suspension point.
+ ///
+ /// GeneratorSavedLocal is indexed in terms of the elements in this set;
+ /// i.e. GeneratorSavedLocal::new(1) corresponds to the second local
+ /// included in this set.
+ live_locals: liveness::LiveVarSet,
+
+ /// The set of saved locals live at each suspension point.
+ live_locals_at_suspension_points: Vec<BitSet<GeneratorSavedLocal>>,
+
+ /// For every saved local, the set of other saved locals that are
+ /// storage-live at the same time as this local. We cannot overlap locals in
+ /// the layout which have conflicting storage.
+ storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+
+ /// For every suspending block, the locals which are storage-live across
+ /// that suspension point.
+ storage_liveness: FxHashMap<BasicBlock, liveness::LiveVarSet>,
+}
+
fn locals_live_across_suspend_points(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
source: MirSource<'tcx>,
movable: bool,
-) -> (
- liveness::LiveVarSet,
- FxHashMap<BasicBlock, liveness::LiveVarSet>,
- BitSet<BasicBlock>,
-) {
- let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len());
+) -> LivenessInfo {
+ let dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let def_id = source.def_id();
// Calculate when MIR locals have live storage. This gives us an upper bound of their
// lifetimes.
- let storage_live_analysis = MaybeStorageLive::new(mir);
+ let storage_live_analysis = MaybeStorageLive::new(body);
let storage_live =
- do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, storage_live_analysis,
- |bd, p| DebugFormatted::new(&bd.mir().local_decls[p]));
+ do_dataflow(tcx, body, def_id, &[], &dead_unwinds, storage_live_analysis,
+ |bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
// Find the MIR locals which do not use StorageLive/StorageDead statements.
// The storage of these locals are always live.
- let mut ignored = StorageIgnored(BitSet::new_filled(mir.local_decls.len()));
- ignored.visit_body(mir);
+ let mut ignored = StorageIgnored(BitSet::new_filled(body.local_decls.len()));
+ ignored.visit_body(body);
// Calculate the MIR locals which have been previously
// borrowed (even if they are still active).
// This is only used for immovable generators.
let borrowed_locals = if !movable {
- let analysis = HaveBeenBorrowedLocals::new(mir);
+ let analysis = HaveBeenBorrowedLocals::new(body);
let result =
- do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, analysis,
- |bd, p| DebugFormatted::new(&bd.mir().local_decls[p]));
+ do_dataflow(tcx, body, def_id, &[], &dead_unwinds, analysis,
+ |bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
Some((analysis, result))
} else {
None
};
// Calculate the liveness of MIR locals ignoring borrows.
- let mut set = liveness::LiveVarSet::new_empty(mir.local_decls.len());
+ let mut live_locals = liveness::LiveVarSet::new_empty(body.local_decls.len());
let mut liveness = liveness::liveness_of_locals(
- mir,
+ body,
);
liveness::dump_mir(
tcx,
"generator_liveness",
source,
- mir,
+ body,
&liveness,
);
let mut storage_liveness_map = FxHashMap::default();
+ let mut live_locals_at_suspension_points = Vec::new();
- let mut suspending_blocks = BitSet::new_empty(mir.basic_blocks().len());
-
- for (block, data) in mir.basic_blocks().iter_enumerated() {
+ for (block, data) in body.basic_blocks().iter_enumerated() {
if let TerminatorKind::Yield { .. } = data.terminator().kind {
- suspending_blocks.insert(block);
-
let loc = Location {
block: block,
statement_index: data.statements.len(),
let borrowed_locals = state_for_location(loc,
analysis,
result,
- mir);
+ body);
// The `liveness` variable contains the liveness of MIR locals ignoring borrows.
// This is correct for movable generators since borrows cannot live across
// suspension points. However for immovable generators we need to account for
let mut storage_liveness = state_for_location(loc,
&storage_live_analysis,
&storage_live,
- mir);
+ body);
// Store the storage liveness for later use so we can restore the state
// after a suspension point
// Locals live are live at this point only if they are used across
// suspension points (the `liveness` variable)
// and their storage is live (the `storage_liveness` variable)
- storage_liveness.intersect(&liveness.outs[block]);
+ let mut live_locals_here = storage_liveness;
+ live_locals_here.intersect(&liveness.outs[block]);
- let live_locals = storage_liveness;
+ // The generator argument is ignored
+ live_locals_here.remove(self_arg());
- // Add the locals life at this suspension point to the set of locals which live across
+ // Add the locals live at this suspension point to the set of locals which live across
// any suspension points
- set.union(&live_locals);
+ live_locals.union(&live_locals_here);
+
+ live_locals_at_suspension_points.push(live_locals_here);
+ }
+ }
+
+ // Renumber our liveness_map bitsets to include only the locals we are
+ // saving.
+ let live_locals_at_suspension_points = live_locals_at_suspension_points
+ .iter()
+ .map(|live_here| renumber_bitset(&live_here, &live_locals))
+ .collect();
+
+ let storage_conflicts = compute_storage_conflicts(
+ body,
+ &live_locals,
+ &ignored,
+ storage_live,
+ storage_live_analysis);
+
+ LivenessInfo {
+ live_locals,
+ live_locals_at_suspension_points,
+ storage_conflicts,
+ storage_liveness: storage_liveness_map,
+ }
+}
+
+/// Renumbers the items present in `stored_locals` and applies the renumbering
+/// to `input`.
+///
+/// For example, if `stored_locals = [1, 3, 5]`, this would be renumbered to
+/// `[0, 1, 2]`. Thus, if `input = [3, 5]` we would return `[1, 2]`.
+fn renumber_bitset(input: &BitSet<Local>, stored_locals: &liveness::LiveVarSet)
+-> BitSet<GeneratorSavedLocal> {
+ assert!(stored_locals.superset(&input), "{:?} not a superset of {:?}", stored_locals, input);
+ let mut out = BitSet::new_empty(stored_locals.count());
+ for (idx, local) in stored_locals.iter().enumerate() {
+ let saved_local = GeneratorSavedLocal::from(idx);
+ if input.contains(local) {
+ out.insert(saved_local);
+ }
+ }
+ debug!("renumber_bitset({:?}, {:?}) => {:?}", input, stored_locals, out);
+ out
+}
+
+/// For every saved local, looks for which locals are StorageLive at the same
+/// time. Generates a bitset for every local of all the other locals that may be
+/// StorageLive simultaneously with that local. This is used in the layout
+/// computation; see `GeneratorLayout` for more.
+fn compute_storage_conflicts(
+ body: &'mir Body<'tcx>,
+ stored_locals: &liveness::LiveVarSet,
+ ignored: &StorageIgnored,
+ storage_live: DataflowResults<'tcx, MaybeStorageLive<'mir, 'tcx>>,
+ _storage_live_analysis: MaybeStorageLive<'mir, 'tcx>,
+) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
+ assert_eq!(body.local_decls.len(), ignored.0.domain_size());
+ assert_eq!(body.local_decls.len(), stored_locals.domain_size());
+ debug!("compute_storage_conflicts({:?})", body.span);
+ debug!("ignored = {:?}", ignored.0);
+
+ // Storage ignored locals are not eligible for overlap, since their storage
+ // is always live.
+ let mut ineligible_locals = ignored.0.clone();
+ ineligible_locals.intersect(&stored_locals);
+
+ // Compute the storage conflicts for all eligible locals.
+ let mut visitor = StorageConflictVisitor {
+ body,
+ stored_locals: &stored_locals,
+ local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len())
+ };
+ let mut state = FlowAtLocation::new(storage_live);
+ visitor.analyze_results(&mut state);
+ let local_conflicts = visitor.local_conflicts;
+
+ // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal).
+ //
+ // NOTE: Today we store a full conflict bitset for every local. Technically
+ // this is twice as many bits as we need, since the relation is symmetric.
+ // However, in practice these bitsets are not usually large. The layout code
+ // also needs to keep track of how many conflicts each local has, so it's
+ // simpler to keep it this way for now.
+ let mut storage_conflicts = BitMatrix::new(stored_locals.count(), stored_locals.count());
+ for (idx_a, local_a) in stored_locals.iter().enumerate() {
+ let saved_local_a = GeneratorSavedLocal::new(idx_a);
+ if ineligible_locals.contains(local_a) {
+ // Conflicts with everything.
+ storage_conflicts.insert_all_into_row(saved_local_a);
+ } else {
+ // Keep overlap information only for stored locals.
+ for (idx_b, local_b) in stored_locals.iter().enumerate() {
+ let saved_local_b = GeneratorSavedLocal::new(idx_b);
+ if local_conflicts.contains(local_a, local_b) {
+ storage_conflicts.insert(saved_local_a, saved_local_b);
+ }
+ }
}
}
+ storage_conflicts
+}
+
+struct StorageConflictVisitor<'body, 'tcx: 'body, 's> {
+ body: &'body Body<'tcx>,
+ stored_locals: &'s liveness::LiveVarSet,
+ // FIXME(tmandry): Consider using sparse bitsets here once we have good
+ // benchmarks for generators.
+ local_conflicts: BitMatrix<Local, Local>,
+}
+
+impl<'body, 'tcx: 'body, 's> DataflowResultsConsumer<'body, 'tcx>
+for StorageConflictVisitor<'body, 'tcx, 's> {
+ type FlowState = FlowAtLocation<'tcx, MaybeStorageLive<'body, 'tcx>>;
+
+ fn body(&self) -> &'body Body<'tcx> {
+ self.body
+ }
+
+ fn visit_block_entry(&mut self,
+ block: BasicBlock,
+ flow_state: &Self::FlowState) {
+ // statement_index is only used for logging, so this is fine.
+ self.apply_state(flow_state, Location { block, statement_index: 0 });
+ }
- // The generator argument is ignored
- set.remove(self_arg());
+ fn visit_statement_entry(&mut self,
+ loc: Location,
+ _stmt: &Statement<'tcx>,
+ flow_state: &Self::FlowState) {
+ self.apply_state(flow_state, loc);
+ }
- (set, storage_liveness_map, suspending_blocks)
+ fn visit_terminator_entry(&mut self,
+ loc: Location,
+ _term: &Terminator<'tcx>,
+ flow_state: &Self::FlowState) {
+ self.apply_state(flow_state, loc);
+ }
+}
+
+impl<'body, 'tcx: 'body, 's> StorageConflictVisitor<'body, 'tcx, 's> {
+ fn apply_state(&mut self,
+ flow_state: &FlowAtLocation<'tcx, MaybeStorageLive<'body, 'tcx>>,
+ loc: Location) {
+ // Ignore unreachable blocks.
+ match self.body.basic_blocks()[loc.block].terminator().kind {
+ TerminatorKind::Unreachable => return,
+ _ => (),
+ };
+
+ let mut eligible_storage_live = flow_state.as_dense().clone();
+ eligible_storage_live.intersect(&self.stored_locals);
+
+ for local in eligible_storage_live.iter() {
+ self.local_conflicts.union_row_with(&eligible_storage_live, local);
+ }
+
+ if eligible_storage_live.count() > 1 {
+ trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live);
+ }
+ }
}
fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
upvars: &Vec<Ty<'tcx>>,
interior: Ty<'tcx>,
movable: bool,
- mir: &mut Body<'tcx>)
+ body: &mut Body<'tcx>)
-> (FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
GeneratorLayout<'tcx>,
FxHashMap<BasicBlock, liveness::LiveVarSet>)
{
// Use a liveness analysis to compute locals which are live across a suspension point
- let (live_locals, storage_liveness, suspending_blocks) =
- locals_live_across_suspend_points(tcx, mir, source, movable);
+ let LivenessInfo {
+ live_locals, live_locals_at_suspension_points, storage_conflicts, storage_liveness
+ } = locals_live_across_suspend_points(tcx, body, source, movable);
// Erase regions from the types passed in from typeck so we can compare them with
// MIR types
_ => bug!(),
};
- for (local, decl) in mir.local_decls.iter_enumerated() {
+ for (local, decl) in body.local_decls.iter_enumerated() {
// Ignore locals which are internal or not live
if !live_locals.contains(local) || decl.internal {
continue;
// Sanity check that typeck knows about the type of locals which are
// live across a suspension point
if !allowed.contains(&decl.ty) && !allowed_upvars.contains(&decl.ty) {
- span_bug!(mir.span,
+ span_bug!(body.span,
"Broken MIR: generator contains type {} in MIR, \
but typeck only knows about {}",
decl.ty,
}
}
- let dummy_local = LocalDecl::new_internal(tcx.mk_unit(), mir.span);
-
- // Gather live locals and their indices replacing values in mir.local_decls with a dummy
- // to avoid changing local indices
- let live_decls = live_locals.iter().map(|local| {
- let var = mem::replace(&mut mir.local_decls[local], dummy_local.clone());
- (local, var)
- });
+ let dummy_local = LocalDecl::new_internal(tcx.mk_unit(), body.span);
+
+ // Gather live locals and their indices replacing values in body.local_decls
+ // with a dummy to avoid changing local indices.
+ let mut locals = IndexVec::<GeneratorSavedLocal, _>::new();
+ let mut tys = IndexVec::<GeneratorSavedLocal, _>::new();
+ let mut decls = IndexVec::<GeneratorSavedLocal, _>::new();
+ for (idx, local) in live_locals.iter().enumerate() {
+ let var = mem::replace(&mut body.local_decls[local], dummy_local.clone());
+ locals.push(local);
+ tys.push(var.ty);
+ decls.push(var);
+ debug!("generator saved local {:?} => {:?}", GeneratorSavedLocal::from(idx), local);
+ }
- // For now we will access everything via variant #3, leaving empty variants
- // for the UNRESUMED, RETURNED, and POISONED states.
- // If there were a yield-less generator without a variant #3, it would not
- // have any vars to remap, so we would never use this.
- let variant_index = VariantIdx::new(3);
+ // Leave empty variants for the UNRESUMED, RETURNED, and POISONED states.
+ const RESERVED_VARIANTS: usize = 3;
+ // Build the generator variant field list.
// Create a map from local indices to generator struct indices.
- // We also create a vector of the LocalDecls of these locals.
+ let mut variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>> =
+ iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect();
let mut remap = FxHashMap::default();
- let mut decls = IndexVec::new();
- for (idx, (local, var)) in live_decls.enumerate() {
- remap.insert(local, (var.ty, variant_index, idx));
- decls.push(var);
+ for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() {
+ let variant_index = VariantIdx::from(RESERVED_VARIANTS + suspension_point_idx);
+ let mut fields = IndexVec::new();
+ for (idx, saved_local) in live_locals.iter().enumerate() {
+ fields.push(saved_local);
+ // Note that if a field is included in multiple variants, we will
+ // just use the first one here. That's fine; fields do not move
+ // around inside generators, so it doesn't matter which variant
+ // index we access them by.
+ remap.entry(locals[saved_local]).or_insert((tys[saved_local], variant_index, idx));
+ }
+ variant_fields.push(fields);
}
- let field_tys = decls.iter().map(|field| field.ty).collect::<IndexVec<_, _>>();
-
- // Put every var in each variant, for now.
- let all_vars = (0..field_tys.len()).map(GeneratorSavedLocal::from).collect();
- let empty_variants = iter::repeat(IndexVec::new()).take(3);
- let state_variants = iter::repeat(all_vars).take(suspending_blocks.count());
+ debug!("generator variant_fields = {:?}", variant_fields);
+ debug!("generator storage_conflicts = {:#?}", storage_conflicts);
let layout = GeneratorLayout {
- field_tys,
- variant_fields: empty_variants.chain(state_variants).collect(),
+ field_tys: tys,
+ variant_fields,
+ storage_conflicts,
__local_debuginfo_codegen_only_do_not_use: decls,
};
(remap, layout, storage_liveness)
}
-fn insert_switch<'a, 'tcx>(mir: &mut Body<'tcx>,
+fn insert_switch<'a, 'tcx>(body: &mut Body<'tcx>,
cases: Vec<(usize, BasicBlock)>,
transform: &TransformVisitor<'a, 'tcx>,
default: TerminatorKind<'tcx>) {
- let default_block = insert_term_block(mir, default);
- let (assign, discr) = transform.get_discr(mir);
+ let default_block = insert_term_block(body, default);
+ let (assign, discr) = transform.get_discr(body);
let switch = TerminatorKind::SwitchInt {
discr: Operand::Move(discr),
switch_ty: transform.discr_ty,
targets: cases.iter().map(|&(_, d)| d).chain(iter::once(default_block)).collect(),
};
- let source_info = source_info(mir);
- mir.basic_blocks_mut().raw.insert(0, BasicBlockData {
+ let source_info = source_info(body);
+ body.basic_blocks_mut().raw.insert(0, BasicBlockData {
statements: vec![assign],
terminator: Some(Terminator {
source_info,
is_cleanup: false,
});
- let blocks = mir.basic_blocks_mut().iter_mut();
+ let blocks = body.basic_blocks_mut().iter_mut();
for target in blocks.flat_map(|b| b.terminator_mut().successors_mut()) {
*target = BasicBlock::new(target.index() + 1);
fn elaborate_generator_drops<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
use crate::util::elaborate_drops::{elaborate_drop, Unwind};
use crate::util::patch::MirPatch;
use crate::shim::DropShimElaborator;
let gen = self_arg();
let mut elaborator = DropShimElaborator {
- mir: mir,
- patch: MirPatch::new(mir),
+ body: body,
+ patch: MirPatch::new(body),
tcx,
param_env
};
- for (block, block_data) in mir.basic_blocks().iter_enumerated() {
+ for (block, block_data) in body.basic_blocks().iter_enumerated() {
let (target, unwind, source_info) = match block_data.terminator() {
&Terminator {
source_info,
block,
);
}
- elaborator.patch.apply(mir);
+ elaborator.patch.apply(body);
}
fn create_generator_drop_shim<'a, 'tcx>(
def_id: DefId,
source: MirSource<'tcx>,
gen_ty: Ty<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
drop_clean: BasicBlock) -> Body<'tcx> {
- let mut mir = mir.clone();
+ let mut body = body.clone();
- let source_info = source_info(&mir);
+ let source_info = source_info(&body);
- let mut cases = create_cases(&mut mir, transform, |point| point.drop);
+ let mut cases = create_cases(&mut body, transform, |point| point.drop);
cases.insert(0, (UNRESUMED, drop_clean));
// The returned state and the poisoned state fall through to the default
// case which is just to return
- insert_switch(&mut mir, cases, &transform, TerminatorKind::Return);
+ insert_switch(&mut body, cases, &transform, TerminatorKind::Return);
- for block in mir.basic_blocks_mut() {
+ for block in body.basic_blocks_mut() {
let kind = &mut block.terminator_mut().kind;
if let TerminatorKind::GeneratorDrop = *kind {
*kind = TerminatorKind::Return;
}
// Replace the return variable
- mir.local_decls[RETURN_PLACE] = LocalDecl {
+ body.local_decls[RETURN_PLACE] = LocalDecl {
mutability: Mutability::Mut,
ty: tcx.mk_unit(),
user_ty: UserTypeProjections::none(),
is_user_variable: None,
};
- make_generator_state_argument_indirect(tcx, def_id, &mut mir);
+ make_generator_state_argument_indirect(tcx, def_id, &mut body);
// Change the generator argument from &mut to *mut
- mir.local_decls[self_arg()] = LocalDecl {
+ body.local_decls[self_arg()] = LocalDecl {
mutability: Mutability::Mut,
ty: tcx.mk_ptr(ty::TypeAndMut {
ty: gen_ty,
};
if tcx.sess.opts.debugging_opts.mir_emit_retag {
// Alias tracking must know we changed the type
- mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement {
+ body.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement {
source_info,
kind: StatementKind::Retag(RetagKind::Raw, Place::Base(PlaceBase::Local(self_arg()))),
})
}
- no_landing_pads(tcx, &mut mir);
+ no_landing_pads(tcx, &mut body);
// Make sure we remove dead blocks to remove
// unrelated code from the resume part of the function
- simplify::remove_dead_blocks(&mut mir);
+ simplify::remove_dead_blocks(&mut body);
- dump_mir(tcx, None, "generator_drop", &0, source, &mut mir, |_, _| Ok(()) );
+ dump_mir(tcx, None, "generator_drop", &0, source, &mut body, |_, _| Ok(()) );
- mir
+ body
}
-fn insert_term_block<'tcx>(mir: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
- let term_block = BasicBlock::new(mir.basic_blocks().len());
- let source_info = source_info(mir);
- mir.basic_blocks_mut().push(BasicBlockData {
+fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
+ let term_block = BasicBlock::new(body.basic_blocks().len());
+ let source_info = source_info(body);
+ body.basic_blocks_mut().push(BasicBlockData {
statements: Vec::new(),
terminator: Some(Terminator {
source_info,
}
fn insert_panic_block<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
message: AssertMessage<'tcx>) -> BasicBlock {
- let assert_block = BasicBlock::new(mir.basic_blocks().len());
+ let assert_block = BasicBlock::new(body.basic_blocks().len());
let term = TerminatorKind::Assert {
cond: Operand::Constant(box Constant {
- span: mir.span,
+ span: body.span,
ty: tcx.types.bool,
user_ty: None,
literal: ty::Const::from_bool(tcx, false),
cleanup: None,
};
- let source_info = source_info(mir);
- mir.basic_blocks_mut().push(BasicBlockData {
+ let source_info = source_info(body);
+ body.basic_blocks_mut().push(BasicBlockData {
statements: Vec::new(),
terminator: Some(Terminator {
source_info,
transform: TransformVisitor<'a, 'tcx>,
def_id: DefId,
source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
// Poison the generator when it unwinds
- for block in mir.basic_blocks_mut() {
+ for block in body.basic_blocks_mut() {
let source_info = block.terminator().source_info;
if let &TerminatorKind::Resume = &block.terminator().kind {
block.statements.push(
}
}
- let mut cases = create_cases(mir, &transform, |point| Some(point.resume));
+ let mut cases = create_cases(body, &transform, |point| Some(point.resume));
use rustc::mir::interpret::InterpError::{
GeneratorResumedAfterPanic,
// Jump to the entry point on the unresumed
cases.insert(0, (UNRESUMED, BasicBlock::new(0)));
// Panic when resumed on the returned state
- cases.insert(1, (RETURNED, insert_panic_block(tcx, mir, GeneratorResumedAfterReturn)));
+ cases.insert(1, (RETURNED, insert_panic_block(tcx, body, GeneratorResumedAfterReturn)));
// Panic when resumed on the poisoned state
- cases.insert(2, (POISONED, insert_panic_block(tcx, mir, GeneratorResumedAfterPanic)));
+ cases.insert(2, (POISONED, insert_panic_block(tcx, body, GeneratorResumedAfterPanic)));
- insert_switch(mir, cases, &transform, TerminatorKind::Unreachable);
+ insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
- make_generator_state_argument_indirect(tcx, def_id, mir);
- make_generator_state_argument_pinned(tcx, mir);
+ make_generator_state_argument_indirect(tcx, def_id, body);
+ make_generator_state_argument_pinned(tcx, body);
- no_landing_pads(tcx, mir);
+ no_landing_pads(tcx, body);
// Make sure we remove dead blocks to remove
// unrelated code from the drop part of the function
- simplify::remove_dead_blocks(mir);
+ simplify::remove_dead_blocks(body);
- dump_mir(tcx, None, "generator_resume", &0, source, mir, |_, _| Ok(()) );
+ dump_mir(tcx, None, "generator_resume", &0, source, body, |_, _| Ok(()) );
}
-fn source_info<'a, 'tcx>(mir: &Body<'tcx>) -> SourceInfo {
+fn source_info<'tcx>(body: &Body<'tcx>) -> SourceInfo {
SourceInfo {
- span: mir.span,
+ span: body.span,
scope: OUTERMOST_SOURCE_SCOPE,
}
}
-fn insert_clean_drop<'a, 'tcx>(mir: &mut Body<'tcx>) -> BasicBlock {
- let return_block = insert_term_block(mir, TerminatorKind::Return);
+fn insert_clean_drop<'tcx>(body: &mut Body<'tcx>) -> BasicBlock {
+ let return_block = insert_term_block(body, TerminatorKind::Return);
// Create a block to destroy an unresumed generators. This can only destroy upvars.
- let drop_clean = BasicBlock::new(mir.basic_blocks().len());
+ let drop_clean = BasicBlock::new(body.basic_blocks().len());
let term = TerminatorKind::Drop {
location: Place::Base(PlaceBase::Local(self_arg())),
target: return_block,
unwind: None,
};
- let source_info = source_info(mir);
- mir.basic_blocks_mut().push(BasicBlockData {
+ let source_info = source_info(body);
+ body.basic_blocks_mut().push(BasicBlockData {
statements: Vec::new(),
terminator: Some(Terminator {
source_info,
drop_clean
}
-fn create_cases<'a, 'tcx, F>(mir: &mut Body<'tcx>,
+fn create_cases<'a, 'tcx, F>(body: &mut Body<'tcx>,
transform: &TransformVisitor<'a, 'tcx>,
target: F) -> Vec<(usize, BasicBlock)>
where F: Fn(&SuspensionPoint) -> Option<BasicBlock> {
- let source_info = source_info(mir);
+ let source_info = source_info(body);
transform.suspension_points.iter().filter_map(|point| {
// Find the target for this suspension point, if applicable
target(point).map(|target| {
- let block = BasicBlock::new(mir.basic_blocks().len());
+ let block = BasicBlock::new(body.basic_blocks().len());
let mut statements = Vec::new();
// Create StorageLive instructions for locals with live storage
- for i in 0..(mir.local_decls.len()) {
+ for i in 0..(body.local_decls.len()) {
let l = Local::new(i);
if point.storage_liveness.contains(l) && !transform.remap.contains_key(&l) {
statements.push(Statement {
}
// Then jump to the real target
- mir.basic_blocks_mut().push(BasicBlockData {
+ body.basic_blocks_mut().push(BasicBlockData {
statements,
terminator: Some(Terminator {
source_info,
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- let yield_ty = if let Some(yield_ty) = mir.yield_ty {
+ body: &mut Body<'tcx>) {
+ let yield_ty = if let Some(yield_ty) = body.yield_ty {
yield_ty
} else {
// This only applies to generators
return
};
- assert!(mir.generator_drop.is_none());
+ assert!(body.generator_drop.is_none());
let def_id = source.def_id();
// The first argument is the generator type passed by value
- let gen_ty = mir.local_decls.raw[1].ty;
+ let gen_ty = body.local_decls.raw[1].ty;
// Get the interior types and substs which typeck computed
let (upvars, interior, discr_ty, movable) = match gen_ty.sty {
let state_adt_ref = tcx.adt_def(state_did);
let state_substs = tcx.intern_substs(&[
yield_ty.into(),
- mir.return_ty().into(),
+ body.return_ty().into(),
]);
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
// We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
- let new_ret_local = replace_result_variable(ret_ty, mir);
+ let new_ret_local = replace_result_variable(ret_ty, body);
// Extract locals which are live across suspension point into `layout`
// `remap` gives a mapping from local indices onto generator struct indices
&upvars,
interior,
movable,
- mir);
+ body);
// Run the transformation which converts Places from Local to generator struct
// accesses for locals in `remap`.
new_ret_local,
discr_ty,
};
- transform.visit_body(mir);
+ transform.visit_body(body);
// Update our MIR struct to reflect the changed we've made
- mir.yield_ty = None;
- mir.arg_count = 1;
- mir.spread_arg = None;
- mir.generator_layout = Some(layout);
+ body.yield_ty = None;
+ body.arg_count = 1;
+ body.spread_arg = None;
+ body.generator_layout = Some(layout);
// Insert `drop(generator_struct)` which is used to drop upvars for generators in
// the unresumed state.
// This is expanded to a drop ladder in `elaborate_generator_drops`.
- let drop_clean = insert_clean_drop(mir);
+ let drop_clean = insert_clean_drop(body);
- dump_mir(tcx, None, "generator_pre-elab", &0, source, mir, |_, _| Ok(()) );
+ dump_mir(tcx, None, "generator_pre-elab", &0, source, body, |_, _| Ok(()) );
// Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
// If any upvars are moved out of, drop elaboration will handle upvar destruction.
// However we need to also elaborate the code generated by `insert_clean_drop`.
- elaborate_generator_drops(tcx, def_id, mir);
+ elaborate_generator_drops(tcx, def_id, body);
- dump_mir(tcx, None, "generator_post-transform", &0, source, mir, |_, _| Ok(()) );
+ dump_mir(tcx, None, "generator_post-transform", &0, source, body, |_, _| Ok(()) );
// Create a copy of our MIR and use it to create the drop shim for the generator
let drop_shim = create_generator_drop_shim(tcx,
def_id,
source,
gen_ty,
- &mir,
+ &body,
drop_clean);
- mir.generator_drop = Some(box drop_shim);
+ body.generator_drop = Some(box drop_shim);
// Create the Generator::resume function
- create_generator_resume_function(tcx, transform, def_id, source, mir);
+ create_generator_resume_function(tcx, transform, def_id, source, body);
}
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
- Inliner { tcx, source }.run_pass(mir);
+ Inliner { tcx, source }.run_pass(body);
}
}
}
}
impl<'a, 'tcx> Inliner<'a, 'tcx> {
- fn run_pass(&self, caller_mir: &mut Body<'tcx>) {
+ fn run_pass(&self, caller_body: &mut Body<'tcx>) {
// Keep a queue of callsites to try inlining on. We take
// advantage of the fact that queries detect cycles here to
// allow us to try and fetch the fully optimized MIR of a
if self.tcx.hir().body_owner_kind_by_hir_id(id).is_fn_or_closure()
&& self.source.promoted.is_none()
{
- for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() {
+ for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
if let Some(callsite) = self.get_valid_function_call(bb,
bb_data,
- caller_mir,
+ caller_body,
param_env) {
callsites.push_back(callsite);
}
let self_node_id = self.tcx.hir().as_local_node_id(self.source.def_id()).unwrap();
let callee_node_id = self.tcx.hir().as_local_node_id(callsite.callee);
- let callee_mir = if let Some(callee_node_id) = callee_node_id {
+ let callee_body = if let Some(callee_node_id) = callee_node_id {
// Avoid a cycle here by only using `optimized_mir` only if we have
// a lower node id than the callee. This ensures that the callee will
// not inline us. This trick only works without incremental compilation.
self.tcx.optimized_mir(callsite.callee)
};
- let callee_mir = if self.consider_optimizing(callsite, callee_mir) {
+ let callee_body = if self.consider_optimizing(callsite, callee_body) {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.substs,
param_env,
- callee_mir,
+ callee_body,
)
} else {
continue;
};
- let start = caller_mir.basic_blocks().len();
- debug!("attempting to inline callsite {:?} - mir={:?}", callsite, callee_mir);
- if !self.inline_call(callsite, caller_mir, callee_mir) {
+ let start = caller_body.basic_blocks().len();
+ debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
+ if !self.inline_call(callsite, caller_body, callee_body) {
debug!("attempting to inline callsite {:?} - failure", callsite);
continue;
}
debug!("attempting to inline callsite {:?} - success", callsite);
// Add callsites from inlined function
- for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) {
+ for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
if let Some(new_callsite) = self.get_valid_function_call(bb,
bb_data,
- caller_mir,
+ caller_body,
param_env) {
// Don't inline the same function multiple times.
if callsite.callee != new_callsite.callee {
// Simplify if we inlined anything.
if changed {
debug!("Running simplify cfg on {:?}", self.source);
- CfgSimplifier::new(caller_mir).simplify();
- remove_dead_blocks(caller_mir);
+ CfgSimplifier::new(caller_body).simplify();
+ remove_dead_blocks(caller_body);
}
}
fn get_valid_function_call(&self,
bb: BasicBlock,
bb_data: &BasicBlockData<'tcx>,
- caller_mir: &Body<'tcx>,
+ caller_body: &Body<'tcx>,
param_env: ParamEnv<'tcx>,
) -> Option<CallSite<'tcx>> {
// Don't inline calls that are in cleanup blocks.
// Only consider direct calls to functions
let terminator = bb_data.terminator();
if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
- if let ty::FnDef(callee_def_id, substs) = op.ty(caller_mir, self.tcx).sty {
+ if let ty::FnDef(callee_def_id, substs) = op.ty(caller_body, self.tcx).sty {
let instance = Instance::resolve(self.tcx,
param_env,
callee_def_id,
fn consider_optimizing(&self,
callsite: CallSite<'tcx>,
- callee_mir: &Body<'tcx>)
+ callee_body: &Body<'tcx>)
-> bool
{
debug!("consider_optimizing({:?})", callsite);
- self.should_inline(callsite, callee_mir)
+ self.should_inline(callsite, callee_body)
&& self.tcx.consider_optimizing(|| format!("Inline {:?} into {:?}",
- callee_mir.span,
+ callee_body.span,
callsite))
}
fn should_inline(&self,
callsite: CallSite<'tcx>,
- callee_mir: &Body<'tcx>)
+ callee_body: &Body<'tcx>)
-> bool
{
debug!("should_inline({:?})", callsite);
// Don't inline closures that have capture debuginfo
// FIXME: Handle closures better
- if callee_mir.__upvar_debuginfo_codegen_only_do_not_use.len() > 0 {
+ if callee_body.__upvar_debuginfo_codegen_only_do_not_use.len() > 0 {
debug!(" upvar debuginfo present - not inlining");
return false;
}
// Cannot inline generators which haven't been transformed yet
- if callee_mir.yield_ty.is_some() {
+ if callee_body.yield_ty.is_some() {
debug!(" yield ty present - not inlining");
return false;
}
// Give a bonus functions with a small number of blocks,
// We normally have two or three blocks for even
// very small functions.
- if callee_mir.basic_blocks().len() <= 3 {
+ if callee_body.basic_blocks().len() <= 3 {
threshold += threshold / 4;
}
debug!(" final inline threshold = {}", threshold);
// Traverse the MIR manually so we can account for the effects of
// inlining on the CFG.
let mut work_list = vec![START_BLOCK];
- let mut visited = BitSet::new_empty(callee_mir.basic_blocks().len());
+ let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
while let Some(bb) = work_list.pop() {
if !visited.insert(bb.index()) { continue; }
- let blk = &callee_mir.basic_blocks()[bb];
+ let blk = &callee_body.basic_blocks()[bb];
for stmt in &blk.statements {
// Don't count StorageLive/StorageDead in the inlining cost.
work_list.push(target);
// If the location doesn't actually need dropping, treat it like
// a regular goto.
- let ty = location.ty(callee_mir, tcx).subst(tcx, callsite.substs).ty;
+ let ty = location.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
if ty.needs_drop(tcx, param_env) {
cost += CALL_PENALTY;
if let Some(unwind) = unwind {
let ptr_size = tcx.data_layout.pointer_size.bytes();
- for v in callee_mir.vars_and_temps_iter() {
- let v = &callee_mir.local_decls[v];
+ for v in callee_body.vars_and_temps_iter() {
+ let v = &callee_body.local_decls[v];
let ty = v.ty.subst(tcx, callsite.substs);
// Cost of the var is the size in machine-words, if we know
// it.
fn inline_call(&self,
callsite: CallSite<'tcx>,
- caller_mir: &mut Body<'tcx>,
- mut callee_mir: Body<'tcx>) -> bool {
- let terminator = caller_mir[callsite.bb].terminator.take().unwrap();
+ caller_body: &mut Body<'tcx>,
+ mut callee_body: Body<'tcx>) -> bool {
+ let terminator = caller_body[callsite.bb].terminator.take().unwrap();
match terminator.kind {
// FIXME: Handle inlining of diverging calls
TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
debug!("Inlined {:?} into {:?}", callsite.callee, self.source);
- let mut local_map = IndexVec::with_capacity(callee_mir.local_decls.len());
- let mut scope_map = IndexVec::with_capacity(callee_mir.source_scopes.len());
- let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len());
+ let mut local_map = IndexVec::with_capacity(callee_body.local_decls.len());
+ let mut scope_map = IndexVec::with_capacity(callee_body.source_scopes.len());
+ let mut promoted_map = IndexVec::with_capacity(callee_body.promoted.len());
- for mut scope in callee_mir.source_scopes.iter().cloned() {
+ for mut scope in callee_body.source_scopes.iter().cloned() {
if scope.parent_scope.is_none() {
scope.parent_scope = Some(callsite.location.scope);
- scope.span = callee_mir.span;
+ scope.span = callee_body.span;
}
scope.span = callsite.location.span;
- let idx = caller_mir.source_scopes.push(scope);
+ let idx = caller_body.source_scopes.push(scope);
scope_map.push(idx);
}
- for loc in callee_mir.vars_and_temps_iter() {
- let mut local = callee_mir.local_decls[loc].clone();
+ for loc in callee_body.vars_and_temps_iter() {
+ let mut local = callee_body.local_decls[loc].clone();
local.source_info.scope =
scope_map[local.source_info.scope];
local.source_info.span = callsite.location.span;
local.visibility_scope = scope_map[local.visibility_scope];
- let idx = caller_mir.local_decls.push(local);
+ let idx = caller_body.local_decls.push(local);
local_map.push(idx);
}
promoted_map.extend(
- callee_mir.promoted.iter().cloned().map(|p| caller_mir.promoted.push(p))
+ callee_body.promoted.iter().cloned().map(|p| caller_body.promoted.push(p))
);
// If the call is something like `a[*i] = f(i)`, where
BorrowKind::Mut { allow_two_phase_borrow: false },
destination.0);
- let ty = dest.ty(caller_mir, self.tcx);
+ let ty = dest.ty(caller_body, self.tcx);
let temp = LocalDecl::new_temp(ty, callsite.location.span);
- let tmp = caller_mir.local_decls.push(temp);
+ let tmp = caller_body.local_decls.push(temp);
let tmp = Place::Base(PlaceBase::Local(tmp));
let stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(tmp.clone(), box dest)
};
- caller_mir[callsite.bb]
+ caller_body[callsite.bb]
.statements.push(stmt);
tmp.deref()
} else {
let return_block = destination.1;
// Copy the arguments if needed.
- let args: Vec<_> = self.make_call_args(args, &callsite, caller_mir);
+ let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);
- let bb_len = caller_mir.basic_blocks().len();
+ let bb_len = caller_body.basic_blocks().len();
let mut integrator = Integrator {
block_idx: bb_len,
args: &args,
};
- for (bb, mut block) in callee_mir.basic_blocks_mut().drain_enumerated(..) {
+ for (bb, mut block) in callee_body.basic_blocks_mut().drain_enumerated(..) {
integrator.visit_basic_block_data(bb, &mut block);
- caller_mir.basic_blocks_mut().push(block);
+ caller_body.basic_blocks_mut().push(block);
}
let terminator = Terminator {
kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }
};
- caller_mir[callsite.bb].terminator = Some(terminator);
+ caller_body[callsite.bb].terminator = Some(terminator);
true
}
kind => {
- caller_mir[callsite.bb].terminator = Some(Terminator {
+ caller_body[callsite.bb].terminator = Some(Terminator {
source_info: terminator.source_info,
kind,
});
&self,
args: Vec<Operand<'tcx>>,
callsite: &CallSite<'tcx>,
- caller_mir: &mut Body<'tcx>,
+ caller_body: &mut Body<'tcx>,
) -> Vec<Local> {
let tcx = self.tcx;
// and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
if tcx.is_closure(callsite.callee) {
let mut args = args.into_iter();
- let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
- let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
+ let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+ let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
assert!(args.next().is_none());
let tuple = Place::Base(PlaceBase::Local(tuple));
- let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_mir, tcx).ty.sty {
+ let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.sty {
s
} else {
bug!("Closure arguments are not passed as a tuple");
));
// Spill to a local to make e.g., `tmp0`.
- self.create_temp_if_necessary(tuple_field, callsite, caller_mir)
+ self.create_temp_if_necessary(tuple_field, callsite, caller_body)
});
closure_ref_arg.chain(tuple_tmp_args).collect()
} else {
args.into_iter()
- .map(|a| self.create_temp_if_necessary(a, callsite, caller_mir))
+ .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
.collect()
}
}
&self,
arg: Operand<'tcx>,
callsite: &CallSite<'tcx>,
- caller_mir: &mut Body<'tcx>,
+ caller_body: &mut Body<'tcx>,
) -> Local {
// FIXME: Analysis of the usage of the arguments to avoid
// unnecessary temporaries.
if let Operand::Move(Place::Base(PlaceBase::Local(local))) = arg {
- if caller_mir.local_kind(local) == LocalKind::Temp {
+ if caller_body.local_kind(local) == LocalKind::Temp {
// Reuse the operand if it's a temporary already
return local;
}
// Otherwise, create a temporary for the arg
let arg = Rvalue::Use(arg);
- let ty = arg.ty(caller_mir, self.tcx);
+ let ty = arg.ty(caller_body, self.tcx);
let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
- let arg_tmp = caller_mir.local_decls.push(arg_tmp);
+ let arg_tmp = caller_body.local_decls.push(arg_tmp);
let stmt = Statement {
source_info: callsite.location,
kind: StatementKind::Assign(Place::Base(PlaceBase::Local(arg_tmp)), box arg),
};
- caller_mir[callsite.bb].statements.push(stmt);
+ caller_body[callsite.bb].statements.push(stmt);
arg_tmp
}
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
// We only run when optimizing MIR (at any level).
if tcx.sess.opts.debugging_opts.mir_opt_level == 0 {
return
// read-only so that we can do global analyses on the MIR in the process (e.g.
// `Place::ty()`).
let optimizations = {
- let mut optimization_finder = OptimizationFinder::new(mir, tcx);
- optimization_finder.visit_body(mir);
+ let mut optimization_finder = OptimizationFinder::new(body, tcx);
+ optimization_finder.visit_body(body);
optimization_finder.optimizations
};
// Then carry out those optimizations.
- MutVisitor::visit_body(&mut InstCombineVisitor { optimizations }, mir);
+ MutVisitor::visit_body(&mut InstCombineVisitor { optimizations }, body);
}
}
/// Finds optimization opportunities on the MIR.
struct OptimizationFinder<'b, 'a, 'tcx:'a+'b> {
- mir: &'b Body<'tcx>,
+ body: &'b Body<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
optimizations: OptimizationList<'tcx>,
}
impl<'b, 'a, 'tcx:'b> OptimizationFinder<'b, 'a, 'tcx> {
- fn new(mir: &'b Body<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> OptimizationFinder<'b, 'a, 'tcx> {
+ fn new(body: &'b Body<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> OptimizationFinder<'b, 'a, 'tcx> {
OptimizationFinder {
- mir,
+ body,
tcx,
optimizations: OptimizationList::default(),
}
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
if let Rvalue::Ref(_, _, Place::Projection(ref projection)) = *rvalue {
if let ProjectionElem::Deref = projection.elem {
- if projection.base.ty(self.mir, self.tcx).ty.is_region_ptr() {
+ if projection.base.ty(self.body, self.tcx).ty.is_region_ptr() {
self.optimizations.and_stars.insert(location);
}
}
}
if let Rvalue::Len(ref place) = *rvalue {
- let place_ty = place.ty(&self.mir.local_decls, self.tcx).ty;
+ let place_ty = place.ty(&self.body.local_decls, self.tcx).ty;
if let ty::Array(_, len) = place_ty.sty {
- let span = self.mir.source_info(location).span;
+ let span = self.body.source_info(location).span;
let ty = self.tcx.types.usize;
let constant = Constant { span, ty, literal: len, user_ty: None };
self.optimizations.arrays_lengths.insert(location, constant);
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
let debugging_override = tcx.sess.opts.debugging_opts.lower_128bit_ops;
let target_default = tcx.sess.host.options.i128_lowering;
if !debugging_override.unwrap_or(target_default) {
return
}
- self.lower_128bit_ops(tcx, mir);
+ self.lower_128bit_ops(tcx, body);
}
}
impl Lower128Bit {
- fn lower_128bit_ops<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &mut Body<'tcx>) {
+ fn lower_128bit_ops<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, body: &mut Body<'tcx>) {
let mut new_blocks = Vec::new();
- let cur_len = mir.basic_blocks().len();
+ let cur_len = body.basic_blocks().len();
- let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut();
+ let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
for block in basic_blocks.iter_mut() {
for i in (0..block.statements.len()).rev() {
let (lang_item, rhs_kind) =
-use crate::build;
+use crate::{build, shim};
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::mir::{Body, MirPhase, Promoted};
use rustc::ty::{TyCtxt, InstanceDef};
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
source: MirSource<'tcx>,
- mir: &mut Body<'tcx>);
+ body: &mut Body<'tcx>);
}
pub fn run_passes(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &mut Body<'tcx>,
+ body: &mut Body<'tcx>,
instance: InstanceDef<'tcx>,
mir_phase: MirPhase,
passes: &[&dyn MirPass],
) {
let phase_index = mir_phase.phase_index();
- let run_passes = |mir: &mut Body<'tcx>, promoted| {
- if mir.phase >= mir_phase {
+ let run_passes = |body: &mut Body<'tcx>, promoted| {
+ if body.phase >= mir_phase {
return;
}
};
let mut index = 0;
let mut run_pass = |pass: &dyn MirPass| {
- let run_hooks = |mir: &_, index, is_after| {
+ let run_hooks = |body: &_, index, is_after| {
dump_mir::on_mir_pass(tcx, &format_args!("{:03}-{:03}", phase_index, index),
- &pass.name(), source, mir, is_after);
+ &pass.name(), source, body, is_after);
};
- run_hooks(mir, index, false);
- pass.run_pass(tcx, source, mir);
- run_hooks(mir, index, true);
+ run_hooks(body, index, false);
+ pass.run_pass(tcx, source, body);
+ run_hooks(body, index, true);
index += 1;
};
run_pass(*pass);
}
- mir.phase = mir_phase;
+ body.phase = mir_phase;
};
- run_passes(mir, None);
+ run_passes(body, None);
- for (index, promoted_mir) in mir.promoted.iter_enumerated_mut() {
- run_passes(promoted_mir, Some(index));
+ for (index, promoted_body) in body.promoted.iter_enumerated_mut() {
+ run_passes(promoted_body, Some(index));
//Let's make sure we don't miss any nested instances
- assert!(promoted_mir.promoted.is_empty())
+ assert!(promoted_body.promoted.is_empty())
}
}
// Unsafety check uses the raw mir, so make sure it is run
let _ = tcx.unsafety_check_result(def_id);
- let mut mir = tcx.mir_built(def_id).steal();
- run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Const, &[
+ let mut body = tcx.mir_built(def_id).steal();
+ run_passes(tcx, &mut body, InstanceDef::Item(def_id), MirPhase::Const, &[
// What we need to do constant evaluation.
&simplify::SimplifyCfg::new("initial"),
&rustc_peek::SanityCheck,
&uniform_array_move_out::UniformArrayMoveOut,
]);
- tcx.alloc_steal_mir(mir)
+ tcx.alloc_steal_mir(body)
}
fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal<Body<'tcx>> {
let _ = tcx.mir_const_qualif(def_id);
}
- let mut mir = tcx.mir_const(def_id).steal();
- run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Validated, &[
+ let mut body = tcx.mir_const(def_id).steal();
+ run_passes(tcx, &mut body, InstanceDef::Item(def_id), MirPhase::Validated, &[
// What we need to run borrowck etc.
&qualify_consts::QualifyAndPromoteConstants,
&simplify::SimplifyCfg::new("qualify-consts"),
]);
- tcx.alloc_steal_mir(mir)
+ tcx.alloc_steal_mir(body)
}
fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Body<'tcx> {
- // `mir_borrowck` uses `mir_validated`, so we have to force it to
+ if tcx.is_constructor(def_id) {
+ // There's no reason to run all of the MIR passes on constructors when
+ // we can just output the MIR we want directly. This also saves const
+ // qualification and borrow checking the trouble of special casing
+ // constructors.
+ return shim::build_adt_ctor(tcx, def_id);
+ }
+
+ // (Mir-)Borrowck uses `mir_validated`, so we have to force it to
// execute before we can steal.
tcx.ensure().mir_borrowck(def_id);
tcx.ensure().borrowck(def_id);
}
- let mut mir = tcx.mir_validated(def_id).steal();
- run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Optimized, &[
+ let mut body = tcx.mir_validated(def_id).steal();
+ run_passes(tcx, &mut body, InstanceDef::Item(def_id), MirPhase::Optimized, &[
// Remove all things only needed by analysis
&no_landing_pads::NoLandingPads,
&simplify_branches::SimplifyBranches::new("initial"),
&add_call_guards::CriticalCallEdges,
&dump_mir::Marker("PreCodegen"),
]);
- tcx.arena.alloc(mir)
+ tcx.arena.alloc(body)
}
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- no_landing_pads(tcx, mir)
+ body: &mut Body<'tcx>) {
+ no_landing_pads(tcx, body)
}
}
-pub fn no_landing_pads<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &mut Body<'tcx>) {
+pub fn no_landing_pads<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, body: &mut Body<'tcx>) {
if tcx.sess.no_landing_pads() {
- NoLandingPads.visit_body(mir);
+ NoLandingPads.visit_body(body);
}
}
struct TempCollector<'tcx> {
temps: IndexVec<Local, TempState>,
span: Span,
- mir: &'tcx Body<'tcx>,
+ body: &'tcx Body<'tcx>,
}
impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> {
location: Location) {
debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
// We're only interested in temporaries and the return place
- match self.mir.local_kind(index) {
+ match self.body.local_kind(index) {
| LocalKind::Temp
| LocalKind::ReturnPointer
=> {},
}
}
-pub fn collect_temps(mir: &Body<'_>,
+pub fn collect_temps(body: &Body<'_>,
rpo: &mut ReversePostorder<'_, '_>) -> IndexVec<Local, TempState> {
let mut collector = TempCollector {
- temps: IndexVec::from_elem(TempState::Undefined, &mir.local_decls),
- span: mir.span,
- mir,
+ temps: IndexVec::from_elem(TempState::Undefined, &body.local_decls),
+ span: body.span,
+ body,
};
for (bb, data) in rpo {
collector.visit_basic_block_data(bb, data);
}
}
-pub fn promote_candidates<'a, 'tcx>(mir: &mut Body<'tcx>,
+pub fn promote_candidates<'a, 'tcx>(body: &mut Body<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
mut temps: IndexVec<Local, TempState>,
candidates: Vec<Candidate>) {
for candidate in candidates.into_iter().rev() {
match candidate {
Candidate::Ref(Location { block, statement_index }) => {
- match mir[block].statements[statement_index].kind {
+ match body[block].statements[statement_index].kind {
StatementKind::Assign(Place::Base(PlaceBase::Local(local)), _) => {
if temps[local] == TempState::PromotedOut {
// Already promoted.
// Declare return place local so that `mir::Body::new` doesn't complain.
let initial_locals = iter::once(
- LocalDecl::new_return_place(tcx.types.never, mir.span)
+ LocalDecl::new_return_place(tcx.types.never, body.span)
).collect();
let promoter = Promoter {
IndexVec::new(),
// FIXME: maybe try to filter this to avoid blowing up
// memory usage?
- mir.source_scopes.clone(),
- mir.source_scope_local_data.clone(),
+ body.source_scopes.clone(),
+ body.source_scope_local_data.clone(),
IndexVec::new(),
None,
initial_locals,
IndexVec::new(),
0,
vec![],
- mir.span,
+ body.span,
vec![],
),
tcx,
- source: mir,
+ source: body,
temps: &mut temps,
keep_original: false
};
// Eliminate assignments to, and drops of promoted temps.
let promoted = |index: Local| temps[index] == TempState::PromotedOut;
- for block in mir.basic_blocks_mut() {
+ for block in body.basic_blocks_mut() {
block.statements.retain(|statement| {
match statement.kind {
StatementKind::Assign(Place::Base(PlaceBase::Local(index)), _) |
/// What kind of item we are in.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Mode {
- Const,
+ /// A `static` item.
Static,
+ /// A `static mut` item.
StaticMut,
+ /// A `const fn` item.
ConstFn,
- Fn
+ /// A `const` item or an anonymous constant (e.g. in array lengths).
+ Const,
+ /// Other type of `fn`.
+ NonConstFn,
+}
+
+impl Mode {
+ /// Determine whether we have to do full const-checking because syntactically, we
+ /// are required to be "const".
+ #[inline]
+ fn requires_const_checking(self) -> bool {
+ self != Mode::NonConstFn
+ }
}
impl fmt::Display for Mode {
Mode::Const => write!(f, "constant"),
Mode::Static | Mode::StaticMut => write!(f, "static"),
Mode::ConstFn => write!(f, "constant function"),
- Mode::Fn => write!(f, "function")
+ Mode::NonConstFn => write!(f, "function")
}
}
}
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
mode: Mode,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
per_local: PerQualif<BitSet<Local>>,
}
},
}
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time. So `return true` means
+/// "I found something bad, no reason to go on searching". `false` is only returned if we
+/// definitely cannot find anything bad anywhere.
+///
+/// The default implementations proceed structurally.
trait Qualif {
const IDX: usize;
let base_qualif = Self::in_place(cx, &proj.base);
let qualif = base_qualif && Self::mask_for_ty(
cx,
- proj.base.ty(cx.mir, cx.tcx)
+ proj.base.ty(cx.body, cx.tcx)
.projection_ty(cx.tcx, &proj.elem)
.ty,
);
// Special-case reborrows to be more like a copy of the reference.
if let Place::Projection(ref proj) = *place {
if let ProjectionElem::Deref = proj.elem {
- let base_ty = proj.base.ty(cx.mir, cx.tcx).ty;
+ let base_ty = proj.base.ty(cx.body, cx.tcx).ty;
if let ty::Ref(..) = base_ty.sty {
return Self::in_place(cx, &proj.base);
}
}
}
-// Constant containing interior mutability (UnsafeCell).
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during the run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
struct HasMutInterior;
impl Qualif for HasMutInterior {
// allowed in constants (and the `Checker` will error), and/or it
// won't be promoted, due to `&mut ...` or interior mutability.
Rvalue::Ref(_, kind, ref place) => {
- let ty = place.ty(cx.mir, cx.tcx).ty;
+ let ty = place.ty(cx.body, cx.tcx).ty;
if let BorrowKind::Mut { .. } = kind {
// In theory, any zero-sized value could be borrowed
_ => return true,
}
} else if let ty::Array(_, len) = ty.sty {
- // FIXME(eddyb) the `cx.mode == Mode::Fn` condition
+ // FIXME(eddyb) the `cx.mode == Mode::NonConstFn` condition
// seems unnecessary, given that this is merely a ZST.
match len.assert_usize(cx.tcx) {
- Some(0) if cx.mode == Mode::Fn => {},
+ Some(0) if cx.mode == Mode::NonConstFn => {},
_ => return true,
}
} else {
Rvalue::Aggregate(ref kind, _) => {
if let AggregateKind::Adt(def, ..) = **kind {
if Some(def.did) == cx.tcx.lang_items().unsafe_cell_type() {
- let ty = rvalue.ty(cx.mir, cx.tcx);
+ let ty = rvalue.ty(cx.body, cx.tcx);
assert_eq!(Self::in_any_value_of_ty(cx, ty), Some(true));
return true;
}
}
}
-// Constant containing an ADT that implements Drop.
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out (a) because we cannot run `Drop` during compile-time
+/// as that might not be a `const fn`, and (b) because implicit promotion would
+/// remove side-effects that occur as part of dropping that value.
struct NeedsDrop;
impl Qualif for NeedsDrop {
}
}
-// Not promotable at all - non-`const fn` calls, asm!,
-// pointer comparisons, ptr-to-int casts, etc.
+/// Not promotable at all - non-`const fn` calls, `asm!`,
+/// pointer comparisons, ptr-to-int casts, etc.
+/// Inside a const context all constness rules apply, so promotion simply has to follow the
+/// regular constant rules (modulo interior mutability or `Drop` rules, which are handled by
+/// `HasMutInterior` and `NeedsDrop` respectively). Basically this duplicates the checks that
+/// the const-checking visitor enforces by emitting errors when working in const context.
struct IsNotPromotable;
impl Qualif for IsNotPromotable {
ProjectionElem::Index(_) => {}
ProjectionElem::Field(..) => {
- if cx.mode == Mode::Fn {
- let base_ty = proj.base.ty(cx.mir, cx.tcx).ty;
+ if cx.mode == Mode::NonConstFn {
+ let base_ty = proj.base.ty(cx.body, cx.tcx).ty;
if let Some(def) = base_ty.ty_adt_def() {
+ // No promotion of union field accesses.
if def.is_union() {
return true;
}
fn in_rvalue(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool {
match *rvalue {
- Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if cx.mode == Mode::Fn => {
- let operand_ty = operand.ty(cx.mir, cx.tcx);
+ Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if cx.mode == Mode::NonConstFn => {
+ let operand_ty = operand.ty(cx.body, cx.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
}
}
- Rvalue::BinaryOp(op, ref lhs, _) if cx.mode == Mode::Fn => {
- if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(cx.mir, cx.tcx).sty {
+ Rvalue::BinaryOp(op, ref lhs, _) if cx.mode == Mode::NonConstFn => {
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(cx.body, cx.tcx).sty {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
args: &[Operand<'tcx>],
_return_ty: Ty<'tcx>,
) -> bool {
- let fn_ty = callee.ty(cx.mir, cx.tcx);
+ let fn_ty = callee.ty(cx.body, cx.tcx);
match fn_ty.sty {
ty::FnDef(def_id, _) => {
match cx.tcx.fn_sig(def_id).abi() {
/// Refers to temporaries which cannot be promoted *implicitly*.
/// Explicit promotion happens e.g. for constant arguments declared via `rustc_args_required_const`.
-/// Inside a const context all constness rules
-/// apply, so implicit promotion simply has to follow the regular constant rules (modulo interior
-/// mutability or `Drop` rules which are handled `HasMutInterior` and `NeedsDrop` respectively).
-/// Implicit promotion inside regular functions does not happen if `const fn` calls are involved,
-/// as the call may be perfectly alright at runtime, but fail at compile time e.g. due to addresses
-/// being compared inside the function.
+/// Implicit promotion has almost the same rules, except that it disallows calling `const fn`s
+/// other than those marked `#[rustc_promotable]`. This is to avoid changing a legitimate
+/// run-time operation into a failing compile-time operation, e.g. due to addresses being
+/// compared inside the function.
struct IsNotImplicitlyPromotable;
impl Qualif for IsNotImplicitlyPromotable {
args: &[Operand<'tcx>],
_return_ty: Ty<'tcx>,
) -> bool {
- if cx.mode == Mode::Fn {
- if let ty::FnDef(def_id, _) = callee.ty(cx.mir, cx.tcx).sty {
+ if cx.mode == Mode::NonConstFn {
+ if let ty::FnDef(def_id, _) = callee.ty(cx.body, cx.tcx).sty {
// Never promote runtime `const fn` calls of
// functions without `#[rustc_promotable]`.
if !cx.tcx.is_promotable_const_fn(def_id) {
}
}
+/// Checks MIR for being admissible as a compile-time constant, using `ConstCx`
+/// for value qualifications, and accumulates writes of
+/// rvalue/call results to locals, in `local_qualif`.
+/// It also records candidates for promotion in `promotion_candidates`,
+/// both in functions and const/static items.
struct Checker<'a, 'tcx> {
cx: ConstCx<'a, 'tcx>,
impl<'a, 'tcx> Checker<'a, 'tcx> {
fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
mode: Mode)
-> Self {
assert!(def_id.is_local());
- let mut rpo = traversal::reverse_postorder(mir);
- let temps = promote_consts::collect_temps(mir, &mut rpo);
+ let mut rpo = traversal::reverse_postorder(body);
+ let temps = promote_consts::collect_temps(body, &mut rpo);
rpo.reset();
let param_env = tcx.param_env(def_id);
tcx,
param_env,
mode,
- mir,
- per_local: PerQualif::new(BitSet::new_empty(mir.local_decls.len())),
+ body,
+ per_local: PerQualif::new(BitSet::new_empty(body.local_decls.len())),
};
- for (local, decl) in mir.local_decls.iter_enumerated() {
- if let LocalKind::Arg = mir.local_kind(local) {
+ for (local, decl) in body.local_decls.iter_enumerated() {
+ if let LocalKind::Arg = body.local_kind(local) {
let qualifs = cx.qualifs_in_any_value_of_ty(decl.ty);
for (per_local, qualif) in &mut cx.per_local.as_mut().zip(qualifs).0 {
if *qualif {
if !temps[local].is_promotable() {
cx.per_local[IsNotPromotable].insert(local);
}
- if let LocalKind::Var = mir.local_kind(local) {
+ if let LocalKind::Var = body.local_kind(local) {
// Sanity check to prevent implicit and explicit promotion of
// named locals
assert!(cx.per_local[IsNotPromotable].contains(local));
Checker {
cx,
- span: mir.span,
+ span: body.span,
def_id,
rpo,
temp_promotion_state: temps,
// slightly pointless (even with feature-gating).
fn not_const(&mut self) {
unleash_miri!(self);
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
let mut err = struct_span_err!(
self.tcx.sess,
self.span,
qualifs[HasMutInterior] = false;
qualifs[IsNotPromotable] = true;
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
if let BorrowKind::Mut { .. } = kind {
let mut err = struct_span_err!(self.tcx.sess, self.span, E0017,
"references in {}s may only refer \
// We might have a candidate for promotion.
let candidate = Candidate::Ref(location);
- // We can only promote interior borrows of promotable temps.
+ // Start by traversing to the "base", with non-deref projections removed.
let mut place = place;
while let Place::Projection(ref proj) = *place {
if proj.elem == ProjectionElem::Deref {
place = &proj.base;
}
debug!("qualify_consts: promotion candidate: place={:?}", place);
+ // We can only promote interior borrows of promotable temps (non-temps
+ // don't get promoted anyway).
+ // (If we bailed out of the loop due to a `Deref` above, we will definitely
+ // not enter the conditional here.)
if let Place::Base(PlaceBase::Local(local)) = *place {
- if self.mir.local_kind(local) == LocalKind::Temp {
+ if self.body.local_kind(local) == LocalKind::Temp {
debug!("qualify_consts: promotion candidate: local={:?}", local);
// The borrowed place doesn't have `HasMutInterior`
// (from `in_rvalue`), so we can safely ignore
// `HasMutInterior`, from a type that does, e.g.:
// `let _: &'static _ = &(Cell::new(1), 2).1;`
let mut local_qualifs = self.qualifs_in_local(local);
+ // Any qualifications, except HasMutInterior (see above), disqualify
+ // from promotion.
+ // This is, in particular, the "implicit promotion" version of
+ // the check making sure that we don't run drop glue during const-eval.
local_qualifs[HasMutInterior] = false;
if !local_qualifs.0.iter().any(|&qualif| qualif) {
debug!("qualify_consts: promotion candidate: {:?}", candidate);
}
};
- let kind = self.mir.local_kind(index);
+ let kind = self.body.local_kind(index);
debug!("store to {:?} {:?}", kind, index);
// Only handle promotable temps in non-const functions.
- if self.mode == Mode::Fn {
+ if self.mode == Mode::NonConstFn {
if kind != LocalKind::Temp ||
!self.temp_promotion_state[index].is_promotable() {
return;
fn check_const(&mut self) -> (u8, &'tcx BitSet<Local>) {
debug!("const-checking {} {:?}", self.mode, self.def_id);
- let mir = self.mir;
+ let body = self.body;
- let mut seen_blocks = BitSet::new_empty(mir.basic_blocks().len());
+ let mut seen_blocks = BitSet::new_empty(body.basic_blocks().len());
let mut bb = START_BLOCK;
loop {
seen_blocks.insert(bb.index());
- self.visit_basic_block_data(bb, &mir[bb]);
+ self.visit_basic_block_data(bb, &body[bb]);
- let target = match mir[bb].terminator().kind {
+ let target = match body[bb].terminator().kind {
TerminatorKind::Goto { target } |
TerminatorKind::Drop { target, .. } |
TerminatorKind::Assert { target, .. } |
for candidate in &self.promotion_candidates {
match *candidate {
Candidate::Ref(Location { block: bb, statement_index: stmt_idx }) => {
- match self.mir[bb].statements[stmt_idx].kind {
+ match self.body[bb].statements[stmt_idx].kind {
StatementKind::Assign(
_,
box Rvalue::Ref(_, _, Place::Base(PlaceBase::Local(index)))
// Account for errors in consts by using the
// conservative type qualification instead.
if qualifs[IsNotPromotable] {
- qualifs = self.qualifs_in_any_value_of_ty(mir.return_ty());
+ qualifs = self.qualifs_in_any_value_of_ty(body.return_ty());
}
(qualifs.encode_to_bits(), self.tcx.arena.alloc(promoted_temps))
}
}
-/// Checks MIR for const-correctness, using `ConstCx`
-/// for value qualifications, and accumulates writes of
-/// rvalue/call results to locals, in `local_qualif`.
-/// For functions (constant or not), it also records
-/// candidates for promotion in `promotion_candidates`.
impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> {
- fn visit_place(&mut self,
- place: &Place<'tcx>,
- context: PlaceContext,
- location: Location) {
- debug!("visit_place: place={:?} context={:?} location={:?}", place, context, location);
- place.iterate(|place_base, place_projections| {
- match place_base {
- PlaceBase::Local(_) => {}
- PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. }) => {
- unreachable!()
- }
- PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. }) => {
- if self.tcx
- .get_attrs(*def_id)
- .iter()
- .any(|attr| attr.check_name(sym::thread_local)) {
- if self.mode != Mode::Fn {
- span_err!(self.tcx.sess, self.span, E0625,
- "thread-local statics cannot be \
- accessed at compile-time");
- }
- return;
+ fn visit_place_base(
+ &mut self,
+ place_base: &PlaceBase<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_place_base(place_base, context, location);
+ match place_base {
+ PlaceBase::Local(_) => {}
+ PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. }) => {
+ unreachable!()
+ }
+ PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. }) => {
+ if self.tcx
+ .get_attrs(*def_id)
+ .iter()
+ .any(|attr| attr.check_name(sym::thread_local)) {
+ if self.mode.requires_const_checking() {
+ span_err!(self.tcx.sess, self.span, E0625,
+ "thread-local statics cannot be \
+ accessed at compile-time");
}
+ return;
+ }
- // Only allow statics (not consts) to refer to other statics.
- if self.mode == Mode::Static || self.mode == Mode::StaticMut {
- if self.mode == Mode::Static && context.is_mutating_use() {
- // this is not strictly necessary as miri will also bail out
- // For interior mutability we can't really catch this statically as that
- // goes through raw pointers and intermediate temporaries, so miri has
- // to catch this anyway
- self.tcx.sess.span_err(
- self.span,
- "cannot mutate statics in the initializer of another static",
- );
- }
- return;
+ // Only allow statics (not consts) to refer to other statics.
+ if self.mode == Mode::Static || self.mode == Mode::StaticMut {
+ if self.mode == Mode::Static && context.is_mutating_use() {
+ // this is not strictly necessary as miri will also bail out
+ // For interior mutability we can't really catch this statically as that
+ // goes through raw pointers and intermediate temporaries, so miri has
+ // to catch this anyway
+ self.tcx.sess.span_err(
+ self.span,
+ "cannot mutate statics in the initializer of another static",
+ );
}
- unleash_miri!(self);
+ return;
+ }
+ unleash_miri!(self);
- if self.mode != Mode::Fn {
- let mut err = struct_span_err!(self.tcx.sess, self.span, E0013,
- "{}s cannot refer to statics, use \
- a constant instead", self.mode);
- if self.tcx.sess.teach(&err.get_code().unwrap()) {
- err.note(
- "Static and const variables can refer to other const variables. \
- But a const variable cannot refer to a static variable."
- );
- err.help(
- "To fix this, the value can be extracted as a const and then used."
- );
- }
- err.emit()
+ if self.mode.requires_const_checking() {
+ let mut err = struct_span_err!(self.tcx.sess, self.span, E0013,
+ "{}s cannot refer to statics, use \
+ a constant instead", self.mode);
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "Static and const variables can refer to other const variables. \
+ But a const variable cannot refer to a static variable."
+ );
+ err.help(
+ "To fix this, the value can be extracted as a const and then used."
+ );
}
+ err.emit()
}
}
+ }
+ }
- for proj in place_projections {
- match proj.elem {
- ProjectionElem::Deref => {
- if context.is_mutating_use() {
- // `not_const` errors out in const contexts
- self.not_const()
- }
- let base_ty = proj.base.ty(self.mir, self.tcx).ty;
- match self.mode {
- Mode::Fn => {},
- _ => {
- if let ty::RawPtr(_) = base_ty.sty {
- if !self.tcx.features().const_raw_ptr_deref {
- emit_feature_err(
- &self.tcx.sess.parse_sess, sym::const_raw_ptr_deref,
- self.span, GateIssue::Language,
- &format!(
- "dereferencing raw pointers in {}s is unstable",
- self.mode,
- ),
- );
- }
- }
+ fn visit_projection(
+ &mut self,
+ proj: &Projection<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ debug!(
+ "visit_place_projection: proj={:?} context={:?} location={:?}",
+ proj, context, location,
+ );
+ self.super_projection(proj, context, location);
+ match proj.elem {
+ ProjectionElem::Deref => {
+ if context.is_mutating_use() {
+ // `not_const` errors out in const contexts
+ self.not_const()
+ }
+ let base_ty = proj.base.ty(self.body, self.tcx).ty;
+ match self.mode {
+ Mode::NonConstFn => {},
+ _ => {
+ if let ty::RawPtr(_) = base_ty.sty {
+ if !self.tcx.features().const_raw_ptr_deref {
+ emit_feature_err(
+ &self.tcx.sess.parse_sess, sym::const_raw_ptr_deref,
+ self.span, GateIssue::Language,
+ &format!(
+ "dereferencing raw pointers in {}s is unstable",
+ self.mode,
+ ),
+ );
}
}
}
+ }
+ }
- ProjectionElem::ConstantIndex {..} |
- ProjectionElem::Subslice {..} |
- ProjectionElem::Field(..) |
- ProjectionElem::Index(_) => {
- let base_ty = proj.base.ty(self.mir, self.tcx).ty;
- if let Some(def) = base_ty.ty_adt_def() {
- if def.is_union() {
- match self.mode {
- Mode::ConstFn => {
- if !self.tcx.features().const_fn_union {
- emit_feature_err(
- &self.tcx.sess.parse_sess, sym::const_fn_union,
- self.span, GateIssue::Language,
- "unions in const fn are unstable",
- );
- }
- },
-
- | Mode::Fn
- | Mode::Static
- | Mode::StaticMut
- | Mode::Const
- => {},
+ ProjectionElem::ConstantIndex {..} |
+ ProjectionElem::Subslice {..} |
+ ProjectionElem::Field(..) |
+ ProjectionElem::Index(_) => {
+ let base_ty = proj.base.ty(self.body, self.tcx).ty;
+ if let Some(def) = base_ty.ty_adt_def() {
+ if def.is_union() {
+ match self.mode {
+ Mode::ConstFn => {
+ if !self.tcx.features().const_fn_union {
+ emit_feature_err(
+ &self.tcx.sess.parse_sess, sym::const_fn_union,
+ self.span, GateIssue::Language,
+ "unions in const fn are unstable",
+ );
}
- }
- }
- }
+ },
- ProjectionElem::Downcast(..) => {
- self.not_const()
+ | Mode::NonConstFn
+ | Mode::Static
+ | Mode::StaticMut
+ | Mode::Const
+ => {},
+ }
}
}
}
- });
+
+ ProjectionElem::Downcast(..) => {
+ self.not_const()
+ }
+ }
}
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
// Check nested operands and places.
if let Rvalue::Ref(_, kind, ref place) = *rvalue {
// Special-case reborrows.
- let mut is_reborrow = false;
+ let mut reborrow_place = None;
if let Place::Projection(ref proj) = *place {
if let ProjectionElem::Deref = proj.elem {
- let base_ty = proj.base.ty(self.mir, self.tcx).ty;
+ let base_ty = proj.base.ty(self.body, self.tcx).ty;
if let ty::Ref(..) = base_ty.sty {
- is_reborrow = true;
+ reborrow_place = Some(&proj.base);
}
}
}
- if is_reborrow {
+ if let Some(place) = reborrow_place {
let ctx = match kind {
BorrowKind::Shared => PlaceContext::NonMutatingUse(
NonMutatingUseContext::SharedBorrow,
MutatingUseContext::Borrow,
),
};
- self.super_place(place, ctx, location);
+ self.visit_place(place, ctx, location);
} else {
self.super_rvalue(rvalue, location);
}
Rvalue::Aggregate(..) => {}
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => {
- let operand_ty = operand.ty(self.mir, self.tcx);
+ let operand_ty = operand.ty(self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
(CastTy::Ptr(_), CastTy::Int(_)) |
- (CastTy::FnPtr, CastTy::Int(_)) if self.mode != Mode::Fn => {
+ (CastTy::FnPtr, CastTy::Int(_)) if self.mode != Mode::NonConstFn => {
unleash_miri!(self);
if !self.tcx.features().const_raw_ptr_to_usize_cast {
// in const fn and constants require the feature gate
}
Rvalue::BinaryOp(op, ref lhs, _) => {
- if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.mir, self.tcx).sty {
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(self.body, self.tcx).sty {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
op == BinOp::Offset);
unleash_miri!(self);
- if self.mode != Mode::Fn && !self.tcx.features().const_compare_raw_pointers {
+ if self.mode.requires_const_checking() &&
+ !self.tcx.features().const_compare_raw_pointers
+ {
// require the feature gate inside constants and const fn
// FIXME: make it unsafe to use these operations
emit_feature_err(
Rvalue::NullaryOp(NullOp::Box, _) => {
unleash_miri!(self);
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
let mut err = struct_span_err!(self.tcx.sess, self.span, E0010,
"allocations are not allowed in {}s", self.mode);
err.span_label(self.span, format!("allocation not allowed in {}s", self.mode));
self.assign(dest, ValueSource::Call {
callee: func,
args,
- return_ty: dest.ty(self.mir, self.tcx).ty,
+ return_ty: dest.ty(self.body, self.tcx).ty,
}, location);
}
- let fn_ty = func.ty(self.mir, self.tcx);
+ let fn_ty = func.ty(self.body, self.tcx);
let mut callee_def_id = None;
let mut is_shuffle = false;
match fn_ty.sty {
// special intrinsic that can be called directly without an intrinsic
// feature gate needs a language feature gate
"transmute" => {
- // never promote transmute calls
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
// const eval transmute calls only with the feature gate
if !self.tcx.features().const_transmute {
emit_feature_err(
}
_ => {
// In normal functions no calls are feature-gated.
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
let unleash_miri = self
.tcx
.sess
}
}
ty::FnPtr(_) => {
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
let mut err = self.tcx.sess.struct_span_err(
self.span,
&format!("function pointers are not allowed in const fn"));
self.super_terminator_kind(kind, location);
// Deny *any* live drops anywhere other than functions.
- if self.mode != Mode::Fn {
+ if self.mode.requires_const_checking() {
unleash_miri!(self);
// HACK(eddyb): emulate a bit of dataflow analysis,
// conservatively, that drop elaboration will do.
let needs_drop = if let Place::Base(PlaceBase::Local(local)) = *place {
if NeedsDrop::in_local(self, local) {
- Some(self.mir.local_decls[local].source_info.span)
+ Some(self.body.local_decls[local].source_info.span)
} else {
None
}
if let Some(span) = needs_drop {
// Double-check the type being dropped, to minimize false positives.
- let ty = place.ty(self.mir, self.tcx).ty;
+ let ty = place.ty(self.body, self.tcx).ty;
if ty.needs_drop(self.tcx, self.param_env) {
struct_span_err!(self.tcx.sess, span, E0493,
"destructors cannot be evaluated at compile-time")
// cannot yet be stolen), because `mir_validated()`, which steals
// from `mir_const(), forces this query to execute before
// performing the steal.
- let mir = &tcx.mir_const(def_id).borrow();
+ let body = &tcx.mir_const(def_id).borrow();
- if mir.return_ty().references_error() {
- tcx.sess.delay_span_bug(mir.span, "mir_const_qualif: MIR had errors");
+ if body.return_ty().references_error() {
+ tcx.sess.delay_span_bug(body.span, "mir_const_qualif: MIR had errors");
return (1 << IsNotPromotable::IDX, tcx.arena.alloc(BitSet::new_empty(0)));
}
- Checker::new(tcx, def_id, mir, Mode::Const).check_const()
+ Checker::new(tcx, def_id, body, Mode::Const).check_const()
}
pub struct QualifyAndPromoteConstants;
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
+ body: &mut Body<'tcx>) {
// There's not really any point in promoting errorful MIR.
- if mir.return_ty().references_error() {
- tcx.sess.delay_span_bug(mir.span, "QualifyAndPromoteConstants: MIR had errors");
+ if body.return_ty().references_error() {
+ tcx.sess.delay_span_bug(body.span, "QualifyAndPromoteConstants: MIR had errors");
return;
}
let id = tcx.hir().as_local_hir_id(def_id).unwrap();
let mut const_promoted_temps = None;
let mode = match tcx.hir().body_owner_kind_by_hir_id(id) {
- hir::BodyOwnerKind::Closure => Mode::Fn,
+ hir::BodyOwnerKind::Closure => Mode::NonConstFn,
hir::BodyOwnerKind::Fn => {
if tcx.is_const_fn(def_id) {
Mode::ConstFn
} else {
- Mode::Fn
+ Mode::NonConstFn
}
}
hir::BodyOwnerKind::Const => {
};
debug!("run_pass: mode={:?}", mode);
- if mode == Mode::Fn || mode == Mode::ConstFn {
+ if mode == Mode::NonConstFn || mode == Mode::ConstFn {
// This is ugly because Checker holds onto mir,
// which can't be mutated until its scope ends.
let (temps, candidates) = {
- let mut checker = Checker::new(tcx, def_id, mir, mode);
+ let mut checker = Checker::new(tcx, def_id, body, mode);
if mode == Mode::ConstFn {
if tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you {
checker.check_const();
} else if tcx.is_min_const_fn(def_id) {
// enforce `min_const_fn` for stable const fns
use super::qualify_min_const_fn::is_min_const_fn;
- if let Err((span, err)) = is_min_const_fn(tcx, def_id, mir) {
+ if let Err((span, err)) = is_min_const_fn(tcx, def_id, body) {
let mut diag = struct_span_err!(
tcx.sess,
span,
};
// Do the actual promotion, now that we know what's viable.
- promote_consts::promote_candidates(mir, tcx, temps, candidates);
+ promote_consts::promote_candidates(body, tcx, temps, candidates);
} else {
- if !mir.control_flow_destroyed.is_empty() {
- let mut locals = mir.vars_iter();
+ if !body.control_flow_destroyed.is_empty() {
+ let mut locals = body.vars_iter();
if let Some(local) = locals.next() {
- let span = mir.local_decls[local].source_info.span;
+ let span = body.local_decls[local].source_info.span;
let mut error = tcx.sess.struct_span_err(
span,
&format!(
mode,
),
);
- for (span, kind) in mir.control_flow_destroyed.iter() {
+ for (span, kind) in body.control_flow_destroyed.iter() {
error.span_note(
*span,
&format!("use of {} here does not actually short circuit due to \
);
}
for local in locals {
- let span = mir.local_decls[local].source_info.span;
+ let span = body.local_decls[local].source_info.span;
error.span_note(
span,
"more locals defined here",
// Already computed by `mir_const_qualif`.
const_promoted_temps.unwrap()
} else {
- Checker::new(tcx, def_id, mir, mode).check_const().1
+ Checker::new(tcx, def_id, body, mode).check_const().1
};
// In `const` and `static` everything without `StorageDead`
// is `'static`, we don't have to create promoted MIR fragments,
// just remove `Drop` and `StorageDead` on "promoted" locals.
debug!("run_pass: promoted_temps={:?}", promoted_temps);
- for block in mir.basic_blocks_mut() {
+ for block in body.basic_blocks_mut() {
block.statements.retain(|statement| {
match statement.kind {
StatementKind::StorageDead(index) => {
return;
}
}
- let ty = mir.return_ty();
+ let ty = body.return_ty();
tcx.infer_ctxt().enter(|infcx| {
let param_env = ty::ParamEnv::empty();
- let cause = traits::ObligationCause::new(mir.span, id, traits::SharedStatic);
+ let cause = traits::ObligationCause::new(body.span, id, traits::SharedStatic);
let mut fulfillment_cx = traits::FulfillmentContext::new();
fulfillment_cx.register_bound(&infcx,
param_env,
pub fn is_min_const_fn(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
) -> McfResult {
let mut current = def_id;
loop {
}
}
- for local in &mir.local_decls {
+ for local in &body.local_decls {
check_ty(tcx, local.ty, local.source_info.span, def_id)?;
}
// impl trait is gone in MIR, so check the return type manually
check_ty(
tcx,
tcx.fn_sig(def_id).output().skip_binder(),
- mir.local_decls.iter().next().unwrap().source_info.span,
+ body.local_decls.iter().next().unwrap().source_info.span,
def_id,
)?;
- for bb in mir.basic_blocks() {
- check_terminator(tcx, mir, bb.terminator())?;
+ for bb in body.basic_blocks() {
+ check_terminator(tcx, body, bb.terminator())?;
for stmt in &bb.statements {
- check_statement(tcx, mir, stmt)?;
+ check_statement(tcx, body, stmt)?;
}
}
Ok(())
fn check_rvalue(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
rvalue: &Rvalue<'tcx>,
span: Span,
) -> McfResult {
}
Rvalue::Cast(CastKind::Misc, operand, cast_ty) => {
use rustc::ty::cast::CastTy;
- let cast_in = CastTy::from_ty(operand.ty(mir, tcx)).expect("bad input type for cast");
+ let cast_in = CastTy::from_ty(operand.ty(body, tcx)).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
(CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => Err((
Rvalue::BinaryOp(_, lhs, rhs) | Rvalue::CheckedBinaryOp(_, lhs, rhs) => {
check_operand(lhs, span)?;
check_operand(rhs, span)?;
- let ty = lhs.ty(mir, tcx);
+ let ty = lhs.ty(body, tcx);
if ty.is_integral() || ty.is_bool() || ty.is_char() {
Ok(())
} else {
"heap allocations are not allowed in const fn".into(),
)),
Rvalue::UnaryOp(_, operand) => {
- let ty = operand.ty(mir, tcx);
+ let ty = operand.ty(body, tcx);
if ty.is_integral() || ty.is_bool() {
check_operand(operand, span)
} else {
fn check_statement(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
statement: &Statement<'tcx>,
) -> McfResult {
let span = statement.source_info.span;
match &statement.kind {
StatementKind::Assign(place, rval) => {
check_place(place, span)?;
- check_rvalue(tcx, mir, rval, span)
+ check_rvalue(tcx, body, rval, span)
}
StatementKind::FakeRead(_, place) => check_place(place, span),
fn check_terminator(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
terminator: &Terminator<'tcx>,
) -> McfResult {
let span = terminator.source_info.span;
TerminatorKind::FalseEdges { .. } | TerminatorKind::SwitchInt { .. } => Err((
span,
- "`if`, `match`, `&&` and `||` are not stable in const fn".into(),
+ "loops and conditional expressions are not stable in const fn".into(),
)),
| TerminatorKind::Abort | TerminatorKind::Unreachable => {
Err((span, "const fn with unreachable code is not stable".into()))
destination: _,
cleanup: _,
} => {
- let fn_ty = func.ty(mir, tcx);
+ let fn_ty = func.ty(body, tcx);
if let ty::FnDef(def_id, _) = fn_ty.sty {
// some intrinsics are waved through if called inside the
pub fn remove_noop_landing_pads<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &mut Body<'tcx>)
+ body: &mut Body<'tcx>)
{
if tcx.sess.no_landing_pads() {
return
}
- debug!("remove_noop_landing_pads({:?})", mir);
+ debug!("remove_noop_landing_pads({:?})", body);
- RemoveNoopLandingPads.remove_nop_landing_pads(mir)
+ RemoveNoopLandingPads.remove_nop_landing_pads(body)
}
impl MirPass for RemoveNoopLandingPads {
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- remove_noop_landing_pads(tcx, mir);
+ body: &mut Body<'tcx>) {
+ remove_noop_landing_pads(tcx, body);
}
}
fn is_nop_landing_pad(
&self,
bb: BasicBlock,
- mir: &Body<'_>,
+ body: &Body<'_>,
nop_landing_pads: &BitSet<BasicBlock>,
) -> bool {
- for stmt in &mir[bb].statements {
+ for stmt in &body[bb].statements {
match stmt.kind {
StatementKind::FakeRead(..) |
StatementKind::StorageLive(_) |
}
}
- let terminator = mir[bb].terminator();
+ let terminator = body[bb].terminator();
match terminator.kind {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
}
}
- fn remove_nop_landing_pads(&self, mir: &mut Body<'_>) {
+ fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
// make sure there's a single resume block
let resume_block = {
- let patch = MirPatch::new(mir);
+ let patch = MirPatch::new(body);
let resume_block = patch.resume_block();
- patch.apply(mir);
+ patch.apply(body);
resume_block
};
debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
let mut jumps_folded = 0;
let mut landing_pads_removed = 0;
- let mut nop_landing_pads = BitSet::new_empty(mir.basic_blocks().len());
+ let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len());
// This is a post-order traversal, so that if A post-dominates B
// then A will be visited before B.
- let postorder: Vec<_> = traversal::postorder(mir).map(|(bb, _)| bb).collect();
+ let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
for bb in postorder {
debug!(" processing {:?}", bb);
- for target in mir[bb].terminator_mut().successors_mut() {
+ for target in body[bb].terminator_mut().successors_mut() {
if *target != resume_block && nop_landing_pads.contains(*target) {
debug!(" folding noop jump to {:?} to resume block", target);
*target = resume_block;
}
}
- match mir[bb].terminator_mut().unwind_mut() {
+ match body[bb].terminator_mut().unwind_mut() {
Some(unwind) => {
if *unwind == Some(resume_block) {
debug!(" removing noop landing pad");
_ => {}
}
- let is_nop_landing_pad = self.is_nop_landing_pad(bb, mir, &nop_landing_pads);
+ let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads);
if is_nop_landing_pad {
nop_landing_pads.insert(bb);
}
impl MirPass for SanityCheck {
fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
- src: MirSource<'tcx>, mir: &mut Body<'tcx>) {
+ src: MirSource<'tcx>, body: &mut Body<'tcx>) {
let def_id = src.def_id();
if !tcx.has_attr(def_id, sym::rustc_mir) {
debug!("skipping rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
let attributes = tcx.get_attrs(def_id);
let param_env = tcx.param_env(def_id);
- let move_data = MoveData::gather_moves(mir, tcx).unwrap();
+ let move_data = MoveData::gather_moves(body, tcx).unwrap();
let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env };
- let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len());
+ let dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let flow_inits =
- do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds,
- MaybeInitializedPlaces::new(tcx, mir, &mdpe),
+ do_dataflow(tcx, body, def_id, &attributes, &dead_unwinds,
+ MaybeInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
let flow_uninits =
- do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds,
- MaybeUninitializedPlaces::new(tcx, mir, &mdpe),
+ do_dataflow(tcx, body, def_id, &attributes, &dead_unwinds,
+ MaybeUninitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
let flow_def_inits =
- do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds,
- DefinitelyInitializedPlaces::new(tcx, mir, &mdpe),
+ do_dataflow(tcx, body, def_id, &attributes, &dead_unwinds,
+ DefinitelyInitializedPlaces::new(tcx, body, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]));
if has_rustc_mir_with(&attributes, sym::rustc_peek_maybe_init).is_some() {
- sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_inits);
+ sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_inits);
}
if has_rustc_mir_with(&attributes, sym::rustc_peek_maybe_uninit).is_some() {
- sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_uninits);
+ sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_uninits);
}
if has_rustc_mir_with(&attributes, sym::rustc_peek_definite_init).is_some() {
- sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_def_inits);
+ sanity_check_via_rustc_peek(tcx, body, def_id, &attributes, &flow_def_inits);
}
if has_rustc_mir_with(&attributes, sym::stop_after_dataflow).is_some() {
tcx.sess.fatal("stop_after_dataflow ended compilation");
/// expression form above, then that emits an error as well, but those
/// errors are not intended to be used for unit tests.)
pub fn sanity_check_via_rustc_peek<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
def_id: DefId,
_attributes: &[ast::Attribute],
results: &DataflowResults<'tcx, O>)
// `dataflow::build_sets`. (But note it is doing non-standard
// stuff, so such generalization may not be realistic.)
- for bb in mir.basic_blocks().indices() {
- each_block(tcx, mir, results, bb);
+ for bb in body.basic_blocks().indices() {
+ each_block(tcx, body, results, bb);
}
}
fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
results: &DataflowResults<'tcx, O>,
bb: mir::BasicBlock) where
O: BitDenotation<'tcx, Idx=MovePathIndex> + HasMoveData<'tcx>
{
let move_data = results.0.operator.move_data();
- let mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = mir[bb];
+ let mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = body[bb];
let (args, span) = match is_rustc_peek(tcx, terminator) {
Some(args_and_span) => args_and_span,
}
}
-pub fn simplify_cfg(mir: &mut Body<'_>) {
- CfgSimplifier::new(mir).simplify();
- remove_dead_blocks(mir);
+pub fn simplify_cfg(body: &mut Body<'_>) {
+ CfgSimplifier::new(body).simplify();
+ remove_dead_blocks(body);
// FIXME: Should probably be moved into some kind of pass manager
- mir.basic_blocks_mut().raw.shrink_to_fit();
+ body.basic_blocks_mut().raw.shrink_to_fit();
}
impl MirPass for SimplifyCfg {
fn run_pass<'a, 'tcx>(&self,
_tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, mir);
- simplify_cfg(mir);
+ body: &mut Body<'tcx>) {
+ debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body);
+ simplify_cfg(body);
}
}
}
impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> {
- pub fn new(mir: &'a mut Body<'tcx>) -> Self {
- let mut pred_count = IndexVec::from_elem(0u32, mir.basic_blocks());
+ pub fn new(body: &'a mut Body<'tcx>) -> Self {
+ let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
// we can't use mir.predecessors() here because that counts
// dead blocks, which we don't want to.
pred_count[START_BLOCK] = 1;
- for (_, data) in traversal::preorder(mir) {
+ for (_, data) in traversal::preorder(body) {
if let Some(ref term) = data.terminator {
for &tgt in term.successors() {
pred_count[tgt] += 1;
}
}
- let basic_blocks = mir.basic_blocks_mut();
+ let basic_blocks = body.basic_blocks_mut();
CfgSimplifier {
basic_blocks,
}
}
-pub fn remove_dead_blocks(mir: &mut Body<'_>) {
- let mut seen = BitSet::new_empty(mir.basic_blocks().len());
- for (bb, _) in traversal::preorder(mir) {
+pub fn remove_dead_blocks(body: &mut Body<'_>) {
+ let mut seen = BitSet::new_empty(body.basic_blocks().len());
+ for (bb, _) in traversal::preorder(body) {
seen.insert(bb.index());
}
- let basic_blocks = mir.basic_blocks_mut();
+ let basic_blocks = body.basic_blocks_mut();
let num_blocks = basic_blocks.len();
let mut replacements : Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- let mut marker = DeclMarker { locals: BitSet::new_empty(mir.local_decls.len()) };
- marker.visit_body(mir);
+ body: &mut Body<'tcx>) {
+ let mut marker = DeclMarker { locals: BitSet::new_empty(body.local_decls.len()) };
+ marker.visit_body(body);
// Return pointer and arguments are always live
marker.locals.insert(RETURN_PLACE);
- for arg in mir.args_iter() {
+ for arg in body.args_iter() {
marker.locals.insert(arg);
}
// We may need to keep dead user variables live for debuginfo.
if tcx.sess.opts.debuginfo == DebugInfo::Full {
- for local in mir.vars_iter() {
+ for local in body.vars_iter() {
marker.locals.insert(local);
}
}
- let map = make_local_map(&mut mir.local_decls, marker.locals);
+ let map = make_local_map(&mut body.local_decls, marker.locals);
// Update references to all vars and tmps now
- LocalUpdater { map }.visit_body(mir);
- mir.local_decls.shrink_to_fit();
+ LocalUpdater { map }.visit_body(body);
+ body.local_decls.shrink_to_fit();
}
}
/// Construct the mapping while swapping out unused stuff out from the `vec`.
-fn make_local_map<'tcx, V>(
+fn make_local_map<V>(
vec: &mut IndexVec<Local, V>,
mask: BitSet<Local>,
) -> IndexVec<Local, Option<Local>> {
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- for block in mir.basic_blocks_mut() {
+ body: &mut Body<'tcx>) {
+ for block in body.basic_blocks_mut() {
let terminator = block.terminator_mut();
terminator.kind = match terminator.kind {
TerminatorKind::SwitchInt {
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- let mut patch = MirPatch::new(mir);
+ body: &mut Body<'tcx>) {
+ let mut patch = MirPatch::new(body);
{
- let mut visitor = UniformArrayMoveOutVisitor{mir, patch: &mut patch, tcx};
- visitor.visit_body(mir);
+ let mut visitor = UniformArrayMoveOutVisitor{body, patch: &mut patch, tcx};
+ visitor.visit_body(body);
}
- patch.apply(mir);
+ patch.apply(body);
}
}
struct UniformArrayMoveOutVisitor<'a, 'tcx: 'a> {
- mir: &'a Body<'tcx>,
+ body: &'a Body<'tcx>,
patch: &'a mut MirPatch<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
from_end: false} = proj.elem {
// no need to transformation
} else {
- let place_ty = proj.base.ty(self.mir, self.tcx).ty;
+ let place_ty = proj.base.ty(self.body, self.tcx).ty;
if let ty::Array(item_ty, const_size) = place_ty.sty {
if let Some(size) = const_size.assert_usize(self.tcx) {
assert!(size <= u32::max_value() as u64,
ProjectionElem::Subslice{from, to} => {
self.patch.make_nop(location);
let temps : Vec<_> = (from..(size-to)).map(|i| {
- let temp = self.patch.new_temp(item_ty, self.mir.source_info(location).span);
+ let temp = self.patch.new_temp(item_ty, self.body.source_info(location).span);
self.patch.add_statement(location, StatementKind::StorageLive(temp));
self.patch.add_assign(location,
Place::Base(PlaceBase::Local(temp)),
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource<'tcx>,
- mir: &mut Body<'tcx>) {
- let mut patch = MirPatch::new(mir);
+ body: &mut Body<'tcx>) {
+ let mut patch = MirPatch::new(body);
{
let mut visitor = RestoreDataCollector {
- locals_use: IndexVec::from_elem(LocalUse::new(), &mir.local_decls),
+ locals_use: IndexVec::from_elem(LocalUse::new(), &body.local_decls),
candidates: vec![],
};
- visitor.visit_body(mir);
+ visitor.visit_body(body);
for candidate in &visitor.candidates {
- let statement = &mir[candidate.block].statements[candidate.statement_index];
+ let statement = &body[candidate.block].statements[candidate.statement_index];
if let StatementKind::Assign(ref dst_place, ref rval) = statement.kind {
if let Rvalue::Aggregate(box AggregateKind::Array(_), ref items) = **rval {
let items : Vec<_> = items.iter().map(|item| {
if let Operand::Move(Place::Base(PlaceBase::Local(local))) = item {
let local_use = &visitor.locals_use[*local];
- let opt_index_and_place = Self::try_get_item_source(local_use, mir);
+ let opt_index_and_place =
+ Self::try_get_item_source(local_use, body);
// each local should be used twice:
// in assign and in aggregate statements
if local_use.use_count == 2 && opt_index_and_place.is_some() {
let opt_src_place = items.first().and_then(|x| *x).map(|x| x.2);
let opt_size = opt_src_place.and_then(|src_place| {
- let src_ty = src_place.ty(mir, tcx).ty;
+ let src_ty = src_place.ty(body, tcx).ty;
if let ty::Array(_, ref size_o) = src_ty.sty {
size_o.assert_usize(tcx)
} else {
}
}
}
- patch.apply(mir);
+ patch.apply(body);
}
}
}
fn try_get_item_source<'a, 'tcx>(local_use: &LocalUse,
- mir: &'a Body<'tcx>) -> Option<(u32, &'a Place<'tcx>)> {
+ body: &'a Body<'tcx>) -> Option<(u32, &'a Place<'tcx>)> {
if let Some(location) = local_use.first_use {
- let block = &mir[location.block];
+ let block = &body[location.block];
if block.statements.len() > location.statement_index {
let statement = &block.statements[location.statement_index];
if let StatementKind::Assign(
--- /dev/null
+use rustc::mir::*;
+use rustc::ty::Ty;
+use rustc::ty::layout::VariantIdx;
+use rustc_data_structures::indexed_vec::Idx;
+
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0; // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index; // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+ mut lhs: Place<'tcx>,
+ operands: impl Iterator<Item=(Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+ kind: AggregateKind<'tcx>,
+ source_info: SourceInfo,
+) -> impl Iterator<Item=Statement<'tcx>> + TrustedLen {
+ let mut set_discriminant = None;
+ let active_field_index = match kind {
+ AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
+ if adt_def.is_enum() {
+ set_discriminant = Some(Statement {
+ kind: StatementKind::SetDiscriminant {
+ place: lhs.clone(),
+ variant_index,
+ },
+ source_info,
+ });
+ lhs = lhs.downcast(adt_def, variant_index);
+ }
+ active_field_index
+ }
+ AggregateKind::Generator(..) => {
+ // Right now we only support initializing generators to
+ // variant 0 (Unresumed).
+ let variant_index = VariantIdx::new(0);
+ set_discriminant = Some(Statement {
+ kind: StatementKind::SetDiscriminant {
+ place: lhs.clone(),
+ variant_index,
+ },
+ source_info,
+ });
+
+ // Operands are upvars stored on the base place, so no
+ // downcast is necessary.
+
+ None
+ }
+ _ => None
+ };
+
+ operands.into_iter().enumerate().map(move |(i, (op, ty))| {
+ let lhs_field = if let AggregateKind::Array(_) = kind {
+ // FIXME(eddyb) `offset` should be u64.
+ let offset = i as u32;
+ assert_eq!(offset as usize, i);
+ lhs.clone().elem(ProjectionElem::ConstantIndex {
+ offset,
+ // FIXME(eddyb) `min_length` doesn't appear to be used.
+ min_length: offset + 1,
+ from_end: false
+ })
+ } else {
+ let field = Field::new(active_field_index.unwrap_or(i));
+ lhs.clone().field(field, ty)
+ };
+ Statement {
+ source_info,
+ kind: StatementKind::Assign(lhs_field, box Rvalue::Use(op)),
+ }
+ }).chain(set_discriminant)
+}
}
impl DefUseAnalysis {
- pub fn new(mir: &Body<'_>) -> DefUseAnalysis {
+ pub fn new(body: &Body<'_>) -> DefUseAnalysis {
DefUseAnalysis {
- info: IndexVec::from_elem_n(Info::new(), mir.local_decls.len()),
+ info: IndexVec::from_elem_n(Info::new(), body.local_decls.len()),
}
}
- pub fn analyze(&mut self, mir: &Body<'_>) {
+ pub fn analyze(&mut self, body: &Body<'_>) {
self.clear();
let mut finder = DefUseFinder {
info: mem::replace(&mut self.info, IndexVec::new()),
};
- finder.visit_body(mir);
+ finder.visit_body(body);
self.info = finder.info
}
&self.info[local]
}
- fn mutate_defs_and_uses<F>(&self, local: Local, mir: &mut Body<'_>, mut callback: F)
+ fn mutate_defs_and_uses<F>(&self, local: Local, body: &mut Body<'_>, mut callback: F)
where F: for<'a> FnMut(&'a mut Local,
PlaceContext,
Location) {
for place_use in &self.info[local].defs_and_uses {
MutateUseVisitor::new(local,
&mut callback,
- mir).visit_location(mir, place_use.location)
+ body).visit_location(body, place_use.location)
}
}
// FIXME(pcwalton): this should update the def-use chains.
pub fn replace_all_defs_and_uses_with(&self,
local: Local,
- mir: &mut Body<'_>,
+ body: &mut Body<'_>,
new_local: Local) {
- self.mutate_defs_and_uses(local, mir, |local, _, _| *local = new_local)
+ self.mutate_defs_and_uses(local, body, |local, _, _| *local = new_local)
}
}
type Path : Copy + fmt::Debug;
fn patch(&mut self) -> &mut MirPatch<'tcx>;
- fn mir(&self) -> &'a Body<'tcx>;
+ fn body(&self) -> &'a Body<'tcx>;
fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>;
fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
- where D: DropElaborator<'b, 'tcx>
+where
+ D: DropElaborator<'b, 'tcx>,
{
fn place_ty(&self, place: &Place<'tcx>) -> Ty<'tcx> {
- place.ty(self.elaborator.mir(), self.tcx()).ty
+ place.ty(self.elaborator.body(), self.tcx()).ty
}
fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> {
//
// FIXME: I think we should just control the flags externally,
// and then we do not need this machinery.
- pub fn elaborate_drop<'a>(&mut self, bb: BasicBlock) {
+ pub fn elaborate_drop(&mut self, bb: BasicBlock) {
debug!("elaborate_drop({:?})", self);
let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
debug!("elaborate_drop({:?}): live - {:?}", self, style);
///
/// NOTE: this does not clear the master drop flag, so you need
/// to point succ/unwind on a `drop_ladder_bottom`.
- fn drop_ladder<'a>(&mut self,
- fields: Vec<(Place<'tcx>, Option<D::Path>)>,
- succ: BasicBlock,
- unwind: Unwind)
- -> (BasicBlock, Unwind)
- {
+ fn drop_ladder(
+ &mut self,
+ fields: Vec<(Place<'tcx>, Option<D::Path>)>,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> (BasicBlock, Unwind) {
debug!("drop_ladder({:?}, {:?})", self, fields);
let mut fields = fields;
(*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
}
- fn open_drop_for_tuple<'a>(&mut self, tys: &[Ty<'tcx>])
- -> BasicBlock
- {
+ fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
let fields = tys.iter().enumerate().map(|(i, &ty)| {
self.drop_ladder(fields, succ, unwind).0
}
- fn open_drop_for_box<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>)
- -> BasicBlock
- {
+ fn open_drop_for_box(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
let interior = self.place.clone().deref();
self.drop_subpath(&interior, interior_path, succ, unwind_succ)
}
- fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>)
- -> BasicBlock {
+ fn open_drop_for_adt(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
if adt.variants.len() == 0 {
return self.elaborator.patch().new_block(BasicBlockData {
self.drop_flag_test_block(switch_block, succ, unwind)
}
- fn destructor_call_block<'a>(&mut self, (succ, unwind): (BasicBlock, Unwind))
- -> BasicBlock
- {
+ fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
debug!("destructor_call_block({:?}, {:?})", self, succ);
let tcx = self.tcx();
let drop_trait = tcx.lang_items().drop_trait().unwrap();
///
/// This creates a "drop ladder" that drops the needed fields of the
/// ADT, both in the success case or if one of the destructors fail.
- fn open_drop<'a>(&mut self) -> BasicBlock {
+ fn open_drop(&mut self) -> BasicBlock {
let ty = self.place_ty(self.place);
match ty.sty {
ty::Closure(def_id, substs) => {
// within that own generator's resume function.
// This should only happen for the self argument on the resume function.
// It effetively only contains upvars until the generator transformation runs.
- // See librustc_mir/transform/generator.rs for more details.
 + // See librustc_mir/transform/generator.rs for more details.
ty::Generator(def_id, substs, _) => {
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx()).collect();
self.open_drop_for_tuple(&tys)
/// if FLAG(self.path)
/// if let Some(mode) = mode: FLAG(self.path)[mode] = false
/// drop(self.place)
- fn complete_drop<'a>(&mut self,
- drop_mode: Option<DropFlagMode>,
- succ: BasicBlock,
- unwind: Unwind) -> BasicBlock
- {
+ fn complete_drop(
+ &mut self,
+ drop_mode: Option<DropFlagMode>,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
debug!("complete_drop({:?},{:?})", self, drop_mode);
let drop_block = self.drop_block(succ, unwind);
block
}
- fn elaborated_drop_block<'a>(&mut self) -> BasicBlock {
+ fn elaborated_drop_block(&mut self) -> BasicBlock {
debug!("elaborated_drop_block({:?})", self);
let unwind = self.unwind; // FIXME(#43234)
let succ = self.succ;
blk
}
- fn box_free_block<'a>(
+ fn box_free_block(
&mut self,
adt: &'tcx ty::AdtDef,
substs: SubstsRef<'tcx>,
self.drop_flag_test_block(block, target, unwind)
}
- fn unelaborated_free_block<'a>(
+ fn unelaborated_free_block(
&mut self,
adt: &'tcx ty::AdtDef,
substs: SubstsRef<'tcx>,
target: BasicBlock,
- unwind: Unwind
+ unwind: Unwind,
) -> BasicBlock {
let tcx = self.tcx();
let unit_temp = Place::Base(PlaceBase::Local(self.new_temp(tcx.mk_unit())));
free_block
}
- fn drop_block<'a>(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+ fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
let block = TerminatorKind::Drop {
location: self.place.clone(),
target,
}
}
- fn new_block<'a>(&mut self,
- unwind: Unwind,
- k: TerminatorKind<'tcx>)
- -> BasicBlock
- {
+ fn new_block(&mut self, unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock {
self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
}
fn terminator_loc(&mut self, bb: BasicBlock) -> Location {
- let mir = self.elaborator.mir();
- self.elaborator.patch().terminator_loc(mir, bb)
+ let body = self.elaborator.body();
+ self.elaborator.patch().terminator_loc(body, bb)
}
fn constant_usize(&self, val: u16) -> Operand<'tcx> {
where W: Write
{
for def_id in dump_mir_def_ids(tcx, single) {
- let mir = &tcx.optimized_mir(def_id);
- write_mir_fn_graphviz(tcx, def_id, mir, w)?;
+ let body = &tcx.optimized_mir(def_id);
+ write_mir_fn_graphviz(tcx, def_id, body, w)?;
}
Ok(())
}
/// Write a graphviz DOT graph of the MIR.
pub fn write_mir_fn_graphviz<'tcx, W>(tcx: TyCtxt<'_, '_, 'tcx>,
def_id: DefId,
- mir: &Body<'_>,
+ body: &Body<'_>,
w: &mut W) -> io::Result<()>
where W: Write
{
writeln!(w, r#" edge [fontname="monospace"];"#)?;
// Graph label
- write_graph_label(tcx, def_id, mir, w)?;
+ write_graph_label(tcx, def_id, body, w)?;
// Nodes
- for (block, _) in mir.basic_blocks().iter_enumerated() {
- write_node(block, mir, w)?;
+ for (block, _) in body.basic_blocks().iter_enumerated() {
+ write_node(block, body, w)?;
}
// Edges
- for (source, _) in mir.basic_blocks().iter_enumerated() {
- write_edges(source, mir, w)?;
+ for (source, _) in body.basic_blocks().iter_enumerated() {
+ write_edges(source, body, w)?;
}
writeln!(w, "}}")
}
/// `init` and `fini` are callbacks for emitting additional rows of
/// data (using HTML enclosed with `<tr>` in the emitted text).
pub fn write_node_label<W: Write, INIT, FINI>(block: BasicBlock,
- mir: &Body<'_>,
+ body: &Body<'_>,
w: &mut W,
num_cols: u32,
init: INIT,
where INIT: Fn(&mut W) -> io::Result<()>,
FINI: Fn(&mut W) -> io::Result<()>
{
- let data = &mir[block];
+ let data = &body[block];
write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
}
/// Write a graphviz DOT node for the given basic block.
-fn write_node<W: Write>(block: BasicBlock, mir: &Body<'_>, w: &mut W) -> io::Result<()> {
+fn write_node<W: Write>(block: BasicBlock, body: &Body<'_>, w: &mut W) -> io::Result<()> {
// Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
write!(w, r#" {} [shape="none", label=<"#, node(block))?;
- write_node_label(block, mir, w, 1, |_| Ok(()), |_| Ok(()))?;
+ write_node_label(block, body, w, 1, |_| Ok(()), |_| Ok(()))?;
// Close the node label and the node itself.
writeln!(w, ">];")
}
/// Write graphviz DOT edges with labels between the given basic block and all of its successors.
-fn write_edges<W: Write>(source: BasicBlock, mir: &Body<'_>, w: &mut W) -> io::Result<()> {
- let terminator = mir[source].terminator();
+fn write_edges<W: Write>(source: BasicBlock, body: &Body<'_>, w: &mut W) -> io::Result<()> {
+ let terminator = body[source].terminator();
let labels = terminator.kind.fmt_successor_labels();
for (&target, label) in terminator.successors().zip(labels) {
/// all the variables and temporaries.
fn write_graph_label<'a, 'gcx, 'tcx, W: Write>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
def_id: DefId,
- mir: &Body<'_>,
+ body: &Body<'_>,
w: &mut W)
-> io::Result<()> {
write!(w, " label=<fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
// fn argument types.
- for (i, arg) in mir.args_iter().enumerate() {
+ for (i, arg) in body.args_iter().enumerate() {
if i > 0 {
write!(w, ", ")?;
}
write!(w,
"{:?}: {}",
Place::Base(PlaceBase::Local(arg)),
- escape(&mir.local_decls[arg].ty)
+ escape(&body.local_decls[arg].ty)
)?;
}
- write!(w, ") -> {}", escape(&mir.return_ty()))?;
+ write!(w, ") -> {}", escape(&body.return_ty()))?;
write!(w, r#"<br align="left"/>"#)?;
- for local in mir.vars_and_temps_iter() {
- let decl = &mir.local_decls[local];
+ for local in body.vars_and_temps_iter() {
+ let decl = &body.local_decls[local];
write!(w, "let ")?;
if decl.mutability == Mutability::Mut {
/// Computes which local variables are live within the given function
/// `mir`, including drops.
pub fn liveness_of_locals<'tcx>(
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
) -> LivenessResult {
- let num_live_vars = mir.local_decls.len();
+ let num_live_vars = body.local_decls.len();
- let def_use: IndexVec<_, DefsUses> = mir
+ let def_use: IndexVec<_, DefsUses> = body
.basic_blocks()
.iter()
.map(|b| block(b, num_live_vars))
.collect();
- let mut outs: IndexVec<_, LiveVarSet> = mir
+ let mut outs: IndexVec<_, LiveVarSet> = body
.basic_blocks()
.indices()
.map(|_| LiveVarSet::new_empty(num_live_vars))
// queue of things that need to be re-processed, and a set containing
// the things currently in the queue
- let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_all(mir.basic_blocks().len());
+ let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_all(body.basic_blocks().len());
- let predecessors = mir.predecessors();
+ let predecessors = body.predecessors();
while let Some(bb) = dirty_queue.pop() {
// bits = use ∪ (bits - def)
Drop,
}
-pub fn categorize<'tcx>(context: PlaceContext) -> Option<DefUse> {
+pub fn categorize(context: PlaceContext) -> Option<DefUse> {
match context {
///////////////////////////////////////////////////////////////////////////
// DEFS
tcx: TyCtxt<'a, 'tcx, 'tcx>,
pass_name: &str,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
result: &LivenessResult,
) {
if !dump_enabled(tcx, pass_name, source) {
// see notes on #41697 below
tcx.def_path_str(source.def_id())
});
- dump_matched_mir_node(tcx, pass_name, &node_path, source, mir, result);
+ dump_matched_mir_node(tcx, pass_name, &node_path, source, body, result);
}
fn dump_matched_mir_node<'a, 'tcx>(
pass_name: &str,
node_path: &str,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
result: &LivenessResult,
) {
let mut file_path = PathBuf::new();
writeln!(file, "// source = {:?}", source)?;
writeln!(file, "// pass_name = {}", pass_name)?;
writeln!(file, "")?;
- write_mir_fn(tcx, source, mir, &mut file, result)?;
+ write_mir_fn(tcx, source, body, &mut file, result)?;
Ok(())
});
}
pub fn write_mir_fn<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
w: &mut dyn Write,
result: &LivenessResult,
) -> io::Result<()> {
- write_mir_intro(tcx, src, mir, w)?;
- for block in mir.basic_blocks().indices() {
+ write_mir_intro(tcx, src, body, w)?;
+ for block in body.basic_blocks().indices() {
let print = |w: &mut dyn Write, prefix, result: &IndexVec<BasicBlock, LiveVarSet>| {
let live: Vec<String> = result[block]
.iter()
.collect();
writeln!(w, "{} {{{}}}", prefix, live.join(", "))
};
- write_basic_block(tcx, block, mir, &mut |_, _| Ok(()), w)?;
+ write_basic_block(tcx, block, body, &mut |_, _| Ok(()), w)?;
print(w, " ", &result.outs)?;
- if block.index() + 1 != mir.basic_blocks().len() {
+ if block.index() + 1 != body.basic_blocks().len() {
writeln!(w, "")?;
}
}
use rustc::ty::TyCtxt;
use syntax_pos::Span;
+pub mod aggregate;
pub mod borrowck_errors;
pub mod elaborate_drops;
pub mod def_use;
pub mod liveness;
pub mod collect_writes;
+pub use self::aggregate::expand_aggregate;
pub use self::alignment::is_disaligned;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
pub use self::graphviz::{graphviz_safe_def_name, write_mir_graphviz};
}
impl<'tcx> MirPatch<'tcx> {
- pub fn new(mir: &Body<'tcx>) -> Self {
+ pub fn new(body: &Body<'tcx>) -> Self {
let mut result = MirPatch {
- patch_map: IndexVec::from_elem(None, mir.basic_blocks()),
+ patch_map: IndexVec::from_elem(None, body.basic_blocks()),
new_blocks: vec![],
new_statements: vec![],
new_locals: vec![],
- next_local: mir.local_decls.len(),
+ next_local: body.local_decls.len(),
resume_block: START_BLOCK,
make_nop: vec![]
};
let mut resume_block = None;
let mut resume_stmt_block = None;
- for (bb, block) in mir.basic_blocks().iter_enumerated() {
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
if let TerminatorKind::Resume = block.terminator().kind {
if block.statements.len() > 0 {
assert!(resume_stmt_block.is_none());
statements: vec![],
terminator: Some(Terminator {
source_info: SourceInfo {
- span: mir.span,
+ span: body.span,
scope: OUTERMOST_SOURCE_SCOPE
},
kind: TerminatorKind::Resume
self.patch_map[bb].is_some()
}
- pub fn terminator_loc(&self, mir: &Body<'tcx>, bb: BasicBlock) -> Location {
- let offset = match bb.index().checked_sub(mir.basic_blocks().len()) {
+ pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
+ let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
Some(index) => self.new_blocks[index].statements.len(),
- None => mir[bb].statements.len()
+ None => body[bb].statements.len()
};
Location {
block: bb,
self.make_nop.push(loc);
}
- pub fn apply(self, mir: &mut Body<'tcx>) {
+ pub fn apply(self, body: &mut Body<'tcx>) {
debug!("MirPatch: make nops at: {:?}", self.make_nop);
for loc in self.make_nop {
- mir.make_statement_nop(loc);
+ body.make_statement_nop(loc);
}
debug!("MirPatch: {:?} new temps, starting from index {}: {:?}",
- self.new_locals.len(), mir.local_decls.len(), self.new_locals);
+ self.new_locals.len(), body.local_decls.len(), self.new_locals);
debug!("MirPatch: {} new blocks, starting from index {}",
- self.new_blocks.len(), mir.basic_blocks().len());
- mir.basic_blocks_mut().extend(self.new_blocks);
- mir.local_decls.extend(self.new_locals);
+ self.new_blocks.len(), body.basic_blocks().len());
+ body.basic_blocks_mut().extend(self.new_blocks);
+ body.local_decls.extend(self.new_locals);
for (src, patch) in self.patch_map.into_iter_enumerated() {
if let Some(patch) = patch {
debug!("MirPatch: patching block {:?}", src);
- mir[src].terminator_mut().kind = patch;
+ body[src].terminator_mut().kind = patch;
}
}
stmt, loc, delta);
loc.statement_index += delta;
let source_info = Self::source_info_for_index(
- &mir[loc.block], loc
+ &body[loc.block], loc
);
- mir[loc.block].statements.insert(
+ body[loc.block].statements.insert(
loc.statement_index, Statement {
source_info,
kind: stmt
}
}
- pub fn source_info_for_location(&self, mir: &Body<'_>, loc: Location) -> SourceInfo {
- let data = match loc.block.index().checked_sub(mir.basic_blocks().len()) {
+ pub fn source_info_for_location(&self, body: &Body<'_>, loc: Location) -> SourceInfo {
+ let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
Some(new) => &self.new_blocks[new],
- None => &mir[loc.block]
+ None => &body[loc.block]
};
Self::source_info_for_index(data, loc)
}
pass_name: &str,
disambiguator: &dyn Display,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
extra_data: F,
) where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
&node_path,
disambiguator,
source,
- mir,
+ body,
extra_data,
);
}
node_path: &str,
disambiguator: &dyn Display,
source: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
mut extra_data: F,
) where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
writeln!(file, "// source = {:?}", source)?;
writeln!(file, "// pass_name = {}", pass_name)?;
writeln!(file, "// disambiguator = {}", disambiguator)?;
- if let Some(ref layout) = mir.generator_layout {
+ if let Some(ref layout) = body.generator_layout {
writeln!(file, "// generator_layout = {:?}", layout)?;
}
writeln!(file, "")?;
extra_data(PassWhere::BeforeCFG, &mut file)?;
- write_user_type_annotations(mir, &mut file)?;
- write_mir_fn(tcx, source, mir, &mut extra_data, &mut file)?;
+ write_user_type_annotations(body, &mut file)?;
+ write_mir_fn(tcx, source, body, &mut extra_data, &mut file)?;
extra_data(PassWhere::AfterCFG, &mut file)?;
};
let _: io::Result<()> = try {
let mut file =
create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?;
- write_mir_fn_graphviz(tcx, source.def_id(), mir, &mut file)?;
+ write_mir_fn_graphviz(tcx, source.def_id(), body, &mut file)?;
};
}
}
let mut first = true;
for def_id in dump_mir_def_ids(tcx, single) {
- let mir = &tcx.optimized_mir(def_id);
+ let body = &tcx.optimized_mir(def_id);
if first {
first = false;
writeln!(w, "")?;
}
- write_mir_fn(tcx, MirSource::item(def_id), mir, &mut |_, _| Ok(()), w)?;
+ write_mir_fn(tcx, MirSource::item(def_id), body, &mut |_, _| Ok(()), w)?;
- for (i, mir) in mir.promoted.iter_enumerated() {
+ for (i, body) in body.promoted.iter_enumerated() {
writeln!(w, "")?;
let src = MirSource {
instance: ty::InstanceDef::Item(def_id),
promoted: Some(i),
};
- write_mir_fn(tcx, src, mir, &mut |_, _| Ok(()), w)?;
+ write_mir_fn(tcx, src, body, &mut |_, _| Ok(()), w)?;
}
}
Ok(())
pub fn write_mir_fn<'a, 'gcx, 'tcx, F>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
extra_data: &mut F,
w: &mut dyn Write,
) -> io::Result<()>
where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
- write_mir_intro(tcx, src, mir, w)?;
- for block in mir.basic_blocks().indices() {
+ write_mir_intro(tcx, src, body, w)?;
+ for block in body.basic_blocks().indices() {
extra_data(PassWhere::BeforeBlock(block), w)?;
- write_basic_block(tcx, block, mir, extra_data, w)?;
- if block.index() + 1 != mir.basic_blocks().len() {
+ write_basic_block(tcx, block, body, extra_data, w)?;
+ if block.index() + 1 != body.basic_blocks().len() {
writeln!(w, "")?;
}
}
pub fn write_basic_block<'cx, 'gcx, 'tcx, F>(
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
block: BasicBlock,
- mir: &Body<'tcx>,
+ body: &Body<'tcx>,
extra_data: &mut F,
w: &mut dyn Write,
) -> io::Result<()>
where
F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
{
- let data = &mir[block];
+ let data = &body[block];
// Basic block label at the top.
let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
};
for statement in &data.statements {
extra_data(PassWhere::BeforeLocation(current_location), w)?;
- let indented_mir = format!("{0}{0}{1:?};", INDENT, statement);
+ let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
writeln!(
w,
"{:A$} // {:?}: {}",
- indented_mir,
+ indented_body,
current_location,
comment(tcx, statement.source_info),
A = ALIGN,
/// Prints local variables in a scope tree.
fn write_scope_tree(
tcx: TyCtxt<'_, '_, '_>,
- mir: &Body<'_>,
+ body: &Body<'_>,
scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
w: &mut dyn Write,
parent: SourceScope,
let indent = depth * INDENT.len();
// Local variable types (including the user's name in a comment).
- for (local, local_decl) in mir.local_decls.iter_enumerated() {
- if (1..mir.arg_count+1).contains(&local.index()) {
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ if (1..body.arg_count+1).contains(&local.index()) {
// Skip over argument locals, they're printed in the signature.
continue;
}
};
for &child in children {
- assert_eq!(mir.source_scopes[child].parent_scope, Some(parent));
+ assert_eq!(body.source_scopes[child].parent_scope, Some(parent));
writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
- write_scope_tree(tcx, mir, scope_tree, w, child, depth + 1)?;
+ write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
}
pub fn write_mir_intro<'a, 'gcx, 'tcx>(
tcx: TyCtxt<'a, 'gcx, 'tcx>,
src: MirSource<'tcx>,
- mir: &Body<'_>,
+ body: &Body<'_>,
w: &mut dyn Write,
) -> io::Result<()> {
- write_mir_sig(tcx, src, mir, w)?;
+ write_mir_sig(tcx, src, body, w)?;
writeln!(w, "{{")?;
// construct a scope tree and write it out
let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
- for (index, scope_data) in mir.source_scopes.iter().enumerate() {
+ for (index, scope_data) in body.source_scopes.iter().enumerate() {
if let Some(parent) = scope_data.parent_scope {
scope_tree
.entry(parent)
}
}
- write_scope_tree(tcx, mir, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+ write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
// Add an empty line before the first block is printed.
writeln!(w, "")?;
fn write_mir_sig(
tcx: TyCtxt<'_, '_, '_>,
src: MirSource<'tcx>,
- mir: &Body<'_>,
+ body: &Body<'_>,
w: &mut dyn Write,
) -> io::Result<()> {
use rustc::hir::def::DefKind;
write!(w, "(")?;
// fn argument types.
- for (i, arg) in mir.args_iter().enumerate() {
+ for (i, arg) in body.args_iter().enumerate() {
if i != 0 {
write!(w, ", ")?;
}
- write!(w, "{:?}: {}", Place::Base(PlaceBase::Local(arg)), mir.local_decls[arg].ty)?;
+ write!(w, "{:?}: {}", Place::Base(PlaceBase::Local(arg)), body.local_decls[arg].ty)?;
}
- write!(w, ") -> {}", mir.return_ty())?;
+ write!(w, ") -> {}", body.return_ty())?;
} else {
- assert_eq!(mir.arg_count, 0);
- write!(w, ": {} =", mir.return_ty())?;
+ assert_eq!(body.arg_count, 0);
+ write!(w, ": {} =", body.return_ty())?;
}
- if let Some(yield_ty) = mir.yield_ty {
+ if let Some(yield_ty) = body.yield_ty {
writeln!(w)?;
writeln!(w, "yields {}", yield_ty)?;
}
Ok(())
}
-fn write_user_type_annotations(mir: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
- if !mir.user_type_annotations.is_empty() {
+fn write_user_type_annotations(body: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
+ if !body.user_type_annotations.is_empty() {
writeln!(w, "| User Type Annotations")?;
}
- for (index, annotation) in mir.user_type_annotations.iter_enumerated() {
+ for (index, annotation) in body.user_type_annotations.iter_enumerated() {
writeln!(w, "| {:?}: {:?} at {:?}", index.index(), annotation.user_ty, annotation.span)?;
}
- if !mir.user_type_annotations.is_empty() {
+ if !body.user_type_annotations.is_empty() {
writeln!(w, "|")?;
}
Ok(())
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[macro_use]
extern crate rustc;
use rustc::session::Session;
use rustc::util::nodemap::FxHashMap;
-use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT, IdentTT};
+use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension};
use syntax::ext::base::MacroExpanderFn;
+use syntax::ext::hygiene::Transparency;
use syntax::symbol::{Symbol, sym};
use syntax::ast;
use syntax::feature_gate::AttributeType;
/// Register a syntax extension of any kind.
///
/// This is the most general hook into `libsyntax`'s expansion behavior.
- pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) {
+ pub fn register_syntax_extension(&mut self, name: ast::Name, mut extension: SyntaxExtension) {
if name == sym::macro_rules {
panic!("user-defined macros may not be named `macro_rules`");
}
- self.syntax_exts.push((name, match extension {
- NormalTT {
- expander,
- def_info: _,
- allow_internal_unstable,
- allow_internal_unsafe,
- local_inner_macros,
- unstable_feature,
- edition,
- } => {
- let nid = ast::CRATE_NODE_ID;
- NormalTT {
- expander,
- def_info: Some((nid, self.krate_span)),
- allow_internal_unstable,
- allow_internal_unsafe,
- local_inner_macros,
- unstable_feature,
- edition,
- }
- }
- IdentTT { expander, span: _, allow_internal_unstable } => {
- IdentTT { expander, span: Some(self.krate_span), allow_internal_unstable }
- }
- _ => extension,
- }));
+ if let SyntaxExtension::LegacyBang { def_info: ref mut def_info @ None, .. } = extension {
+ *def_info = Some((ast::CRATE_NODE_ID, self.krate_span));
+ }
+ self.syntax_exts.push((name, extension));
}
/// Register a macro of the usual kind.
///
/// This is a convenience wrapper for `register_syntax_extension`.
- /// It builds for you a `NormalTT` that calls `expander`,
+ /// It builds for you a `SyntaxExtension::LegacyBang` that calls `expander`,
/// and also takes care of interning the macro's name.
pub fn register_macro(&mut self, name: &str, expander: MacroExpanderFn) {
- self.register_syntax_extension(Symbol::intern(name), NormalTT {
+ self.register_syntax_extension(Symbol::intern(name), SyntaxExtension::LegacyBang {
expander: Box::new(expander),
def_info: None,
+ transparency: Transparency::SemiTransparent,
allow_internal_unstable: None,
allow_internal_unsafe: false,
local_inner_macros: false,
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(nll)]
#![feature(rustc_diagnostic_macros)]
}
fn visit_token(&mut self, t: Token) {
- if let Token::Interpolated(nt) = t {
+ if let token::Interpolated(nt) = t.kind {
if let token::NtExpr(ref expr) = *nt {
if let ast::ExprKind::Mac(..) = expr.node {
self.visit_invoc(expr.id);
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
pub use rustc::hir::def::{Namespace, PerNS};
fn check_unused_macros(&self) {
for did in self.unused_macros.iter() {
let id_span = match *self.macro_map[did] {
- SyntaxExtension::NormalTT { def_info, .. } |
- SyntaxExtension::DeclMacro { def_info, .. } => def_info,
+ SyntaxExtension::LegacyBang { def_info, .. } => def_info,
_ => None,
};
if let Some((id, span)) = id_span {
match self.resolve_macro_to_res(derive, MacroKind::Derive,
&parent_scope, true, force) {
Ok((_, ext)) => {
- if let SyntaxExtension::ProcMacroDerive(_, helpers, _) = &*ext {
+ if let SyntaxExtension::Derive(_, helpers, _) = &*ext {
if helpers.contains(&ident.name) {
let binding =
(Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper),
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
-#![feature(custom_attribute)]
#![feature(nll)]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(unused_attributes)]
#![recursion_limit="256"]
}
}
- fn output_file(&self, ctx: &SaveContext<'_, '_>) -> File {
+ fn output_file(&self, ctx: &SaveContext<'_, '_>) -> (File, PathBuf) {
let sess = &ctx.tcx.sess;
let file_name = match ctx.config.output_file {
Some(ref s) => PathBuf::from(s),
|e| sess.fatal(&format!("Could not open {}: {}", file_name.display(), e)),
);
- output_file
+ (output_file, file_name)
}
}
cratename: &str,
input: &'l Input,
) {
- let output = &mut self.output_file(&save_ctxt);
- let mut dumper = JsonDumper::new(output, save_ctxt.config.clone());
- let mut visitor = DumpVisitor::new(save_ctxt, &mut dumper);
+ let sess = &save_ctxt.tcx.sess;
+ let file_name = {
+ let (mut output, file_name) = self.output_file(&save_ctxt);
+ let mut dumper = JsonDumper::new(&mut output, save_ctxt.config.clone());
+ let mut visitor = DumpVisitor::new(save_ctxt, &mut dumper);
- visitor.dump_crate_info(cratename, krate);
- visitor.dump_compilation_options(input, cratename);
- visit::walk_crate(&mut visitor, krate);
+ visitor.dump_crate_info(cratename, krate);
+ visitor.dump_compilation_options(input, cratename);
+ visit::walk_crate(&mut visitor, krate);
+
+ file_name
+ };
+
+ if sess.opts.debugging_opts.emit_artifact_notifications {
+ sess.parse_sess.span_diagnostic
+ .emit_artifact_notification(&file_name, "save-analysis");
+ }
}
}
use std::cell::Cell;
use syntax::parse::lexer::{self, StringReader};
-use syntax::parse::token::{self, Token};
+use syntax::parse::token::{self, TokenKind};
use syntax_pos::*;
#[derive(Clone)]
lexer::StringReader::retokenize(&self.sess.parse_sess, span)
}
- pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option<Span> {
+ pub fn sub_span_of_token(&self, span: Span, tok: TokenKind) -> Option<Span> {
let mut toks = self.retokenise_span(span);
loop {
let next = toks.real_token();
- if next.tok == token::Eof {
+ if next == token::Eof {
return None;
}
- if next.tok == tok {
- return Some(next.sp);
+ if next == tok {
+ return Some(next.span);
}
}
}
// let mut toks = self.retokenise_span(span);
// loop {
// let ts = toks.real_token();
- // if ts.tok == token::Eof {
+ // if ts == token::Eof {
// return None;
// }
- // if ts.tok == token::Not {
+ // if ts == token::Not {
// let ts = toks.real_token();
- // if ts.tok.is_ident() {
+ // if ts.kind.is_ident() {
// return Some(ts.sp);
// } else {
// return None;
// let mut toks = self.retokenise_span(span);
// let mut prev = toks.real_token();
// loop {
- // if prev.tok == token::Eof {
+ // if prev == token::Eof {
// return None;
// }
// let ts = toks.real_token();
- // if ts.tok == token::Not {
- // if prev.tok.is_ident() {
+ // if ts == token::Not {
+ // if prev.kind.is_ident() {
// return Some(prev.sp);
// } else {
// return None;
Pointer
}
-impl<'a, 'tcx> Primitive {
+impl Primitive {
pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
let dl = cx.data_layout();
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#[macro_use] extern crate log;
}
}
- fn visit_ex_clause_with<'gcx: 'tcx, V: TypeVisitor<'tcx>>(
+ fn visit_ex_clause_with<V: TypeVisitor<'tcx>>(
ex_clause: &ExClause<Self>,
visitor: &mut V,
) -> bool {
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(crate_visibility_modifier)]
#![feature(in_band_lifetimes)]
use crate::namespace::Namespace;
use rustc::lint::builtin::AMBIGUOUS_ASSOCIATED_ITEMS;
use rustc::traits;
-use rustc::ty::{self, DefIdTree, Ty, TyCtxt, ToPredicate, TypeFoldable};
+use rustc::ty::{self, DefIdTree, Ty, TyCtxt, Const, ToPredicate, TypeFoldable};
use rustc::ty::{GenericParamDef, GenericParamDefKind};
use rustc::ty::subst::{Kind, Subst, InternalSubsts, SubstsRef};
use rustc::ty::wf::object_region_bounds;
-> &'tcx ty::GenericPredicates<'tcx>;
/// Returns the lifetime to use when a lifetime is omitted (and not elided).
- fn re_infer(&self, span: Span, _def: Option<&ty::GenericParamDef>)
+ fn re_infer(
+ &self,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ )
-> Option<ty::Region<'tcx>>;
/// Returns the type to use when a type is omitted.
- fn ty_infer(&self, span: Span) -> Ty<'tcx>;
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx>;
- /// Same as `ty_infer`, but with a known type parameter definition.
- fn ty_infer_for_def(&self,
- _def: &ty::GenericParamDef,
- span: Span) -> Ty<'tcx> {
- self.ty_infer(span)
- }
+ /// Returns the const to use when a const is omitted.
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> &'tcx Const<'tcx>;
/// Projecting an associated type from a (potentially)
/// higher-ranked trait reference is more complicated, because of
}
None => {
- self.re_infer(lifetime.span, def)
+ self.re_infer(def, lifetime.span)
.unwrap_or_else(|| {
// This indicates an illegal lifetime
// elision. `resolve_lifetime` should have
span,
def_id,
generic_args,
- item_segment.infer_types,
+ item_segment.infer_args,
None,
)
});
seg: &hir::PathSegment,
generics: &ty::Generics,
) -> bool {
- let explicit = !seg.infer_types;
+ let explicit = !seg.infer_args;
let impl_trait = generics.params.iter().any(|param| match param.kind {
ty::GenericParamDefKind::Type {
synthetic: Some(hir::SyntheticTyParamKind::ImplTrait), ..
GenericArgPosition::Value
},
def.parent.is_none() && def.has_self, // `has_self`
- seg.infer_types || suppress_mismatch, // `infer_types`
+ seg.infer_args || suppress_mismatch, // `infer_args`
).0
}
args: &hir::GenericArgs,
position: GenericArgPosition,
has_self: bool,
- infer_types: bool,
+ infer_args: bool,
) -> (bool, Option<Vec<Span>>) {
// At this stage we are guaranteed that the generic arguments are in the correct order, e.g.
// that lifetimes will proceed types. So it suffices to check the number of each generic
let param_counts = def.own_counts();
let arg_counts = args.own_counts();
let infer_lifetimes = position != GenericArgPosition::Type && arg_counts.lifetimes == 0;
- let infer_consts = position != GenericArgPosition::Type && arg_counts.consts == 0;
let mut defaults: ty::GenericParamCount = Default::default();
for param in &def.params {
offset
);
// We enforce the following: `required` <= `provided` <= `permitted`.
- // For kinds without defaults (i.e., lifetimes), `required == permitted`.
+ // For kinds without defaults (e.g., lifetimes), `required == permitted`.
// For other kinds (i.e., types), `permitted` may be greater than `required`.
if required <= provided && provided <= permitted {
return (reported_late_bound_region_err.unwrap_or(false), None);
);
}
// FIXME(const_generics:defaults)
- if !infer_consts || arg_counts.consts > param_counts.consts {
+ if !infer_args || arg_counts.consts > param_counts.consts {
check_kind_count(
"const",
param_counts.consts,
);
}
// Note that type errors are currently be emitted *after* const errors.
- if !infer_types
+ if !infer_args
|| arg_counts.types > param_counts.types - defaults.types - has_self as usize {
check_kind_count(
"type",
}
// Check whether this segment takes generic arguments and the user has provided any.
- let (generic_args, infer_types) = args_for_def_id(def_id);
+ let (generic_args, infer_args) = args_for_def_id(def_id);
let mut args = generic_args.iter().flat_map(|generic_args| generic_args.args.iter())
.peekable();
| (GenericArg::Const(_), GenericParamDefKind::Lifetime) => {
// We expected a lifetime argument, but got a type or const
// argument. That means we're inferring the lifetimes.
- substs.push(inferred_kind(None, param, infer_types));
+ substs.push(inferred_kind(None, param, infer_args));
params.next();
}
(_, _) => {
(None, Some(¶m)) => {
// If there are fewer arguments than parameters, it means
// we're inferring the remaining arguments.
- substs.push(inferred_kind(Some(&substs), param, infer_types));
+ substs.push(inferred_kind(Some(&substs), param, infer_args));
args.next();
params.next();
}
span: Span,
def_id: DefId,
generic_args: &'a hir::GenericArgs,
- infer_types: bool,
+ infer_args: bool,
self_ty: Option<Ty<'tcx>>)
-> (SubstsRef<'tcx>, Vec<ConvertedBinding<'tcx>>, Option<Vec<Span>>)
{
&generic_args,
GenericArgPosition::Type,
has_self,
- infer_types,
+ infer_args,
);
let is_object = self_ty.map_or(false, |ty| {
self_ty.is_some(),
self_ty,
// Provide the generic args, and whether types should be inferred.
- |_| (Some(generic_args), infer_types),
+ |_| (Some(generic_args), infer_args),
// Provide substitutions for parameters for which (valid) arguments have been provided.
|param, arg| {
match (¶m.kind, arg) {
}
},
// Provide substitutions for parameters for which arguments are inferred.
- |substs, param, infer_types| {
+ |substs, param, infer_args| {
match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_static.into(),
GenericParamDefKind::Type { has_default, .. } => {
- if !infer_types && has_default {
+ if !infer_args && has_default {
// No type parameter provided, but a default exists.
// If we are converting an object type, then the
.subst_spanned(tcx, substs.unwrap(), Some(span))
).into()
}
- } else if infer_types {
+ } else if infer_args {
// No type parameters were provided, we can infer all.
- if !default_needs_object_self(param) {
- self.ty_infer_for_def(param, span).into()
+ let param = if !default_needs_object_self(param) {
+ Some(param)
} else {
- self.ty_infer(span).into()
- }
+ None
+ };
+ self.ty_infer(param, span).into()
} else {
// We've already errored above about the mismatch.
tcx.types.err.into()
}
GenericParamDefKind::Const => {
// FIXME(const_generics:defaults)
- // We've already errored above about the mismatch.
- tcx.consts.err.into()
+ if infer_args {
+ // No const parameters were provided, we can infer all.
+ let ty = tcx.at(span).type_of(param.def_id);
+ self.ct_infer(ty, Some(param), span).into()
+ } else {
+ // We've already errored above about the mismatch.
+ tcx.consts.err.into()
+ }
}
}
},
self.create_substs_for_ast_path(span,
trait_def_id,
generic_args,
- trait_segment.infer_types,
+ trait_segment.infer_args,
Some(self_ty))
})
}
if tcx.named_region(lifetime.hir_id).is_some() {
self.ast_region_to_region(lifetime, None)
} else {
- self.re_infer(span, None).unwrap_or_else(|| {
+ self.re_infer(None, span).unwrap_or_else(|| {
span_err!(tcx.sess, span, E0228,
"the lifetime bound for this object type cannot be deduced \
from context; please supply an explicit bound");
// values in a ExprKind::Closure, or as
// the type of local variables. Both of these cases are
// handled specially and will not descend into this routine.
- self.ty_infer(ast_ty.span)
+ self.ty_infer(None, ast_ty.span)
}
hir::TyKind::CVarArgs(lt) => {
let va_list_did = match tcx.lang_items().va_list() {
result_ty
}
+ /// Returns the `DefId` of the constant parameter that the provided expression is a path to.
+ pub fn const_param_def_id(&self, expr: &hir::Expr) -> Option<DefId> {
+ match &expr.node {
+ ExprKind::Path(hir::QPath::Resolved(_, path)) => match path.res {
+ Res::Def(DefKind::ConstParam, did) => Some(did),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
pub fn ast_const_to_const(
&self,
ast_const: &hir::AnonConst,
}
}
- if let ExprKind::Path(ref qpath) = expr.node {
- if let hir::QPath::Resolved(_, ref path) = qpath {
- if let Res::Def(DefKind::ConstParam, def_id) = path.res {
- let node_id = tcx.hir().as_local_node_id(def_id).unwrap();
- let item_id = tcx.hir().get_parent_node(node_id);
- let item_def_id = tcx.hir().local_def_id(item_id);
- let generics = tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&tcx.hir().local_def_id(node_id)];
- let name = tcx.hir().name(node_id).as_interned_str();
- const_.val = ConstValue::Param(ty::ParamConst::new(index, name));
- }
- }
- };
+ if let Some(def_id) = self.const_param_def_id(expr) {
+ // Find the name and index of the const parameter by indexing the generics of the
+ // parent item and construct a `ParamConst`.
+ let node_id = tcx.hir().as_local_node_id(def_id).unwrap();
+ let item_id = tcx.hir().get_parent_node(node_id);
+ let item_def_id = tcx.hir().local_def_id(item_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&tcx.hir().local_def_id(node_id)];
+ let name = tcx.hir().name(node_id).as_interned_str();
+ const_.val = ConstValue::Param(ty::ParamConst::new(index, name));
+ }
tcx.mk_const(const_)
}
let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a));
let supplied_return = match decl.output {
hir::Return(ref output) => astconv.ast_ty_to_ty(&output),
- hir::DefaultReturn(_) => astconv.ty_infer(decl.output.span()),
+ hir::DefaultReturn(_) => astconv.ty_infer(None, decl.output.span()),
};
let result = ty::Binder::bind(self.tcx.mk_fn_sig(
"overflowing_add" | "overflowing_sub" | "overflowing_mul" |
"saturating_add" | "saturating_sub" |
"rotate_left" | "rotate_right" |
- "ctpop" | "ctlz" | "cttz" | "bswap" | "bitreverse"
+ "ctpop" | "ctlz" | "cttz" | "bswap" | "bitreverse" |
+ "minnumf32" | "minnumf64" | "maxnumf32" | "maxnumf64"
=> hir::Unsafety::Normal,
_ => hir::Unsafety::Unsafe,
}
}
"fabsf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
"fabsf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
+ "minnumf32" => (0, vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32),
+ "minnumf64" => (0, vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64),
+ "maxnumf32" => (0, vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32),
+ "maxnumf64" => (0, vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64),
"copysignf32" => (0, vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32),
"copysignf64" => (0, vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64),
"floorf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
use rustc_target::spec::abi::Abi;
use rustc::infer::opaque_types::OpaqueTypeDecl;
use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use rustc::middle::region;
use rustc::mir::interpret::{ConstValue, GlobalId};
use rustc::traits::{self, ObligationCause, ObligationCauseCode, TraitEngine};
use rustc::ty::{
- self, AdtKind, CanonicalUserType, Ty, TyCtxt, GenericParamDefKind, Visibility,
+ self, AdtKind, CanonicalUserType, Ty, TyCtxt, Const, GenericParamDefKind, Visibility,
ToPolyTraitRef, ToPredicate, RegionKind, UserType
};
use rustc::ty::adjustment::{
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
-
+ check_transparent(tcx, span, def_id);
check_packed(tcx, span, def_id);
}
return;
}
+ if adt.is_enum() {
+ if !tcx.features().transparent_enums {
+ emit_feature_err(&tcx.sess.parse_sess,
+ sym::transparent_enums,
+ sp,
+ GateIssue::Language,
+ "transparent enums are unstable");
+ }
+ if adt.variants.len() != 1 {
+ let variant_spans: Vec<_> = adt.variants.iter().map(|variant| {
+ tcx.hir().span_if_local(variant.def_id).unwrap()
+ }).collect();
+ let mut err = struct_span_err!(tcx.sess, sp, E0731,
+ "transparent enum needs exactly one variant, but has {}",
+ adt.variants.len());
+ if !variant_spans.is_empty() {
+ err.span_note(variant_spans, &format!("the following variants exist on `{}`",
+ tcx.def_path_str(def_id)));
+ }
+ err.emit();
+ if adt.variants.is_empty() {
+ // Don't bother checking the fields. No variants (and thus no fields) exist.
+ return;
+ }
+ }
+ }
+
+ if adt.is_union() && !tcx.features().transparent_unions {
+ emit_feature_err(&tcx.sess.parse_sess,
+ sym::transparent_unions,
+ sp,
+ GateIssue::Language,
+ "transparent unions are unstable");
+ }
+
// For each field, figure out if it's known to be a ZST and align(1)
- let field_infos = adt.non_enum_variant().fields.iter().map(|field| {
+ let field_infos = adt.all_fields().map(|field| {
let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
let layout = tcx.layout_of(param_env.and(ty));
let non_zst_count = non_zst_fields.clone().count();
if non_zst_count != 1 {
let field_spans: Vec<_> = non_zst_fields.map(|(span, _zst, _align1)| span).collect();
- struct_span_err!(tcx.sess, sp, E0690,
- "transparent struct needs exactly one non-zero-sized field, but has {}",
- non_zst_count)
- .span_note(field_spans, "non-zero-sized field")
- .emit();
+
+ let mut err = struct_span_err!(tcx.sess, sp, E0690,
+ "{}transparent {} needs exactly one non-zero-sized field, but has {}",
+ if adt.is_enum() { "the variant of a " } else { "" },
+ adt.descr(),
+ non_zst_count);
+ if !field_spans.is_empty() {
+ err.span_note(field_spans,
+ &format!("the following non-zero-sized fields exist on `{}`:",
+ tcx.def_path_str(def_id)));
+ }
+ err.emit();
}
for (span, zst, align1) in field_infos {
if zst && !align1 {
span_err!(tcx.sess, span, E0691,
- "zero-sized field in transparent struct has alignment larger than 1");
+ "zero-sized field in transparent {} has alignment larger than 1",
+ adt.descr());
}
}
}
}
check_representable(tcx, sp, def_id);
+ check_transparent(tcx, sp, def_id);
}
fn report_unexpected_variant_res<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
})
}
- fn re_infer(&self, span: Span, def: Option<&ty::GenericParamDef>)
- -> Option<ty::Region<'tcx>> {
+ fn re_infer(
+ &self,
+ def: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Option<ty::Region<'tcx>> {
let v = match def {
Some(def) => infer::EarlyBoundRegion(span, def.name),
None => infer::MiscVariable(span)
Some(self.next_region_var(v))
}
- fn ty_infer(&self, span: Span) -> Ty<'tcx> {
- self.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span,
- })
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ if let Some(param) = param {
+ if let UnpackedKind::Type(ty) = self.var_for_def(span, param).unpack() {
+ return ty;
+ }
+ unreachable!()
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ })
+ }
}
- fn ty_infer_for_def(&self,
- ty_param_def: &ty::GenericParamDef,
- span: Span) -> Ty<'tcx> {
- if let UnpackedKind::Type(ty) = self.var_for_def(span, ty_param_def).unpack() {
- return ty;
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> &'tcx Const<'tcx> {
+ if let Some(param) = param {
+ if let UnpackedKind::Const(ct) = self.var_for_def(span, param).unpack() {
+ return ct;
+ }
+ unreachable!()
+ } else {
+ self.next_const_var(ty, ConstVariableOrigin {
+ kind: ConstVariableOriginKind::ConstInference,
+ span,
+ })
}
- unreachable!()
}
fn projected_ty_from_poly_trait_ref(&self,
ty
}
+ /// Returns the `DefId` of the constant parameter that the provided expression is a path to.
+ pub fn const_param_def_id(&self, hir_c: &hir::AnonConst) -> Option<DefId> {
+ AstConv::const_param_def_id(self, &self.tcx.hir().body(hir_c.body).value)
+ }
+
pub fn to_const(&self, ast_c: &hir::AnonConst, ty: Ty<'tcx>) -> &'tcx ty::Const<'tcx> {
AstConv::ast_const_to_const(self, ast_c, ty)
}
hir::UnNeg => {
let result = self.check_user_unop(expr, oprnd_t, unop);
// If it's builtin, we can reuse the type, this helps inference.
- if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
+ if !oprnd_t.is_numeric() {
oprnd_t = result;
}
}
}
ExprKind::Repeat(ref element, ref count) => {
let count_def_id = tcx.hir().local_def_id_from_hir_id(count.hir_id);
- let param_env = ty::ParamEnv::empty();
- let substs = InternalSubsts::identity_for_item(tcx.global_tcx(), count_def_id);
- let instance = ty::Instance::resolve(
- tcx.global_tcx(),
- param_env,
- count_def_id,
- substs,
- ).unwrap();
- let global_id = GlobalId {
- instance,
- promoted: None
+ let count = if self.const_param_def_id(count).is_some() {
+ Ok(self.to_const(count, self.tcx.type_of(count_def_id)))
+ } else {
+ let param_env = ty::ParamEnv::empty();
+ let substs = InternalSubsts::identity_for_item(tcx.global_tcx(), count_def_id);
+ let instance = ty::Instance::resolve(
+ tcx.global_tcx(),
+ param_env,
+ count_def_id,
+ substs,
+ ).unwrap();
+ let global_id = GlobalId {
+ instance,
+ promoted: None
+ };
+
+ tcx.const_eval(param_env.and(global_id))
};
- let count = tcx.const_eval(param_env.and(global_id));
let uty = match expected {
ExpectHasType(uty) => {
if !infer_args_for_err.contains(&index) {
// Check whether the user has provided generic arguments.
if let Some(ref data) = segments[index].args {
- return (Some(data), segments[index].infer_types);
+ return (Some(data), segments[index].infer_args);
}
}
- return (None, segments[index].infer_types);
+ return (None, segments[index].infer_args);
}
(None, true)
}
},
// Provide substitutions for parameters for which arguments are inferred.
- |substs, param, infer_types| {
+ |substs, param, infer_args| {
match param.kind {
GenericParamDefKind::Lifetime => {
- self.re_infer(span, Some(param)).unwrap().into()
+ self.re_infer(Some(param), span).unwrap().into()
}
GenericParamDefKind::Type { has_default, .. } => {
- if !infer_types && has_default {
+ if !infer_args && has_default {
// If we have a default, then we it doesn't matter that we're not
// inferring the type arguments: we provide the default where any
// is missing.
use rustc::ty::util::Discr;
use rustc::ty::util::IntTypeExt;
use rustc::ty::subst::UnpackedKind;
-use rustc::ty::{self, AdtKind, DefIdTree, ToPolyTraitRef, Ty, TyCtxt};
+use rustc::ty::{self, AdtKind, DefIdTree, ToPolyTraitRef, Ty, TyCtxt, Const};
use rustc::ty::{ReprOptions, ToPredicate};
use rustc::util::captures::Captures;
use rustc::util::nodemap::FxHashMap;
use rustc::hir::GenericParamKind;
use rustc::hir::{self, CodegenFnAttrFlags, CodegenFnAttrs, Unsafety};
-use errors::Applicability;
+use errors::{Applicability, DiagnosticId};
use std::iter;
fn re_infer(
&self,
- _span: Span,
- _def: Option<&ty::GenericParamDef>,
+ _: Option<&ty::GenericParamDef>,
+ _: Span,
) -> Option<ty::Region<'tcx>> {
None
}
- fn ty_infer(&self, span: Span) -> Ty<'tcx> {
- struct_span_err!(
- self.tcx().sess,
+ fn ty_infer(&self, _: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ self.tcx().sess.struct_span_err_with_code(
span,
- E0121,
- "the type placeholder `_` is not allowed within types on item signatures"
+ "the type placeholder `_` is not allowed within types on item signatures",
+ DiagnosticId::Error("E0121".into()),
).span_label(span, "not allowed in type signatures")
.emit();
self.tcx().types.err
}
+ fn ct_infer(
+ &self,
+ _: Ty<'tcx>,
+ _: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> &'tcx Const<'tcx> {
+ self.tcx().sess.struct_span_err_with_code(
+ span,
+ "the const placeholder `_` is not allowed within types on item signatures",
+ DiagnosticId::Error("E0121".into()),
+ ).span_label(span, "not allowed in type signatures")
+ .emit();
+
+ self.tcx().consts.err
+ }
+
fn projected_ty_from_poly_trait_ref(
&self,
span: Span,
for attr in attrs.iter() {
if attr.check_name(sym::cold) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
- } else if attr.check_name(sym::allocator) {
+ } else if attr.check_name(sym::rustc_allocator) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
} else if attr.check_name(sym::unwind) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::UNWIND;
"##,
E0121: r##"
-In order to be consistent with Rust's lack of global type inference, type
-placeholders are disallowed by design in item signatures.
+In order to be consistent with Rust's lack of global type inference,
+type and const placeholders are disallowed by design in item signatures.
Examples of this error include:
E0690: r##"
A struct with the representation hint `repr(transparent)` had zero or more than
-on fields that were not guaranteed to be zero-sized.
+one fields that were not guaranteed to be zero-sized.
Erroneous code example:
"##,
E0691: r##"
-A struct with the `repr(transparent)` representation hint contains a zero-sized
-field that requires non-trivial alignment.
+A struct, enum, or union with the `repr(transparent)` representation hint
+contains a zero-sized field that requires non-trivial alignment.
Erroneous code example:
// struct has alignment larger than 1
```
-A transparent struct is supposed to be represented exactly like the piece of
-data it contains. Zero-sized fields with different alignment requirements
-potentially conflict with this property. In the example above, `Wrapper` would
-have to be aligned to 32 bytes even though `f32` has a smaller alignment
-requirement.
+A transparent struct, enum, or union is supposed to be represented exactly like
+the piece of data it contains. Zero-sized fields with different alignment
+requirements potentially conflict with this property. In the example above,
+`Wrapper` would have to be aligned to 32 bytes even though `f32` has a smaller
+alignment requirement.
Consider removing the over-aligned zero-sized field:
alignment.
"##,
-
E0699: r##"
A method was called on a raw pointer whose inner type wasn't completely known.
```
"##,
+E0731: r##"
+An enum with the representation hint `repr(transparent)` had zero or more than
+one variants.
+
+Erroneous code example:
+
+```compile_fail,E0731
+#[repr(transparent)]
+enum Status { // error: transparent enum needs exactly one variant, but has 2
+ Errno(u32),
+ Ok,
+}
+```
+
+Because transparent enums are represented exactly like one of their variants at
+run time, said variant must be uniquely determined. If there is no variant, or
+if there are multiple variants, it is not clear how the enum should be
+represented.
+"##,
+
}
register_diagnostics! {
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![allow(explicit_outlives_requirements)]
#[macro_use] extern crate log;
.collect()
}
- fn make_final_bounds<'b, 'c, 'cx>(
+ fn make_final_bounds(
&self,
ty_to_bounds: FxHashMap<Type, FxHashSet<GenericBound>>,
ty_to_fn: FxHashMap<Type, (Option<PolyTrait>, Option<Type>)>,
}
LoadedMacro::ProcMacro(ext) => {
let helpers = match &*ext {
- &SyntaxExtension::ProcMacroDerive(_, ref syms, ..) => { syms.clean(cx) }
+ &SyntaxExtension::Derive(_, ref syms, ..) => { syms.clean(cx) }
_ => Vec::new(),
};
}
}
-impl<'tcx> Clean<Lifetime> for ty::GenericParamDef {
+impl Clean<Lifetime> for ty::GenericParamDef {
fn clean(&self, _cx: &DocContext<'_>) -> Lifetime {
Lifetime(self.name.to_string())
}
}
}
-impl<'a, 'tcx> Clean<FnDecl> for (DefId, ty::PolyFnSig<'tcx>) {
+impl<'tcx> Clean<FnDecl> for (DefId, ty::PolyFnSig<'tcx>) {
fn clean(&self, cx: &DocContext<'_>) -> FnDecl {
let (did, sig) = *self;
let mut names = if cx.tcx.hir().as_local_hir_id(did).is_some() {
}
}
-impl<'tcx> Clean<Item> for ty::AssocItem {
+impl Clean<Item> for ty::AssocItem {
fn clean(&self, cx: &DocContext<'_>) -> Item {
let inner = match self.kind {
ty::AssocKind::Const => {
};
let length = match cx.tcx.const_eval(param_env.and(cid)) {
Ok(length) => print_const(cx, length),
- Err(_) => "_".to_string(),
+ Err(_) => cx.sess()
+ .source_map()
+ .span_to_snippet(cx.tcx.def_span(def_id))
+ .unwrap_or_else(|_| "_".to_string()),
};
Array(box ty.clean(cx), length)
},
}
}
-impl<'tcx> Clean<Item> for ty::FieldDef {
+impl Clean<Item> for ty::FieldDef {
fn clean(&self, cx: &DocContext<'_>) -> Item {
Item {
name: Some(self.ident.name).clean(cx),
}
}
-impl<'tcx> Clean<Item> for ty::VariantDef {
+impl Clean<Item> for ty::VariantDef {
fn clean(&self, cx: &DocContext<'_>) -> Item {
let kind = match self.ctor_kind {
CtorKind::Const => VariantKind::CLike,
use std::io::prelude::*;
use syntax::source_map::{SourceMap, FilePathMapping};
-use syntax::parse::lexer::{self, TokenAndSpan};
-use syntax::parse::token;
+use syntax::parse::lexer;
+use syntax::parse::token::{self, Token};
use syntax::parse;
use syntax::symbol::{kw, sym};
use syntax_pos::{Span, FileName};
}
/// Gets the next token out of the lexer.
- fn try_next_token(&mut self) -> Result<TokenAndSpan, HighlightError> {
+ fn try_next_token(&mut self) -> Result<Token, HighlightError> {
match self.lexer.try_next_token() {
- Ok(tas) => Ok(tas),
+ Ok(token) => Ok(token),
Err(_) => Err(HighlightError::LexError),
}
}
-> Result<(), HighlightError> {
loop {
let next = self.try_next_token()?;
- if next.tok == token::Eof {
+ if next == token::Eof {
break;
}
// Handles an individual token from the lexer.
fn write_token<W: Writer>(&mut self,
out: &mut W,
- tas: TokenAndSpan)
+ token: Token)
-> Result<(), HighlightError> {
- let klass = match tas.tok {
+ let klass = match token.kind {
token::Shebang(s) => {
out.string(Escape(&s.as_str()), Class::None)?;
return Ok(());
// reference or dereference operator or a reference or pointer type, instead of the
// bit-and or multiplication operator.
token::BinOp(token::And) | token::BinOp(token::Star)
- if self.lexer.peek().tok != token::Whitespace => Class::RefKeyWord,
+ if self.lexer.peek() != &token::Whitespace => Class::RefKeyWord,
// Consider this as part of a macro invocation if there was a
// leading identifier.
token::Question => Class::QuestionMark,
token::Dollar => {
- if self.lexer.peek().tok.is_ident() {
+ if self.lexer.peek().is_ident() {
self.in_macro_nonterminal = true;
Class::MacroNonTerminal
} else {
// as an attribute.
// Case 1: #![inner_attribute]
- if self.lexer.peek().tok == token::Not {
+ if self.lexer.peek() == &token::Not {
self.try_next_token()?; // NOTE: consumes `!` token!
- if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ if self.lexer.peek() == &token::OpenDelim(token::Bracket) {
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
}
// Case 2: #[outer_attribute]
- if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ if self.lexer.peek() == &token::OpenDelim(token::Bracket) {
self.in_attribute = true;
out.enter_span(Class::Attribute)?;
}
}
// Keywords are also included in the identifier set.
- token::Ident(ident, is_raw) => {
- match ident.name {
+ token::Ident(name, is_raw) => {
+ match name {
kw::Ref | kw::Mut if !is_raw => Class::RefKeyWord,
kw::SelfLower | kw::SelfUpper => Class::Self_,
sym::Option | sym::Result => Class::PreludeTy,
sym::Some | sym::None | sym::Ok | sym::Err => Class::PreludeVal,
- _ if tas.tok.is_reserved_ident() => Class::KeyWord,
+ _ if token.is_reserved_ident() => Class::KeyWord,
_ => {
if self.in_macro_nonterminal {
self.in_macro_nonterminal = false;
Class::MacroNonTerminal
- } else if self.lexer.peek().tok == token::Not {
+ } else if self.lexer.peek() == &token::Not {
self.in_macro = true;
Class::Macro
} else {
// Anything that didn't return above is the simple case where we the
// class just spans a single token, so we can use the `string` method.
- out.string(Escape(&self.snip(tas.sp)), klass)?;
+ out.string(Escape(&self.snip(token.span)), klass)?;
Ok(())
}
}
}
-impl<'a> Cache {
+impl Cache {
fn generics(&mut self, generics: &clean::Generics) {
for param in &generics.params {
match param.kind {
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/")]
use errors::Applicability;
-use syntax::parse::lexer::{TokenAndSpan, StringReader as Lexer};
+use syntax::parse::lexer::{StringReader as Lexer};
use syntax::parse::{ParseSess, token};
use syntax::source_map::FilePathMapping;
-use syntax_pos::FileName;
+use syntax_pos::{InnerSpan, FileName};
use crate::clean;
use crate::core::DocContext;
);
let errors = Lexer::new_or_buffered_errs(&sess, source_file, None).and_then(|mut lexer| {
- while let Ok(TokenAndSpan { tok, .. }) = lexer.try_next_token() {
- if tok == token::Eof {
+ while let Ok(token::Token { kind, .. }) = lexer.try_next_token() {
+ if kind == token::Eof {
break;
}
}
}
if code_block.syntax.is_none() && code_block.is_fenced {
- let sp = sp.from_inner_byte_pos(0, 3);
+ let sp = sp.from_inner(InnerSpan::new(0, 3));
diag.span_suggestion(
sp,
"mark blocks that do not contain Rust code as text",
if let Res::Def(DefKind::Macro(MacroKind::ProcMacroStub), _) = res {
// skip proc-macro stubs, they'll cause `get_macro` to crash
} else {
- if let SyntaxExtension::DeclMacro { .. } = *resolver.get_macro(res) {
+ if let SyntaxExtension::LegacyBang { .. } = *resolver.get_macro(res) {
return Some(res.map_id(|_| panic!("unexpected id")));
}
}
use rustc::middle::privacy::AccessLevels;
use rustc::util::nodemap::DefIdSet;
use std::mem;
-use syntax_pos::{DUMMY_SP, Span};
+use syntax_pos::{DUMMY_SP, InnerSpan, Span};
use std::ops::Range;
use crate::clean::{self, GetDefId, Item};
}
}
- let sp = span_of_attrs(attrs).from_inner_byte_pos(
+ let sp = span_of_attrs(attrs).from_inner(InnerSpan::new(
md_range.start + start_bytes,
md_range.end + start_bytes + end_bytes,
- );
+ ));
Some(sp)
}
($name:ident, $($other:ident,)*) => (tuple! { $($other,)* })
}
-/// Evaluates to the number of identifiers passed to it, for example: `count_idents!(a, b, c) == 3
-macro_rules! count_idents {
- () => { 0 };
- ($_i:ident, $($rest:ident,)*) => { 1 + count_idents!($($rest,)*) }
+/// Evaluates to the number of tokens passed to it.
+///
+/// Logarithmic counting: every one or two recursive expansions, the number of
+/// tokens to count is divided by two, instead of being reduced by one.
+/// Therefore, the recursion depth is the binary logarithm of the number of
+/// tokens to count, and the expanded tree is likewise very small.
+macro_rules! count {
+ () => (0usize);
+ ($one:tt) => (1usize);
+ ($($pairs:tt $_p:tt)*) => (count!($($pairs)*) << 1usize);
+ ($odd:tt $($rest:tt)*) => (count!($($rest)*) | 1usize);
}
macro_rules! tuple {
impl<$($name:Decodable),*> Decodable for ($($name,)*) {
#[allow(non_snake_case)]
fn decode<D: Decoder>(d: &mut D) -> Result<($($name,)*), D::Error> {
- let len: usize = count_idents!($($name,)*);
+ let len: usize = count!($($name)*);
d.read_tuple(len, |d| {
let mut i = 0;
let ret = ($(d.read_tuple_arg({ i+=1; i-1 }, |d| -> Result<$name, D::Error> {
panic_abort = { path = "../libpanic_abort" }
core = { path = "../libcore" }
libc = { version = "0.2.51", default-features = false, features = ['rustc-dep-of-std'] }
-compiler_builtins = { version = "0.1.15" }
+compiler_builtins = { version = "0.1.16" }
profiler_builtins = { path = "../libprofiler_builtins", optional = true }
unwind = { path = "../libunwind" }
hashbrown = { version = "0.4.0", features = ['rustc-dep-of-std'] }
[package.metadata.fortanix-sgx]
# Maximum possible number of threads when testing
threads = 125
+# Maximum heap size
+heap_size = 0x8000000
if self.is_nan() {
NAN
} else {
- unsafe { intrinsics::copysignf32(1.0, self) }
+ 1.0_f32.copysign(self)
}
}
if self.is_nan() {
NAN
} else {
- unsafe { intrinsics::copysignf64(1.0, self) }
+ 1.0_f64.copysign(self)
}
}
/// an array. Indeed, this provides most of the API for working with arrays.
/// Slices have a dynamic size and do not coerce to arrays.
///
-/// There is no way to move elements out of an array. See [`mem::replace`][replace]
-/// for an alternative.
+/// You can move elements out of an array with a slice pattern. If you want
+/// one element, see [`mem::replace`][replace].
///
/// # Examples
///
/// for x in &array { }
/// ```
///
+/// You can use a slice pattern to move elements out of an array:
+///
+/// ```
+/// fn move_away(_: String) { /* Do interesting things. */ }
+///
+/// let [john, roa] = ["John".to_string(), "Roa".to_string()];
+/// move_away(john);
+/// move_away(roa);
+/// ```
+///
/// [slice]: primitive.slice.html
/// [copy]: marker/trait.Copy.html
/// [clone]: clone/trait.Clone.html
/// assert_eq!(tuple.2, 'c');
/// ```
///
+/// The sequential nature of the tuple applies to its implementations of various
+/// traits. For example, in `PartialOrd` and `Ord`, the elements are compared
+/// sequentially until the first non-equal set is found.
+///
/// For more about tuples, see [the book](../book/ch03-02-data-types.html#the-tuple-type).
///
/// # Trait implementations
FloatUnsuffixed(Symbol),
/// A boolean literal.
Bool(bool),
- /// A recovered character literal that contains mutliple `char`s, most likely a typo.
+ /// Placeholder for a literal that wasn't well-formed in some way.
Err(Symbol),
}
| LitKind::ByteStr(..)
| LitKind::Byte(..)
| LitKind::Char(..)
- | LitKind::Err(..)
| LitKind::Int(_, LitIntType::Unsuffixed)
| LitKind::FloatUnsuffixed(..)
- | LitKind::Bool(..) => true,
+ | LitKind::Bool(..)
+ | LitKind::Err(..) => true,
// suffixed variants
LitKind::Int(_, LitIntType::Signed(..))
| LitKind::Int(_, LitIntType::Unsigned(..))
let mod_sep_span = Span::new(last_pos,
segment.ident.span.lo(),
segment.ident.span.ctxt());
- idents.push(TokenTree::Token(mod_sep_span, Token::ModSep).into());
+ idents.push(TokenTree::token(token::ModSep, mod_sep_span).into());
}
- idents.push(TokenTree::Token(segment.ident.span,
- Token::from_ast_ident(segment.ident)).into());
+ idents.push(TokenTree::Token(Token::from_ast_ident(segment.ident)).into());
last_pos = segment.ident.span.hi();
}
self.node.tokens(self.span).append_to_tree_and_joint_vec(&mut idents);
{
// FIXME: Share code with `parse_path`.
let path = match tokens.next() {
- Some(TokenTree::Token(span, token @ Token::Ident(..))) |
- Some(TokenTree::Token(span, token @ Token::ModSep)) => 'arm: {
- let mut segments = if let Token::Ident(ident, _) = token {
- if let Some(TokenTree::Token(_, Token::ModSep)) = tokens.peek() {
+ Some(TokenTree::Token(Token { kind: kind @ token::Ident(..), span })) |
+ Some(TokenTree::Token(Token { kind: kind @ token::ModSep, span })) => 'arm: {
+ let mut segments = if let token::Ident(name, _) = kind {
+ if let Some(TokenTree::Token(Token { kind: token::ModSep, .. }))
+ = tokens.peek() {
tokens.next();
- vec![PathSegment::from_ident(ident.with_span_pos(span))]
+ vec![PathSegment::from_ident(Ident::new(name, span))]
} else {
- break 'arm Path::from_ident(ident.with_span_pos(span));
+ break 'arm Path::from_ident(Ident::new(name, span));
}
} else {
vec![PathSegment::path_root(span)]
};
loop {
- if let Some(TokenTree::Token(span,
- Token::Ident(ident, _))) = tokens.next() {
- segments.push(PathSegment::from_ident(ident.with_span_pos(span)));
+ if let Some(TokenTree::Token(Token { kind: token::Ident(name, _), span }))
+ = tokens.next() {
+ segments.push(PathSegment::from_ident(Ident::new(name, span)));
} else {
return None;
}
- if let Some(TokenTree::Token(_, Token::ModSep)) = tokens.peek() {
+ if let Some(TokenTree::Token(Token { kind: token::ModSep, .. }))
+ = tokens.peek() {
tokens.next();
} else {
break;
let span = span.with_hi(segments.last().unwrap().ident.span.hi());
Path { span, segments }
}
- Some(TokenTree::Token(_, Token::Interpolated(nt))) => match *nt {
+ Some(TokenTree::Token(Token { kind: token::Interpolated(nt), .. })) => match *nt {
token::Nonterminal::NtIdent(ident, _) => Path::from_ident(ident),
token::Nonterminal::NtMeta(ref meta) => return Some(meta.clone()),
token::Nonterminal::NtPath(ref path) => path.clone(),
match *self {
MetaItemKind::Word => TokenStream::empty(),
MetaItemKind::NameValue(ref lit) => {
- let mut vec = vec![TokenTree::Token(span, Token::Eq).into()];
+ let mut vec = vec![TokenTree::token(token::Eq, span).into()];
lit.tokens().append_to_tree_and_joint_vec(&mut vec);
TokenStream::new(vec)
}
let mut tokens = Vec::new();
for (i, item) in list.iter().enumerate() {
if i > 0 {
- tokens.push(TokenTree::Token(span, Token::Comma).into());
+ tokens.push(TokenTree::token(token::Comma, span).into());
}
item.tokens().append_to_tree_and_joint_vec(&mut tokens);
}
where I: Iterator<Item = TokenTree>,
{
let delimited = match tokens.peek().cloned() {
- Some(TokenTree::Token(_, token::Eq)) => {
+ Some(TokenTree::Token(token)) if token == token::Eq => {
tokens.next();
- return if let Some(TokenTree::Token(span, token)) = tokens.next() {
- Lit::from_token(&token, span).ok().map(MetaItemKind::NameValue)
+ return if let Some(TokenTree::Token(token)) = tokens.next() {
+ Lit::from_token(&token).ok().map(MetaItemKind::NameValue)
} else {
None
};
let item = NestedMetaItem::from_tokens(&mut tokens)?;
result.push(item);
match tokens.next() {
- None | Some(TokenTree::Token(_, Token::Comma)) => {}
+ None | Some(TokenTree::Token(Token { kind: token::Comma, .. })) => {}
_ => return None,
}
}
fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<NestedMetaItem>
where I: Iterator<Item = TokenTree>,
{
- if let Some(TokenTree::Token(span, token)) = tokens.peek().cloned() {
- if let Ok(lit) = Lit::from_token(&token, span) {
+ if let Some(TokenTree::Token(token)) = tokens.peek() {
+ if let Ok(lit) = Lit::from_token(token) {
tokens.next();
return Some(NestedMetaItem::Literal(lit));
}
raw_attr.clone(),
);
- let start_span = parser.span;
+ let start_span = parser.token.span;
let (path, tokens) = panictry!(parser.parse_meta_item_unrestricted());
- let end_span = parser.span;
+ let end_span = parser.token.span;
if parser.token != token::Eof {
parse_sess.span_diagnostic
.span_err(start_span.to(end_span), "invalid crate attribute");
let mut expanded_attrs = Vec::with_capacity(1);
while !parser.check(&token::CloseDelim(token::Paren)) {
- let lo = parser.span.lo();
+ let lo = parser.token.span.lo();
let (path, tokens) = parser.parse_meta_item_unrestricted()?;
expanded_attrs.push((path, tokens, parser.prev_span.with_lo(lo)));
parser.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Paren)])?;
use crate::source_map;
use crate::ext::base::{ExtCtxt, MacEager, MacResult};
use crate::ext::build::AstBuilder;
-use crate::parse::token;
+use crate::parse::token::{self, Token};
use crate::ptr::P;
use crate::symbol::kw;
use crate::tokenstream::{TokenTree};
span: Span,
token_tree: &[TokenTree])
-> Box<dyn MacResult+'cx> {
- let code = match (token_tree.len(), token_tree.get(0)) {
- (1, Some(&TokenTree::Token(_, token::Ident(code, _)))) => code,
+ let code = match token_tree {
+ [
+ TokenTree::Token(Token { kind: token::Ident(code, _), .. })
+ ] => code,
_ => unreachable!()
};
ecx.parse_sess.registered_diagnostics.with_lock(|diagnostics| {
- match diagnostics.get_mut(&code.name) {
+ match diagnostics.get_mut(&code) {
// Previously used errors.
Some(&mut ErrorInfo { description: _, use_site: Some(previous_span) }) => {
ecx.struct_span_warn(span, &format!(
span: Span,
token_tree: &[TokenTree])
-> Box<dyn MacResult+'cx> {
- let (code, description) = match (
- token_tree.len(),
- token_tree.get(0),
- token_tree.get(1),
- token_tree.get(2)
- ) {
- (1, Some(&TokenTree::Token(_, token::Ident(ref code, _))), None, None) => {
- (code, None)
+ let (code, description) = match token_tree {
+ [
+ TokenTree::Token(Token { kind: token::Ident(code, _), .. })
+ ] => {
+ (*code, None)
+ },
+ [
+ TokenTree::Token(Token { kind: token::Ident(code, _), .. }),
+ TokenTree::Token(Token { kind: token::Comma, .. }),
+ TokenTree::Token(Token { kind: token::Literal(token::Lit { symbol, .. }), ..})
+ ] => {
+ (*code, Some(*symbol))
},
- (3, Some(&TokenTree::Token(_, token::Ident(ref code, _))),
- Some(&TokenTree::Token(_, token::Comma)),
- Some(&TokenTree::Token(_, token::Literal(token::Lit { symbol, .. })))) => {
- (code, Some(symbol))
- }
_ => unreachable!()
};
description,
use_site: None
};
- if diagnostics.insert(code.name, info).is_some() {
+ if diagnostics.insert(code, info).is_some() {
ecx.span_err(span, &format!(
"diagnostic code {} already registered", code
));
token_tree: &[TokenTree])
-> Box<dyn MacResult+'cx> {
assert_eq!(token_tree.len(), 3);
- let (crate_name, name) = match (&token_tree[0], &token_tree[2]) {
+ let (crate_name, ident) = match (&token_tree[0], &token_tree[2]) {
(
// Crate name.
- &TokenTree::Token(_, token::Ident(ref crate_name, _)),
+ &TokenTree::Token(Token { kind: token::Ident(crate_name, _), .. }),
// DIAGNOSTICS ident.
- &TokenTree::Token(_, token::Ident(ref name, _))
- ) => (*&crate_name, name),
+ &TokenTree::Token(Token { kind: token::Ident(name, _), span })
+ ) => (crate_name, Ident::new(name, span)),
_ => unreachable!()
};
MacEager::items(smallvec![
P(ast::Item {
- ident: *name,
+ ident,
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemKind::Const(
//! Since we cannot have a dependency on `librustc`, we implement some types here that are somewhat
//! redundant. Later, these types can be converted to types for use by the rest of the compiler.
-use crate::syntax::ast::NodeId;
+use crate::ast::NodeId;
use syntax_pos::MultiSpan;
/// Since we cannot import `LintId`s from `rustc::lint`, we define some Ids here which can later be
-pub use SyntaxExtension::*;
-
-use crate::ast::{self, Attribute, Name, PatKind, MetaItem};
+use crate::ast::{self, Attribute, Name, PatKind};
use crate::attr::HasAttrs;
use crate::source_map::{SourceMap, Spanned, respan};
use crate::edition::Edition;
}
}
-// A more flexible ItemDecorator.
-pub trait MultiItemDecorator {
- fn expand(&self,
- ecx: &mut ExtCtxt<'_>,
- sp: Span,
- meta_item: &ast::MetaItem,
- item: &Annotatable,
- push: &mut dyn FnMut(Annotatable));
-}
-
-impl<F> MultiItemDecorator for F
- where F : Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, &Annotatable, &mut dyn FnMut(Annotatable))
-{
- fn expand(&self,
- ecx: &mut ExtCtxt<'_>,
- sp: Span,
- meta_item: &ast::MetaItem,
- item: &Annotatable,
- push: &mut dyn FnMut(Annotatable)) {
- (*self)(ecx, sp, meta_item, item, push)
- }
-}
-
// `meta_item` is the annotation, and `item` is the item being modified.
// FIXME Decorators should follow the same pattern too.
pub trait MultiItemModifier {
impl MutVisitor for AvoidInterpolatedIdents {
fn visit_tt(&mut self, tt: &mut tokenstream::TokenTree) {
- if let tokenstream::TokenTree::Token(_, token::Interpolated(nt)) = tt {
- if let token::NtIdent(ident, is_raw) = **nt {
- *tt = tokenstream::TokenTree::Token(ident.span,
- token::Ident(ident, is_raw));
+ if let tokenstream::TokenTree::Token(token) = tt {
+ if let token::Interpolated(nt) = &token.kind {
+ if let token::NtIdent(ident, is_raw) = **nt {
+ *tt = tokenstream::TokenTree::token(
+ token::Ident(ident.name, is_raw), ident.span
+ );
+ }
}
}
mut_visit::noop_visit_tt(tt, self)
}
}
-pub trait IdentMacroExpander {
- fn expand<'cx>(&self,
- cx: &'cx mut ExtCtxt<'_>,
- sp: Span,
- ident: ast::Ident,
- token_tree: Vec<tokenstream::TokenTree>)
- -> Box<dyn MacResult+'cx>;
-}
-
-pub type IdentMacroExpanderFn =
- for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, ast::Ident, Vec<tokenstream::TokenTree>)
- -> Box<dyn MacResult+'cx>;
-
-impl<F> IdentMacroExpander for F
- where F : for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, ast::Ident,
- Vec<tokenstream::TokenTree>) -> Box<dyn MacResult+'cx>
-{
- fn expand<'cx>(&self,
- cx: &'cx mut ExtCtxt<'_>,
- sp: Span,
- ident: ast::Ident,
- token_tree: Vec<tokenstream::TokenTree>)
- -> Box<dyn MacResult+'cx>
- {
- (*self)(cx, sp, ident, token_tree)
- }
-}
-
// Use a macro because forwarding to a simple function has type system issues
macro_rules! make_stmts_default {
($me:expr) => {
}
}
-pub type BuiltinDeriveFn =
- for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable));
-
/// Represents different kinds of macro invocations that can be resolved.
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum MacroKind {
/// An enum representing the different kinds of syntax extensions.
pub enum SyntaxExtension {
- /// A trivial "extension" that does nothing, only keeps the attribute and marks it as known.
- NonMacroAttr { mark_used: bool },
-
- /// A syntax extension that is attached to an item and creates new items
- /// based upon it.
- ///
- /// `#[derive(...)]` is a `MultiItemDecorator`.
- ///
- /// Prefer ProcMacro or MultiModifier since they are more flexible.
- MultiDecorator(Box<dyn MultiItemDecorator + sync::Sync + sync::Send>),
-
- /// A syntax extension that is attached to an item and modifies it
- /// in-place. Also allows decoration, i.e., creating new items.
- MultiModifier(Box<dyn MultiItemModifier + sync::Sync + sync::Send>),
-
- /// A function-like procedural macro. TokenStream -> TokenStream.
- ProcMacro {
+ /// A token-based function-like macro.
+ Bang {
+ /// An expander with signature TokenStream -> TokenStream.
expander: Box<dyn ProcMacro + sync::Sync + sync::Send>,
- /// Whitelist of unstable features that are treated as stable inside this macro
+ /// Whitelist of unstable features that are treated as stable inside this macro.
allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ /// Edition of the crate in which this macro is defined.
edition: Edition,
},
- /// An attribute-like procedural macro. TokenStream, TokenStream -> TokenStream.
- /// The first TokenSteam is the attribute, the second is the annotated item.
- /// Allows modification of the input items and adding new items, similar to
- /// MultiModifier, but uses TokenStreams, rather than AST nodes.
- AttrProcMacro(Box<dyn AttrProcMacro + sync::Sync + sync::Send>, Edition),
-
- /// A normal, function-like syntax extension.
- ///
- /// `bytes!` is a `NormalTT`.
- NormalTT {
+ /// An AST-based function-like macro.
+ LegacyBang {
+ /// An expander with signature TokenStream -> AST.
expander: Box<dyn TTMacroExpander + sync::Sync + sync::Send>,
+ /// Some info about the macro's definition point.
def_info: Option<(ast::NodeId, Span)>,
- /// Whether the contents of the macro can
- /// directly use `#[unstable]` things.
- ///
- /// Only allows things that require a feature gate in the given whitelist
+ /// Hygienic properties of identifiers produced by this macro.
+ transparency: Transparency,
+ /// Whitelist of unstable features that are treated as stable inside this macro.
allow_internal_unstable: Option<Lrc<[Symbol]>>,
- /// Whether the contents of the macro can use `unsafe`
- /// without triggering the `unsafe_code` lint.
+ /// Suppresses the `unsafe_code` lint for code produced by this macro.
allow_internal_unsafe: bool,
- /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`)
- /// for a given macro.
+ /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro.
local_inner_macros: bool,
- /// The macro's feature name if it is unstable, and the stability feature
+ /// The macro's feature name and tracking issue number if it is unstable.
unstable_feature: Option<(Symbol, u32)>,
- /// Edition of the crate in which the macro is defined
+ /// Edition of the crate in which this macro is defined.
edition: Edition,
},
- /// A function-like syntax extension that has an extra ident before
- /// the block.
- IdentTT {
- expander: Box<dyn IdentMacroExpander + sync::Sync + sync::Send>,
- span: Option<Span>,
- allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ /// A token-based attribute macro.
+ Attr(
+ /// An expander with signature (TokenStream, TokenStream) -> TokenStream.
+ /// The first TokenSteam is the attribute itself, the second is the annotated item.
+ /// The produced TokenSteam replaces the input TokenSteam.
+ Box<dyn AttrProcMacro + sync::Sync + sync::Send>,
+ /// Edition of the crate in which this macro is defined.
+ Edition,
+ ),
+
+ /// An AST-based attribute macro.
+ LegacyAttr(
+ /// An expander with signature (AST, AST) -> AST.
+ /// The first AST fragment is the attribute itself, the second is the annotated item.
+ /// The produced AST fragment replaces the input AST fragment.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ ),
+
+ /// A trivial attribute "macro" that does nothing,
+ /// only keeps the attribute and marks it as known.
+ NonMacroAttr {
+ /// Suppresses the `unused_attributes` lint for this attribute.
+ mark_used: bool,
},
- /// An attribute-like procedural macro. TokenStream -> TokenStream.
- /// The input is the annotated item.
- /// Allows generating code to implement a Trait for a given struct
- /// or enum item.
- ProcMacroDerive(Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
- Vec<Symbol> /* inert attribute names */, Edition),
-
- /// An attribute-like procedural macro that derives a builtin trait.
- BuiltinDerive(BuiltinDeriveFn),
-
- /// A declarative macro, e.g., `macro m() {}`.
- DeclMacro {
- expander: Box<dyn TTMacroExpander + sync::Sync + sync::Send>,
- def_info: Option<(ast::NodeId, Span)>,
- is_transparent: bool,
- edition: Edition,
- }
+ /// A token-based derive macro.
+ Derive(
+ /// An expander with signature TokenStream -> TokenStream (not yet).
+ /// The produced TokenSteam is appended to the input TokenSteam.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ /// Names of helper attributes registered by this macro.
+ Vec<Symbol>,
+ /// Edition of the crate in which this macro is defined.
+ Edition,
+ ),
+
+ /// An AST-based derive macro.
+ LegacyDerive(
+ /// An expander with signature AST -> AST.
+ /// The produced AST fragment is appended to the input AST fragment.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ ),
}
impl SyntaxExtension {
/// Returns which kind of macro calls this syntax extension.
pub fn kind(&self) -> MacroKind {
match *self {
- SyntaxExtension::DeclMacro { .. } |
- SyntaxExtension::NormalTT { .. } |
- SyntaxExtension::IdentTT { .. } |
- SyntaxExtension::ProcMacro { .. } =>
- MacroKind::Bang,
- SyntaxExtension::NonMacroAttr { .. } |
- SyntaxExtension::MultiDecorator(..) |
- SyntaxExtension::MultiModifier(..) |
- SyntaxExtension::AttrProcMacro(..) =>
- MacroKind::Attr,
- SyntaxExtension::ProcMacroDerive(..) |
- SyntaxExtension::BuiltinDerive(..) =>
- MacroKind::Derive,
+ SyntaxExtension::Bang { .. } |
+ SyntaxExtension::LegacyBang { .. } => MacroKind::Bang,
+ SyntaxExtension::Attr(..) |
+ SyntaxExtension::LegacyAttr(..) |
+ SyntaxExtension::NonMacroAttr { .. } => MacroKind::Attr,
+ SyntaxExtension::Derive(..) |
+ SyntaxExtension::LegacyDerive(..) => MacroKind::Derive,
}
}
pub fn default_transparency(&self) -> Transparency {
match *self {
- SyntaxExtension::ProcMacro { .. } |
- SyntaxExtension::AttrProcMacro(..) |
- SyntaxExtension::ProcMacroDerive(..) |
- SyntaxExtension::DeclMacro { is_transparent: false, .. } => Transparency::Opaque,
- SyntaxExtension::DeclMacro { is_transparent: true, .. } => Transparency::Transparent,
- _ => Transparency::SemiTransparent,
+ SyntaxExtension::LegacyBang { transparency, .. } => transparency,
+ SyntaxExtension::Bang { .. } |
+ SyntaxExtension::Attr(..) |
+ SyntaxExtension::Derive(..) |
+ SyntaxExtension::NonMacroAttr { .. } => Transparency::Opaque,
+ SyntaxExtension::LegacyAttr(..) |
+ SyntaxExtension::LegacyDerive(..) => Transparency::SemiTransparent,
}
}
pub fn edition(&self, default_edition: Edition) -> Edition {
match *self {
- SyntaxExtension::NormalTT { edition, .. } |
- SyntaxExtension::DeclMacro { edition, .. } |
- SyntaxExtension::ProcMacro { edition, .. } |
- SyntaxExtension::AttrProcMacro(.., edition) |
- SyntaxExtension::ProcMacroDerive(.., edition) => edition,
+ SyntaxExtension::Bang { edition, .. } |
+ SyntaxExtension::LegacyBang { edition, .. } |
+ SyntaxExtension::Attr(.., edition) |
+ SyntaxExtension::Derive(.., edition) => edition,
// Unstable legacy stuff
SyntaxExtension::NonMacroAttr { .. } |
- SyntaxExtension::IdentTT { .. } |
- SyntaxExtension::MultiDecorator(..) |
- SyntaxExtension::MultiModifier(..) |
- SyntaxExtension::BuiltinDerive(..) => default_edition,
+ SyntaxExtension::LegacyAttr(..) |
+ SyntaxExtension::LegacyDerive(..) => default_edition,
}
}
}
use crate::feature_gate::{self, Features, GateIssue, is_builtin_attr, emit_feature_err};
use crate::mut_visit::*;
use crate::parse::{DirectoryOwnership, PResult, ParseSess};
-use crate::parse::token::{self, Token};
+use crate::parse::token;
use crate::parse::parser::Parser;
use crate::ptr::P;
use crate::symbol::Symbol;
let item = match self.cx.resolver.resolve_macro_path(
path, MacroKind::Derive, Mark::root(), Vec::new(), false) {
Ok(ext) => match *ext {
- BuiltinDerive(..) => item_with_markers.clone(),
+ SyntaxExtension::LegacyDerive(..) => item_with_markers.clone(),
_ => item.clone(),
},
_ => item.clone(),
_ => unreachable!(),
};
- if let NonMacroAttr { mark_used: false } = *ext {} else {
+ if let SyntaxExtension::NonMacroAttr { mark_used: false } = *ext {} else {
// Macro attrs are always used when expanded,
// non-macro attrs are considered used when the field says so.
attr::mark_used(&attr);
});
match *ext {
- NonMacroAttr { .. } => {
+ SyntaxExtension::NonMacroAttr { .. } => {
attr::mark_known(&attr);
item.visit_attrs(|attrs| attrs.push(attr));
Some(invoc.fragment_kind.expect_from_annotatables(iter::once(item)))
}
- MultiModifier(ref mac) => {
+ SyntaxExtension::LegacyAttr(ref mac) => {
let meta = attr.parse_meta(self.cx.parse_sess)
.map_err(|mut e| { e.emit(); }).ok()?;
let item = mac.expand(self.cx, attr.span, &meta, item);
Some(invoc.fragment_kind.expect_from_annotatables(item))
}
- MultiDecorator(ref mac) => {
- let mut items = Vec::new();
- let meta = attr.parse_meta(self.cx.parse_sess)
- .expect("derive meta should already have been parsed");
- mac.expand(self.cx, attr.span, &meta, &item, &mut |item| items.push(item));
- items.push(item);
- Some(invoc.fragment_kind.expect_from_annotatables(items))
- }
- AttrProcMacro(ref mac, ..) => {
+ SyntaxExtension::Attr(ref mac, ..) => {
self.gate_proc_macro_attr_item(attr.span, &item);
- let item_tok = TokenTree::Token(DUMMY_SP, Token::Interpolated(Lrc::new(match item {
+ let item_tok = TokenTree::token(token::Interpolated(Lrc::new(match item {
Annotatable::Item(item) => token::NtItem(item),
Annotatable::TraitItem(item) => token::NtTraitItem(item.into_inner()),
Annotatable::ImplItem(item) => token::NtImplItem(item.into_inner()),
Annotatable::ForeignItem(item) => token::NtForeignItem(item.into_inner()),
Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()),
Annotatable::Expr(expr) => token::NtExpr(expr),
- }))).into();
+ })), DUMMY_SP).into();
let input = self.extract_proc_macro_attr_input(attr.tokens, attr.span);
let tok_result = mac.expand(self.cx, attr.span, input, item_tok);
let res = self.parse_ast_fragment(tok_result, invoc.fragment_kind,
self.gate_proc_macro_expansion(attr.span, &res);
res
}
- ProcMacroDerive(..) | BuiltinDerive(..) => {
+ SyntaxExtension::Derive(..) | SyntaxExtension::LegacyDerive(..) => {
self.cx.span_err(attr.span, &format!("`{}` is a derive macro", attr.path));
self.cx.trace_macros_diag();
invoc.fragment_kind.dummy(attr.span)
};
let opt_expanded = match *ext {
- DeclMacro { ref expander, def_info, edition, .. } => {
- if let Err(dummy_span) = validate_and_set_expn_info(self, def_info.map(|(_, s)| s),
- None, false, false, None,
- edition) {
- dummy_span
- } else {
- kind.make_from(expander.expand(self.cx, span, mac.node.stream(), None))
- }
- }
-
- NormalTT {
+ SyntaxExtension::LegacyBang {
ref expander,
def_info,
ref allow_internal_unstable,
local_inner_macros,
unstable_feature,
edition,
+ ..
} => {
if let Err(dummy_span) = validate_and_set_expn_info(self, def_info.map(|(_, s)| s),
allow_internal_unstable.clone(),
}
}
- IdentTT { ref expander, span: tt_span, ref allow_internal_unstable } => {
- if ident.name == kw::Invalid {
- self.cx.span_err(path.span,
- &format!("macro {}! expects an ident argument", path));
- self.cx.trace_macros_diag();
- kind.dummy(span)
- } else {
- invoc.expansion_data.mark.set_expn_info(ExpnInfo {
- call_site: span,
- def_site: tt_span,
- format: macro_bang_format(path),
- allow_internal_unstable: allow_internal_unstable.clone(),
- allow_internal_unsafe: false,
- local_inner_macros: false,
- edition: self.cx.parse_sess.edition,
- });
-
- let input: Vec<_> = mac.node.stream().into_trees().collect();
- kind.make_from(expander.expand(self.cx, span, ident, input))
- }
- }
-
- MultiDecorator(..) | MultiModifier(..) |
- AttrProcMacro(..) | SyntaxExtension::NonMacroAttr { .. } => {
+ SyntaxExtension::Attr(..) |
+ SyntaxExtension::LegacyAttr(..) |
+ SyntaxExtension::NonMacroAttr { .. } => {
self.cx.span_err(path.span,
&format!("`{}` can only be used in attributes", path));
self.cx.trace_macros_diag();
kind.dummy(span)
}
- ProcMacroDerive(..) | BuiltinDerive(..) => {
+ SyntaxExtension::Derive(..) | SyntaxExtension::LegacyDerive(..) => {
self.cx.span_err(path.span, &format!("`{}` is a derive macro", path));
self.cx.trace_macros_diag();
kind.dummy(span)
}
- SyntaxExtension::ProcMacro { ref expander, ref allow_internal_unstable, edition } => {
+ SyntaxExtension::Bang { ref expander, ref allow_internal_unstable, edition } => {
if ident.name != kw::Invalid {
let msg =
format!("macro {}! expects no ident argument, given '{}'", path, ident);
edition: ext.edition(self.cx.parse_sess.edition),
};
- match *ext {
- ProcMacroDerive(ref ext, ..) => {
- invoc.expansion_data.mark.set_expn_info(expn_info);
- let span = span.with_ctxt(self.cx.backtrace());
- let dummy = ast::MetaItem { // FIXME(jseyfried) avoid this
- path: Path::from_ident(Ident::invalid()),
- span: DUMMY_SP,
- node: ast::MetaItemKind::Word,
+ match ext {
+ SyntaxExtension::Derive(expander, ..) | SyntaxExtension::LegacyDerive(expander) => {
+ let meta = match ext {
+ SyntaxExtension::Derive(..) => ast::MetaItem { // FIXME(jseyfried) avoid this
+ path: Path::from_ident(Ident::invalid()),
+ span: DUMMY_SP,
+ node: ast::MetaItemKind::Word,
+ },
+ _ => {
+ expn_info.allow_internal_unstable = Some(vec![
+ sym::rustc_attrs,
+ Symbol::intern("derive_clone_copy"),
+ Symbol::intern("derive_eq"),
+ // RustcDeserialize and RustcSerialize
+ Symbol::intern("libstd_sys_internals"),
+ ].into());
+ attr.meta()?
+ }
};
- let items = ext.expand(self.cx, span, &dummy, item);
- Some(invoc.fragment_kind.expect_from_annotatables(items))
- }
- BuiltinDerive(func) => {
- expn_info.allow_internal_unstable = Some(vec![
- sym::rustc_attrs,
- Symbol::intern("derive_clone_copy"),
- Symbol::intern("derive_eq"),
- Symbol::intern("libstd_sys_internals"), // RustcDeserialize and RustcSerialize
- ].into());
+
invoc.expansion_data.mark.set_expn_info(expn_info);
let span = span.with_ctxt(self.cx.backtrace());
- let mut items = Vec::new();
- func(self.cx, span, &attr.meta()?, &item, &mut |a| items.push(a));
+ let items = expander.expand(self.cx, span, &meta, item);
Some(invoc.fragment_kind.expect_from_annotatables(items))
}
_ => {
let msg = format!("macro expansion ignores token `{}` and any following",
self.this_token_to_string());
// Avoid emitting backtrace info twice.
- let def_site_span = self.span.with_ctxt(SyntaxContext::empty());
+ let def_site_span = self.token.span.with_ctxt(SyntaxContext::empty());
let mut err = self.diagnostic().struct_span_err(def_site_span, &msg);
err.span_label(span, "caused by the macro expansion here");
let msg = format!(
while self.p.token != token::Eof {
match panictry!(self.p.parse_item()) {
Some(item) => ret.push(item),
- None => self.p.diagnostic().span_fatal(self.p.span,
+ None => self.p.diagnostic().span_fatal(self.p.token.span,
&format!("expected item, found `{}`",
self.p.this_token_to_string()))
.raise()
pub use ParseResult::*;
use TokenTreeOrTokenTreeSlice::*;
-use crate::ast::Ident;
+use crate::ast::{Ident, Name};
use crate::ext::tt::quoted::{self, TokenTree};
use crate::parse::{Directory, ParseSess};
use crate::parse::parser::{Parser, PathStyle};
Success(T),
/// Arm failed to match. If the second parameter is `token::Eof`, it indicates an unexpected
/// end of macro invocation. Otherwise, it indicates that no rules expected the given token.
- Failure(syntax_pos::Span, Token, &'static str),
+ Failure(Token, &'static str),
/// Fatal error (malformed macro?). Abort compilation.
Error(syntax_pos::Span, String),
}
/// Generates an appropriate parsing failure message. For EOF, this is "unexpected end...". For
/// other tokens, this is "unexpected token...".
-pub fn parse_failure_msg(tok: Token) -> String {
- match tok {
+pub fn parse_failure_msg(tok: &Token) -> String {
+ match tok.kind {
token::Eof => "unexpected end of macro invocation".to_string(),
_ => format!(
"no rules expected the token `{}`",
- pprust::token_to_string(&tok)
+ pprust::token_to_string(tok)
),
}
}
/// Performs a token equality check, ignoring syntax context (that is, an unhygienic comparison)
fn token_name_eq(t1: &Token, t2: &Token) -> bool {
- if let (Some((id1, is_raw1)), Some((id2, is_raw2))) = (t1.ident(), t2.ident()) {
- id1.name == id2.name && is_raw1 == is_raw2
- } else if let (Some(id1), Some(id2)) = (t1.lifetime(), t2.lifetime()) {
- id1.name == id2.name
+ if let (Some((ident1, is_raw1)), Some((ident2, is_raw2))) = (t1.ident(), t2.ident()) {
+ ident1.name == ident2.name && is_raw1 == is_raw2
+ } else if let (Some(ident1), Some(ident2)) = (t1.lifetime(), t2.lifetime()) {
+ ident1.name == ident2.name
} else {
- *t1 == *t2
+ t1.kind == t2.kind
}
}
eof_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
bb_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
token: &Token,
- span: syntax_pos::Span,
) -> ParseResult<()> {
// Pop items from `cur_items` until it is empty.
while let Some(mut item) = cur_items.pop() {
// Add matches from this repetition to the `matches` of `up`
for idx in item.match_lo..item.match_hi {
let sub = item.matches[idx].clone();
- let span = DelimSpan::from_pair(item.sp_open, span);
+ let span = DelimSpan::from_pair(item.sp_open, token.span);
new_pos.push_match(idx, MatchedSeq(sub, span));
}
TokenTree::MetaVarDecl(_, _, id) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
- if may_begin_with(id.name, token) {
+ if may_begin_with(token, id.name) {
bb_items.push(item);
}
}
//
// At the beginning of the loop, if we reach the end of the delimited submatcher,
// we pop the stack to backtrack out of the descent.
- seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => {
+ seq @ TokenTree::Delimited(..) |
+ seq @ TokenTree::Token(Token { kind: DocComment(..), .. }) => {
let lower_elts = mem::replace(&mut item.top_elts, Tt(seq));
let idx = item.idx;
item.stack.push(MatcherTtFrame {
}
// We just matched a normal token. We can just advance the parser.
- TokenTree::Token(_, ref t) if token_name_eq(t, token) => {
+ TokenTree::Token(t) if token_name_eq(&t, token) => {
item.idx += 1;
next_items.push(item);
}
//
// This MatcherPos instance is allocated on the stack. All others -- and
// there are frequently *no* others! -- are allocated on the heap.
- let mut initial = initial_matcher_pos(ms, parser.span);
+ let mut initial = initial_matcher_pos(ms, parser.token.span);
let mut cur_items = smallvec![MatcherPosHandle::Ref(&mut initial)];
let mut next_items = Vec::new();
&mut eof_items,
&mut bb_items,
&parser.token,
- parser.span,
) {
Success(_) => {}
- Failure(sp, tok, t) => return Failure(sp, tok, t),
+ Failure(token, msg) => return Failure(token, msg),
Error(sp, msg) => return Error(sp, msg),
}
// If we reached the EOF, check that there is EXACTLY ONE possible matcher. Otherwise,
// either the parse is ambiguous (which should never happen) or there is a syntax error.
- if token_name_eq(&parser.token, &token::Eof) {
+ if parser.token == token::Eof {
if eof_items.len() == 1 {
let matches = eof_items[0]
.matches
return nameize(sess, ms, matches);
} else if eof_items.len() > 1 {
return Error(
- parser.span,
+ parser.token.span,
"ambiguity: multiple successful parses".to_string(),
);
} else {
return Failure(
- if parser.span.is_dummy() {
- parser.span
+ Token::new(token::Eof, if parser.token.span.is_dummy() {
+ parser.token.span
} else {
- sess.source_map().next_point(parser.span)
- },
- token::Eof,
+ sess.source_map().next_point(parser.token.span)
+ }),
"missing tokens in macro arguments",
);
}
.join(" or ");
return Error(
- parser.span,
+ parser.token.span,
format!(
"local ambiguity: multiple parsing options: {}",
match next_items.len() {
// then there is a syntax error.
else if bb_items.is_empty() && next_items.is_empty() {
return Failure(
- parser.span,
- parser.token.clone(),
+ parser.token.take(),
"no rules expected this token in macro call",
);
}
/// The token is an identifier, but not `_`.
/// We prohibit passing `_` to macros expecting `ident` for now.
-fn get_macro_ident(token: &Token) -> Option<(Ident, bool)> {
- match *token {
- token::Ident(ident, is_raw) if ident.name != kw::Underscore =>
- Some((ident, is_raw)),
+fn get_macro_name(token: &Token) -> Option<(Name, bool)> {
+ match token.kind {
+ token::Ident(name, is_raw) if name != kw::Underscore => Some((name, is_raw)),
_ => None,
}
}
///
/// Returning `false` is a *stability guarantee* that such a matcher will *never* begin with that
/// token. Be conservative (return true) if not sure.
-fn may_begin_with(name: Symbol, token: &Token) -> bool {
+fn may_begin_with(token: &Token, name: Name) -> bool {
/// Checks whether the non-terminal may contain a single (non-keyword) identifier.
fn may_be_ident(nt: &token::Nonterminal) -> bool {
match *nt {
match name {
sym::expr => token.can_begin_expr(),
sym::ty => token.can_begin_type(),
- sym::ident => get_macro_ident(token).is_some(),
+ sym::ident => get_macro_name(token).is_some(),
sym::literal => token.can_begin_literal_or_bool(),
- sym::vis => match *token {
+ sym::vis => match token.kind {
// The follow-set of :vis + "priv" keyword + interpolated
- Token::Comma | Token::Ident(..) | Token::Interpolated(_) => true,
+ token::Comma | token::Ident(..) | token::Interpolated(_) => true,
_ => token.can_begin_type(),
},
- sym::block => match *token {
- Token::OpenDelim(token::Brace) => true,
- Token::Interpolated(ref nt) => match **nt {
+ sym::block => match token.kind {
+ token::OpenDelim(token::Brace) => true,
+ token::Interpolated(ref nt) => match **nt {
token::NtItem(_)
| token::NtPat(_)
| token::NtTy(_)
},
_ => false,
},
- sym::path | sym::meta => match *token {
- Token::ModSep | Token::Ident(..) => true,
- Token::Interpolated(ref nt) => match **nt {
+ sym::path | sym::meta => match token.kind {
+ token::ModSep | token::Ident(..) => true,
+ token::Interpolated(ref nt) => match **nt {
token::NtPath(_) | token::NtMeta(_) => true,
_ => may_be_ident(&nt),
},
_ => false,
},
- sym::pat => match *token {
- Token::Ident(..) | // box, ref, mut, and other identifiers (can stricten)
- Token::OpenDelim(token::Paren) | // tuple pattern
- Token::OpenDelim(token::Bracket) | // slice pattern
- Token::BinOp(token::And) | // reference
- Token::BinOp(token::Minus) | // negative literal
- Token::AndAnd | // double reference
- Token::Literal(..) | // literal
- Token::DotDot | // range pattern (future compat)
- Token::DotDotDot | // range pattern (future compat)
- Token::ModSep | // path
- Token::Lt | // path (UFCS constant)
- Token::BinOp(token::Shl) => true, // path (double UFCS)
- Token::Interpolated(ref nt) => may_be_ident(nt),
+ sym::pat => match token.kind {
+ token::Ident(..) | // box, ref, mut, and other identifiers (can stricten)
+ token::OpenDelim(token::Paren) | // tuple pattern
+ token::OpenDelim(token::Bracket) | // slice pattern
+ token::BinOp(token::And) | // reference
+ token::BinOp(token::Minus) | // negative literal
+ token::AndAnd | // double reference
+ token::Literal(..) | // literal
+ token::DotDot | // range pattern (future compat)
+ token::DotDotDot | // range pattern (future compat)
+ token::ModSep | // path
+ token::Lt | // path (UFCS constant)
+ token::BinOp(token::Shl) => true, // path (double UFCS)
+ token::Interpolated(ref nt) => may_be_ident(nt),
_ => false,
},
- sym::lifetime => match *token {
- Token::Lifetime(_) => true,
- Token::Interpolated(ref nt) => match **nt {
+ sym::lifetime => match token.kind {
+ token::Lifetime(_) => true,
+ token::Interpolated(ref nt) => match **nt {
token::NtLifetime(_) | token::NtTT(_) => true,
_ => false,
},
_ => false,
},
- _ => match *token {
+ _ => match token.kind {
token::CloseDelim(_) => false,
_ => true,
},
sym::literal => token::NtLiteral(panictry!(p.parse_literal_maybe_minus())),
sym::ty => token::NtTy(panictry!(p.parse_ty())),
// this could be handled like a token, since it is one
- sym::ident => if let Some((ident, is_raw)) = get_macro_ident(&p.token) {
- let span = p.span;
+ sym::ident => if let Some((name, is_raw)) = get_macro_name(&p.token) {
+ let span = p.token.span;
p.bump();
- token::NtIdent(Ident::new(ident.name, span), is_raw)
+ token::NtIdent(Ident::new(name, span), is_raw)
} else {
let token_str = pprust::token_to_string(&p.token);
p.fatal(&format!("expected ident, found {}", &token_str)).emit();
use crate::{ast, attr};
use crate::edition::Edition;
-use crate::ext::base::{DummyResult, ExtCtxt, MacResult, SyntaxExtension};
-use crate::ext::base::{NormalTT, TTMacroExpander};
+use crate::ext::base::{DummyResult, ExtCtxt, MacResult, SyntaxExtension, TTMacroExpander};
use crate::ext::expand::{AstFragment, AstFragmentKind};
+use crate::ext::hygiene::Transparency;
use crate::ext::tt::macro_parser::{Success, Error, Failure};
use crate::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use crate::ext::tt::macro_parser::{parse, parse_failure_msg};
use crate::feature_gate::Features;
use crate::parse::{Directory, ParseSess};
use crate::parse::parser::Parser;
-use crate::parse::token::{self, NtTT};
-use crate::parse::token::Token::*;
+use crate::parse::token::{self, Token, NtTT};
+use crate::parse::token::TokenKind::*;
use crate::symbol::{Symbol, kw, sym};
use crate::tokenstream::{DelimSpan, TokenStream, TokenTree};
use errors::FatalError;
-use syntax_pos::{Span, DUMMY_SP, symbol::Ident};
+use syntax_pos::{Span, symbol::Ident};
use log::debug;
use rustc_data_structures::fx::{FxHashMap};
let fragment = panictry!(parser.parse_ast_fragment(kind, true).map_err(|mut e| {
if parser.token == token::Eof && e.message().ends_with(", found `<eof>`") {
if !e.span.is_dummy() { // early end of macro arm (#52866)
- e.replace_span_with(parser.sess.source_map().next_point(parser.span));
+ e.replace_span_with(parser.sess.source_map().next_point(parser.token.span));
}
let msg = &e.message[0];
e.message[0] = (
if parser.sess.source_map().span_to_filename(arm_span).is_real() {
e.span_label(arm_span, "in this macro arm");
}
- } else if !parser.sess.source_map().span_to_filename(parser.span).is_real() {
+ } else if !parser.sess.source_map().span_to_filename(parser.token.span).is_real() {
e.span_label(site_span, "in this macro invocation");
}
e
}
// Which arm's failure should we report? (the one furthest along)
- let mut best_fail_spot = DUMMY_SP;
- let mut best_fail_tok = None;
- let mut best_fail_text = None;
+ let mut best_failure: Option<(Token, &str)> = None;
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
let lhs_tt = match *lhs {
arm_span,
})
}
- Failure(sp, tok, t) => if sp.lo() >= best_fail_spot.lo() {
- best_fail_spot = sp;
- best_fail_tok = Some(tok);
- best_fail_text = Some(t);
- },
+ Failure(token, msg) => match best_failure {
+ Some((ref best_token, _)) if best_token.span.lo() >= token.span.lo() => {}
+ _ => best_failure = Some((token, msg))
+ }
Error(err_sp, ref msg) => {
cx.span_fatal(err_sp.substitute_dummy(sp), &msg[..])
}
}
}
- let best_fail_msg = parse_failure_msg(best_fail_tok.expect("ran no matchers"));
- let span = best_fail_spot.substitute_dummy(sp);
- let mut err = cx.struct_span_err(span, &best_fail_msg);
- err.span_label(span, best_fail_text.unwrap_or(&best_fail_msg));
+ let (token, label) = best_failure.expect("ran no matchers");
+ let span = token.span.substitute_dummy(sp);
+ let mut err = cx.struct_span_err(span, &parse_failure_msg(&token));
+ err.span_label(span, label);
if let Some(sp) = def_span {
if cx.source_map().span_to_filename(sp).is_real() && !sp.is_dummy() {
err.span_label(cx.source_map().def_span(sp), "when calling this macro");
let argument_gram = vec![
quoted::TokenTree::Sequence(DelimSpan::dummy(), Lrc::new(quoted::SequenceRepetition {
tts: vec![
- quoted::TokenTree::MetaVarDecl(DUMMY_SP, lhs_nm, ast::Ident::from_str("tt")),
- quoted::TokenTree::Token(DUMMY_SP, token::FatArrow),
- quoted::TokenTree::MetaVarDecl(DUMMY_SP, rhs_nm, ast::Ident::from_str("tt")),
+ quoted::TokenTree::MetaVarDecl(def.span, lhs_nm, ast::Ident::from_str("tt")),
+ quoted::TokenTree::token(token::FatArrow, def.span),
+ quoted::TokenTree::MetaVarDecl(def.span, rhs_nm, ast::Ident::from_str("tt")),
],
- separator: Some(if body.legacy { token::Semi } else { token::Comma }),
+ separator: Some(Token::new(
+ if body.legacy { token::Semi } else { token::Comma }, def.span
+ )),
op: quoted::KleeneOp::OneOrMore,
num_captures: 2,
})),
// to phase into semicolon-termination instead of semicolon-separation
quoted::TokenTree::Sequence(DelimSpan::dummy(), Lrc::new(quoted::SequenceRepetition {
- tts: vec![quoted::TokenTree::Token(DUMMY_SP, token::Semi)],
+ tts: vec![quoted::TokenTree::token(token::Semi, def.span)],
separator: None,
op: quoted::KleeneOp::ZeroOrMore,
num_captures: 0
let argument_map = match parse(sess, body.stream(), &argument_gram, None, true) {
Success(m) => m,
- Failure(sp, tok, t) => {
- let s = parse_failure_msg(tok);
- let sp = sp.substitute_dummy(def.span);
+ Failure(token, msg) => {
+ let s = parse_failure_msg(&token);
+ let sp = token.span.substitute_dummy(def.span);
let mut err = sess.span_diagnostic.struct_span_fatal(sp, &s);
- err.span_label(sp, t);
+ err.span_label(sp, msg);
err.emit();
FatalError.raise();
}
valid,
});
- if body.legacy {
- let allow_internal_unstable = attr::find_by_name(&def.attrs, sym::allow_internal_unstable)
- .map(|attr| attr
- .meta_item_list()
- .map(|list| list.iter()
- .filter_map(|it| {
- let name = it.ident().map(|ident| ident.name);
- if name.is_none() {
- sess.span_diagnostic.span_err(it.span(),
- "allow internal unstable expects feature names")
- }
- name
- })
- .collect::<Vec<Symbol>>().into()
- )
- .unwrap_or_else(|| {
- sess.span_diagnostic.span_warn(
- attr.span, "allow_internal_unstable expects list of feature names. In the \
- future this will become a hard error. Please use `allow_internal_unstable(\
- foo, bar)` to only allow the `foo` and `bar` features",
- );
- vec![sym::allow_internal_unstable_backcompat_hack].into()
+ let transparency = if attr::contains_name(&def.attrs, sym::rustc_transparent_macro) {
+ Transparency::Transparent
+ } else if body.legacy {
+ Transparency::SemiTransparent
+ } else {
+ Transparency::Opaque
+ };
+
+ let allow_internal_unstable = attr::find_by_name(&def.attrs, sym::allow_internal_unstable)
+ .map(|attr| attr
+ .meta_item_list()
+ .map(|list| list.iter()
+ .filter_map(|it| {
+ let name = it.ident().map(|ident| ident.name);
+ if name.is_none() {
+ sess.span_diagnostic.span_err(it.span(),
+ "allow internal unstable expects feature names")
+ }
+ name
})
- );
- let allow_internal_unsafe = attr::contains_name(&def.attrs, sym::allow_internal_unsafe);
- let mut local_inner_macros = false;
- if let Some(macro_export) = attr::find_by_name(&def.attrs, sym::macro_export) {
- if let Some(l) = macro_export.meta_item_list() {
- local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros);
- }
- }
+ .collect::<Vec<Symbol>>().into()
+ )
+ .unwrap_or_else(|| {
+ sess.span_diagnostic.span_warn(
+ attr.span, "allow_internal_unstable expects list of feature names. In the \
+ future this will become a hard error. Please use `allow_internal_unstable(\
+ foo, bar)` to only allow the `foo` and `bar` features",
+ );
+ vec![sym::allow_internal_unstable_backcompat_hack].into()
+ })
+ );
- let unstable_feature = attr::find_stability(&sess,
- &def.attrs, def.span).and_then(|stability| {
- if let attr::StabilityLevel::Unstable { issue, .. } = stability.level {
- Some((stability.feature, issue))
- } else {
- None
- }
- });
-
- NormalTT {
- expander,
- def_info: Some((def.id, def.span)),
- allow_internal_unstable,
- allow_internal_unsafe,
- local_inner_macros,
- unstable_feature,
- edition,
+ let allow_internal_unsafe = attr::contains_name(&def.attrs, sym::allow_internal_unsafe);
+
+ let mut local_inner_macros = false;
+ if let Some(macro_export) = attr::find_by_name(&def.attrs, sym::macro_export) {
+ if let Some(l) = macro_export.meta_item_list() {
+ local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros);
}
- } else {
- let is_transparent = attr::contains_name(&def.attrs, sym::rustc_transparent_macro);
+ }
- SyntaxExtension::DeclMacro {
- expander,
- def_info: Some((def.id, def.span)),
- is_transparent,
- edition,
+ let unstable_feature = attr::find_stability(&sess,
+ &def.attrs, def.span).and_then(|stability| {
+ if let attr::StabilityLevel::Unstable { issue, .. } = stability.level {
+ Some((stability.feature, issue))
+ } else {
+ None
}
+ });
+
+ SyntaxExtension::LegacyBang {
+ expander,
+ def_info: Some((def.id, def.span)),
+ transparency,
+ allow_internal_unstable,
+ allow_internal_unsafe,
+ local_inner_macros,
+ unstable_feature,
+ edition,
}
}
// If the sequence contents can be empty, then the first
// token could be the separator token itself.
- if let (Some(ref sep), true) = (seq_rep.separator.clone(),
- subfirst.maybe_empty) {
- first.add_one_maybe(TokenTree::Token(sp.entire(), sep.clone()));
+ if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
+ first.add_one_maybe(TokenTree::Token(sep.clone()));
}
// Reverse scan: Sequence comes before `first`.
// If the sequence contents can be empty, then the first
// token could be the separator token itself.
- if let (Some(ref sep), true) = (seq_rep.separator.clone(),
- subfirst.maybe_empty) {
- first.add_one_maybe(TokenTree::Token(sp.entire(), sep.clone()));
+ if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
+ first.add_one_maybe(TokenTree::Token(sep.clone()));
}
assert!(first.maybe_empty);
// against SUFFIX
continue 'each_token;
}
- TokenTree::Sequence(sp, ref seq_rep) => {
+ TokenTree::Sequence(_, ref seq_rep) => {
suffix_first = build_suffix_first();
// The trick here: when we check the interior, we want
// to include the separator (if any) as a potential
// work of cloning it? But then again, this way I may
// get a "tighter" span?
let mut new;
- let my_suffix = if let Some(ref u) = seq_rep.separator {
+ let my_suffix = if let Some(sep) = &seq_rep.separator {
new = suffix_first.clone();
- new.add_one_maybe(TokenTree::Token(sp.entire(), u.clone()));
+ new.add_one_maybe(TokenTree::Token(sep.clone()));
&new
} else {
&suffix_first
continue 'each_last;
}
IsInFollow::Yes => {}
- IsInFollow::No(ref possible) => {
+ IsInFollow::No(possible) => {
let may_be = if last.tokens.len() == 1 &&
suffix_first.tokens.len() == 1
{
format!("not allowed after `{}` fragments", frag_spec),
);
let msg = "allowed there are: ";
- match &possible[..] {
+ match possible {
&[] => {}
&[t] => {
err.note(&format!(
enum IsInFollow {
Yes,
- No(Vec<&'static str>),
+ No(&'static [&'static str]),
Invalid(String, &'static str),
}
fn is_in_follow(tok: "ed::TokenTree, frag: &str) -> IsInFollow {
use quoted::TokenTree;
- if let TokenTree::Token(_, token::CloseDelim(_)) = *tok {
+ if let TokenTree::Token(Token { kind: token::CloseDelim(_), .. }) = *tok {
// closing a token tree can never be matched by any fragment;
// iow, we always require that `(` and `)` match, etc.
IsInFollow::Yes
IsInFollow::Yes
},
"stmt" | "expr" => {
- let tokens = vec!["`=>`", "`,`", "`;`"];
- match *tok {
- TokenTree::Token(_, ref tok) => match *tok {
+ const TOKENS: &[&str] = &["`=>`", "`,`", "`;`"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
FatArrow | Comma | Semi => IsInFollow::Yes,
- _ => IsInFollow::No(tokens),
+ _ => IsInFollow::No(TOKENS),
},
- _ => IsInFollow::No(tokens),
+ _ => IsInFollow::No(TOKENS),
}
},
"pat" => {
- let tokens = vec!["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"];
- match *tok {
- TokenTree::Token(_, ref tok) => match *tok {
+ const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
FatArrow | Comma | Eq | BinOp(token::Or) => IsInFollow::Yes,
- Ident(i, false) if i.name == kw::If ||
- i.name == kw::In => IsInFollow::Yes,
- _ => IsInFollow::No(tokens),
+ Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
},
- _ => IsInFollow::No(tokens),
+ _ => IsInFollow::No(TOKENS),
}
},
"path" | "ty" => {
- let tokens = vec![
+ const TOKENS: &[&str] = &[
"`{`", "`[`", "`=>`", "`,`", "`>`","`=`", "`:`", "`;`", "`|`", "`as`",
"`where`",
];
- match *tok {
- TokenTree::Token(_, ref tok) => match *tok {
+ match tok {
+ TokenTree::Token(token) => match token.kind {
OpenDelim(token::DelimToken::Brace) |
OpenDelim(token::DelimToken::Bracket) |
Comma | FatArrow | Colon | Eq | Gt | BinOp(token::Shr) | Semi |
BinOp(token::Or) => IsInFollow::Yes,
- Ident(i, false) if i.name == kw::As ||
- i.name == kw::Where => IsInFollow::Yes,
- _ => IsInFollow::No(tokens),
+ Ident(name, false) if name == kw::As ||
+ name == kw::Where => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
},
TokenTree::MetaVarDecl(_, _, frag) if frag.name == sym::block =>
IsInFollow::Yes,
- _ => IsInFollow::No(tokens),
+ _ => IsInFollow::No(TOKENS),
}
},
"ident" | "lifetime" => {
},
"vis" => {
// Explicitly disallow `priv`, on the off chance it comes back.
- let tokens = vec!["`,`", "an ident", "a type"];
- match *tok {
- TokenTree::Token(_, ref tok) => match *tok {
+ const TOKENS: &[&str] = &["`,`", "an ident", "a type"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
Comma => IsInFollow::Yes,
- Ident(i, is_raw) if is_raw || i.name != kw::Priv =>
- IsInFollow::Yes,
- ref tok => if tok.can_begin_type() {
+ Ident(name, is_raw) if is_raw || name != kw::Priv => IsInFollow::Yes,
+ _ => if token.can_begin_type() {
IsInFollow::Yes
} else {
- IsInFollow::No(tokens)
+ IsInFollow::No(TOKENS)
}
},
TokenTree::MetaVarDecl(_, _, frag) if frag.name == sym::ident
|| frag.name == sym::ty
|| frag.name == sym::path =>
IsInFollow::Yes,
- _ => IsInFollow::No(tokens),
+ _ => IsInFollow::No(TOKENS),
}
},
"" => IsInFollow::Yes, // kw::Invalid
fn quoted_tt_to_string(tt: "ed::TokenTree) -> String {
match *tt {
- quoted::TokenTree::Token(_, ref tok) => crate::print::pprust::token_to_string(tok),
+ quoted::TokenTree::Token(ref token) => crate::print::pprust::token_to_string(&token),
quoted::TokenTree::MetaVar(_, name) => format!("${}", name),
quoted::TokenTree::MetaVarDecl(_, name, kind) => format!("${}:{}", name, kind),
_ => panic!("unexpected quoted::TokenTree::{{Sequence or Delimited}} \
use crate::ast::NodeId;
-use crate::early_buffered_lints::BufferedEarlyLintId;
use crate::ext::tt::macro_parser;
use crate::feature_gate::Features;
-use crate::parse::{token, ParseSess};
+use crate::parse::token::{self, Token, TokenKind};
+use crate::parse::ParseSess;
use crate::print::pprust;
use crate::tokenstream::{self, DelimSpan};
use crate::ast;
}
impl Delimited {
- /// Returns the opening delimiter (possibly `NoDelim`).
- pub fn open_token(&self) -> token::Token {
- token::OpenDelim(self.delim)
- }
-
- /// Returns the closing delimiter (possibly `NoDelim`).
- pub fn close_token(&self) -> token::Token {
- token::CloseDelim(self.delim)
- }
-
/// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
pub fn open_tt(&self, span: Span) -> TokenTree {
let open_span = if span.is_dummy() {
} else {
span.with_lo(span.lo() + BytePos(self.delim.len() as u32))
};
- TokenTree::Token(open_span, self.open_token())
+ TokenTree::token(token::OpenDelim(self.delim), open_span)
}
/// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
} else {
span.with_lo(span.hi() - BytePos(self.delim.len() as u32))
};
- TokenTree::Token(close_span, self.close_token())
+ TokenTree::token(token::CloseDelim(self.delim), close_span)
}
}
/// The sequence of token trees
pub tts: Vec<TokenTree>,
/// The optional separator
- pub separator: Option<token::Token>,
+ pub separator: Option<Token>,
/// Whether the sequence can be repeated zero (*), or one or more times (+)
pub op: KleeneOp,
/// The number of `Match`s that appear in the sequence (and subsequences)
/// are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum TokenTree {
- Token(Span, token::Token),
+ Token(Token),
Delimited(DelimSpan, Lrc<Delimited>),
/// A kleene-style repetition sequence
Sequence(DelimSpan, Lrc<SequenceRepetition>),
/// Retrieves the `TokenTree`'s span.
pub fn span(&self) -> Span {
match *self {
- TokenTree::Token(sp, _)
- | TokenTree::MetaVar(sp, _)
- | TokenTree::MetaVarDecl(sp, _, _) => sp,
- TokenTree::Delimited(sp, _)
- | TokenTree::Sequence(sp, _) => sp.entire(),
+ TokenTree::Token(Token { span, .. })
+ | TokenTree::MetaVar(span, _)
+ | TokenTree::MetaVarDecl(span, _, _) => span,
+ TokenTree::Delimited(span, _)
+ | TokenTree::Sequence(span, _) => span.entire(),
}
}
+
+ crate fn token(kind: TokenKind, span: Span) -> TokenTree {
+ TokenTree::Token(Token::new(kind, span))
+ }
}
/// Takes a `tokenstream::TokenStream` and returns a `Vec<self::TokenTree>`. Specifically, this
match tree {
TokenTree::MetaVar(start_sp, ident) if expect_matchers => {
let span = match trees.next() {
- Some(tokenstream::TokenTree::Token(span, token::Colon)) => match trees.next() {
- Some(tokenstream::TokenTree::Token(end_sp, ref tok)) => match tok.ident() {
- Some((kind, _)) => {
- let span = end_sp.with_lo(start_sp.lo());
- result.push(TokenTree::MetaVarDecl(span, ident, kind));
- continue;
- }
- _ => end_sp,
+ Some(tokenstream::TokenTree::Token(Token { kind: token::Colon, span })) =>
+ match trees.next() {
+ Some(tokenstream::TokenTree::Token(token)) => match token.ident() {
+ Some((kind, _)) => {
+ let span = token.span.with_lo(start_sp.lo());
+ result.push(TokenTree::MetaVarDecl(span, ident, kind));
+ continue;
+ }
+ _ => token.span,
+ },
+ tree => tree
+ .as_ref()
+ .map(tokenstream::TokenTree::span)
+ .unwrap_or(span),
},
- tree => tree
- .as_ref()
- .map(tokenstream::TokenTree::span)
- .unwrap_or(span),
- },
tree => tree
.as_ref()
.map(tokenstream::TokenTree::span)
/// - `sess`: the parsing session. Any errors will be emitted to this session.
/// - `features`, `attrs`: language feature flags and attributes so that we know whether to use
/// unstable features or not.
-fn parse_tree<I>(
+fn parse_tree(
tree: tokenstream::TokenTree,
- trees: &mut Peekable<I>,
+ trees: &mut Peekable<impl Iterator<Item = tokenstream::TokenTree>>,
expect_matchers: bool,
sess: &ParseSess,
features: &Features,
attrs: &[ast::Attribute],
edition: Edition,
macro_node_id: NodeId,
-) -> TokenTree
-where
- I: Iterator<Item = tokenstream::TokenTree>,
-{
+) -> TokenTree {
// Depending on what `tree` is, we could be parsing different parts of a macro
match tree {
// `tree` is a `$` token. Look at the next token in `trees`
- tokenstream::TokenTree::Token(span, token::Dollar) => match trees.next() {
+ tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }) => match trees.next() {
// `tree` is followed by a delimited set of token trees. This indicates the beginning
// of a repetition sequence in the macro (e.g. `$(pat)*`).
Some(tokenstream::TokenTree::Delimited(span, delim, tts)) => {
// Must have `(` not `{` or `[`
if delim != token::Paren {
- let tok = pprust::token_to_string(&token::OpenDelim(delim));
+ let tok = pprust::token_kind_to_string(&token::OpenDelim(delim));
let msg = format!("expected `(`, found `{}`", tok);
sess.span_diagnostic.span_err(span.entire(), &msg);
}
macro_node_id,
);
// Get the Kleene operator and optional separator
- let (separator, op) =
- parse_sep_and_kleene_op(
- trees,
- span.entire(),
- sess,
- features,
- attrs,
- edition,
- macro_node_id,
- );
+ let (separator, op) = parse_sep_and_kleene_op(trees, span.entire(), sess);
// Count the number of captured "names" (i.e., named metavars)
let name_captures = macro_parser::count_names(&sequence);
TokenTree::Sequence(
// `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate` special
// metavariable that names the crate of the invocation.
- Some(tokenstream::TokenTree::Token(ident_span, ref token)) if token.is_ident() => {
+ Some(tokenstream::TokenTree::Token(token)) if token.is_ident() => {
let (ident, is_raw) = token.ident().unwrap();
- let span = ident_span.with_lo(span.lo());
+ let span = ident.span.with_lo(span.lo());
if ident.name == kw::Crate && !is_raw {
- let ident = ast::Ident::new(kw::DollarCrate, ident.span);
- TokenTree::Token(span, token::Ident(ident, is_raw))
+ TokenTree::token(token::Ident(kw::DollarCrate, is_raw), span)
} else {
TokenTree::MetaVar(span, ident)
}
}
// `tree` is followed by a random token. This is an error.
- Some(tokenstream::TokenTree::Token(span, tok)) => {
+ Some(tokenstream::TokenTree::Token(token)) => {
let msg = format!(
"expected identifier, found `{}`",
- pprust::token_to_string(&tok)
+ pprust::token_to_string(&token),
);
- sess.span_diagnostic.span_err(span, &msg);
- TokenTree::MetaVar(span, ast::Ident::invalid())
+ sess.span_diagnostic.span_err(token.span, &msg);
+ TokenTree::MetaVar(token.span, ast::Ident::invalid())
}
// There are no more tokens. Just return the `$` we already have.
- None => TokenTree::Token(span, token::Dollar),
+ None => TokenTree::token(token::Dollar, span),
},
// `tree` is an arbitrary token. Keep it.
- tokenstream::TokenTree::Token(span, tok) => TokenTree::Token(span, tok),
+ tokenstream::TokenTree::Token(token) => TokenTree::Token(token),
// `tree` is the beginning of a delimited set of tokens (e.g., `(` or `{`). We need to
// descend into the delimited set and further parse it.
/// Takes a token and returns `Some(KleeneOp)` if the token is `+` `*` or `?`. Otherwise, return
/// `None`.
-fn kleene_op(token: &token::Token) -> Option<KleeneOp> {
- match *token {
+fn kleene_op(token: &Token) -> Option<KleeneOp> {
+ match token.kind {
token::BinOp(token::Star) => Some(KleeneOp::ZeroOrMore),
token::BinOp(token::Plus) => Some(KleeneOp::OneOrMore),
token::Question => Some(KleeneOp::ZeroOrOne),
/// - Ok(Ok((op, span))) if the next token tree is a KleeneOp
/// - Ok(Err(tok, span)) if the next token tree is a token but not a KleeneOp
/// - Err(span) if the next token tree is not a token
-fn parse_kleene_op<I>(
- input: &mut I,
+fn parse_kleene_op(
+ input: &mut impl Iterator<Item = tokenstream::TokenTree>,
span: Span,
-) -> Result<Result<(KleeneOp, Span), (token::Token, Span)>, Span>
-where
- I: Iterator<Item = tokenstream::TokenTree>,
-{
+) -> Result<Result<(KleeneOp, Span), Token>, Span> {
match input.next() {
- Some(tokenstream::TokenTree::Token(span, tok)) => match kleene_op(&tok) {
- Some(op) => Ok(Ok((op, span))),
- None => Ok(Err((tok, span))),
+ Some(tokenstream::TokenTree::Token(token)) => match kleene_op(&token) {
+ Some(op) => Ok(Ok((op, token.span))),
+ None => Ok(Err(token)),
},
tree => Err(tree
.as_ref()
/// session `sess`. If the next one (or possibly two) tokens in `input` correspond to a Kleene
/// operator and separator, then a tuple with `(separator, KleeneOp)` is returned. Otherwise, an
/// error with the appropriate span is emitted to `sess` and a dummy value is returned.
-///
-/// N.B., in the 2015 edition, `*` and `+` are the only Kleene operators, and `?` is a separator.
-/// In the 2018 edition however, `?` is a Kleene operator, and not a separator.
-fn parse_sep_and_kleene_op<I>(
- input: &mut Peekable<I>,
- span: Span,
- sess: &ParseSess,
- features: &Features,
- attrs: &[ast::Attribute],
- edition: Edition,
- macro_node_id: NodeId,
-) -> (Option<token::Token>, KleeneOp)
-where
- I: Iterator<Item = tokenstream::TokenTree>,
-{
- match edition {
- Edition::Edition2015 => parse_sep_and_kleene_op_2015(
- input,
- span,
- sess,
- features,
- attrs,
- macro_node_id,
- ),
- Edition::Edition2018 => parse_sep_and_kleene_op_2018(input, span, sess, features, attrs),
- }
-}
-
-// `?` is a separator (with a migration warning) and never a KleeneOp.
-fn parse_sep_and_kleene_op_2015<I>(
- input: &mut Peekable<I>,
- span: Span,
- sess: &ParseSess,
- _features: &Features,
- _attrs: &[ast::Attribute],
- macro_node_id: NodeId,
-) -> (Option<token::Token>, KleeneOp)
-where
- I: Iterator<Item = tokenstream::TokenTree>,
-{
- // We basically look at two token trees here, denoted as #1 and #2 below
- let span = match parse_kleene_op(input, span) {
- // #1 is a `+` or `*` KleeneOp
- //
- // `?` is ambiguous: it could be a separator (warning) or a Kleene::ZeroOrOne (error), so
- // we need to look ahead one more token to be sure.
- Ok(Ok((op, _))) if op != KleeneOp::ZeroOrOne => return (None, op),
-
- // #1 is `?` token, but it could be a Kleene::ZeroOrOne (error in 2015) without a separator
- // or it could be a `?` separator followed by any Kleene operator. We need to look ahead 1
- // token to find out which.
- Ok(Ok((op, op1_span))) => {
- assert_eq!(op, KleeneOp::ZeroOrOne);
-
- // Lookahead at #2. If it is a KleenOp, then #1 is a separator.
- let is_1_sep = if let Some(&tokenstream::TokenTree::Token(_, ref tok2)) = input.peek() {
- kleene_op(tok2).is_some()
- } else {
- false
- };
-
- if is_1_sep {
- // #1 is a separator and #2 should be a KleepeOp.
- // (N.B. We need to advance the input iterator.)
- match parse_kleene_op(input, span) {
- // #2 is `?`, which is not allowed as a Kleene op in 2015 edition,
- // but is allowed in the 2018 edition.
- Ok(Ok((op, op2_span))) if op == KleeneOp::ZeroOrOne => {
- sess.span_diagnostic
- .struct_span_err(op2_span, "expected `*` or `+`")
- .note("`?` is not a macro repetition operator in the 2015 edition, \
- but is accepted in the 2018 edition")
- .emit();
-
- // Return a dummy
- return (None, KleeneOp::ZeroOrMore);
- }
-
- // #2 is a Kleene op, which is the only valid option
- Ok(Ok((op, _))) => {
- // Warn that `?` as a separator will be deprecated
- sess.buffer_lint(
- BufferedEarlyLintId::QuestionMarkMacroSep,
- op1_span,
- macro_node_id,
- "using `?` as a separator is deprecated and will be \
- a hard error in an upcoming edition",
- );
-
- return (Some(token::Question), op);
- }
-
- // #2 is a random token (this is an error) :(
- Ok(Err((_, _))) => op1_span,
-
- // #2 is not even a token at all :(
- Err(_) => op1_span,
- }
- } else {
- // `?` is not allowed as a Kleene op in 2015,
- // but is allowed in the 2018 edition
- sess.span_diagnostic
- .struct_span_err(op1_span, "expected `*` or `+`")
- .note("`?` is not a macro repetition operator in the 2015 edition, \
- but is accepted in the 2018 edition")
- .emit();
-
- // Return a dummy
- return (None, KleeneOp::ZeroOrMore);
- }
- }
-
- // #1 is a separator followed by #2, a KleeneOp
- Ok(Err((tok, span))) => match parse_kleene_op(input, span) {
- // #2 is a `?`, which is not allowed as a Kleene op in 2015 edition,
- // but is allowed in the 2018 edition
- Ok(Ok((op, op2_span))) if op == KleeneOp::ZeroOrOne => {
- sess.span_diagnostic
- .struct_span_err(op2_span, "expected `*` or `+`")
- .note("`?` is not a macro repetition operator in the 2015 edition, \
- but is accepted in the 2018 edition")
- .emit();
-
- // Return a dummy
- return (None, KleeneOp::ZeroOrMore);
- }
-
- // #2 is a KleeneOp :D
- Ok(Ok((op, _))) => return (Some(tok), op),
-
- // #2 is a random token :(
- Ok(Err((_, span))) => span,
-
- // #2 is not a token at all :(
- Err(span) => span,
- },
-
- // #1 is not a token
- Err(span) => span,
- };
-
- sess.span_diagnostic.span_err(span, "expected `*` or `+`");
-
- // Return a dummy
- (None, KleeneOp::ZeroOrMore)
-}
-
-// `?` is a Kleene op, not a separator
-fn parse_sep_and_kleene_op_2018<I>(
- input: &mut Peekable<I>,
+fn parse_sep_and_kleene_op(
+ input: &mut Peekable<impl Iterator<Item = tokenstream::TokenTree>>,
span: Span,
sess: &ParseSess,
- _features: &Features,
- _attrs: &[ast::Attribute],
-) -> (Option<token::Token>, KleeneOp)
-where
- I: Iterator<Item = tokenstream::TokenTree>,
-{
+) -> (Option<Token>, KleeneOp) {
// We basically look at two token trees here, denoted as #1 and #2 below
let span = match parse_kleene_op(input, span) {
- // #1 is a `?` (needs feature gate)
- Ok(Ok((op, _op1_span))) if op == KleeneOp::ZeroOrOne => {
- return (None, op);
- }
-
- // #1 is a `+` or `*` KleeneOp
+ // #1 is a `?`, `+`, or `*` KleeneOp
Ok(Ok((op, _))) => return (None, op),
// #1 is a separator followed by #2, a KleeneOp
- Ok(Err((tok, span))) => match parse_kleene_op(input, span) {
+ Ok(Err(token)) => match parse_kleene_op(input, token.span) {
// #2 is the `?` Kleene op, which does not take a separator (error)
- Ok(Ok((op, _op2_span))) if op == KleeneOp::ZeroOrOne => {
+ Ok(Ok((KleeneOp::ZeroOrOne, _))) => {
// Error!
sess.span_diagnostic.span_err(
- span,
+ token.span,
"the `?` macro repetition operator does not take a separator",
);
}
// #2 is a KleeneOp :D
- Ok(Ok((op, _))) => return (Some(tok), op),
-
- // #2 is a random token :(
- Ok(Err((_, span))) => span,
+ Ok(Ok((op, _))) => return (Some(token), op),
- // #2 is not a token at all :(
- Err(span) => span,
+ // #2 is a random token or not a token at all :(
+ Ok(Err(Token { span, .. })) | Err(span) => span,
},
// #1 is not a token
use crate::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndJoint};
use smallvec::{smallvec, SmallVec};
-use syntax_pos::DUMMY_SP;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
else {
// Otherwise, if we have just reached the end of a sequence and we can keep repeating,
// go back to the beginning of the sequence.
- if let Frame::Sequence { ref mut idx, ref sep, .. } = *stack.last_mut().unwrap() {
- let (ref mut repeat_idx, repeat_len) = *repeats.last_mut().unwrap();
+ if let Frame::Sequence { idx, sep, .. } = stack.last_mut().unwrap() {
+ let (repeat_idx, repeat_len) = repeats.last_mut().unwrap();
*repeat_idx += 1;
- if *repeat_idx < repeat_len {
+ if repeat_idx < repeat_len {
*idx = 0;
- if let Some(sep) = sep.clone() {
- let prev_span = match result.last() {
- Some((tt, _)) => tt.span(),
- None => DUMMY_SP,
- };
- result.push(TokenTree::Token(prev_span, sep).into());
+ if let Some(sep) = sep {
+ result.push(TokenTree::Token(sep.clone()).into());
}
continue;
}
result.push(tt.clone().into());
} else {
sp = sp.apply_mark(cx.current_expansion.mark);
- let token = TokenTree::Token(sp, Token::Interpolated(nt.clone()));
+ let token = TokenTree::token(token::Interpolated(nt.clone()), sp);
result.push(token.into());
}
} else {
let ident =
Ident::new(ident.name, ident.span.apply_mark(cx.current_expansion.mark));
sp = sp.apply_mark(cx.current_expansion.mark);
- result.push(TokenTree::Token(sp, token::Dollar).into());
- result.push(TokenTree::Token(sp, token::Token::from_ast_ident(ident)).into());
+ result.push(TokenTree::token(token::Dollar, sp).into());
+ result.push(TokenTree::Token(Token::from_ast_ident(ident)).into());
}
}
// Nothing much to do here. Just push the token to the result, being careful to
// preserve syntax context.
- quoted::TokenTree::Token(sp, tok) => {
+ quoted::TokenTree::Token(token) => {
let mut marker = Marker(cx.current_expansion.mark);
- let mut tt = TokenTree::Token(sp, tok);
+ let mut tt = TokenTree::Token(token);
noop_visit_tt(&mut tt, &mut marker);
result.push(tt.into());
}
// Allows using `#[optimize(X)]`.
(active, optimize_attribute, "1.34.0", Some(54882), None),
- // Allows using `#[repr(align(X))]` on enums.
- (active, repr_align_enum, "1.34.0", Some(57996), None),
-
// Allows using C-variadics.
(active, c_variadic, "1.34.0", Some(44930), None),
// Allows the user of associated type bounds.
(active, associated_type_bounds, "1.34.0", Some(52662), None),
+ // Allows calling constructor functions in `const fn`
+ // FIXME Create issue
+ (active, const_constructor, "1.37.0", Some(61456), None),
+
+ // #[repr(transparent)] on enums.
+ (active, transparent_enums, "1.37.0", Some(60405), None),
+
+ // #[repr(transparent)] on unions.
+ (active, transparent_unions, "1.37.0", Some(60405), None),
+
// -------------------------------------------------------------------------
// feature-group-end: actual feature gates
// -------------------------------------------------------------------------
(accepted, extern_crate_self, "1.34.0", Some(56409), None),
// Allows arbitrary delimited token streams in non-macro attributes.
(accepted, unrestricted_attribute_tokens, "1.34.0", Some(55208), None),
+ // Allows using `#[repr(align(X))]` on enums with equivalent semantics
+ // to wrapping an enum in a wrapper struct with `#[repr(align(X))]`.
+ (accepted, repr_align_enum, "1.37.0", Some(57996), None),
// -------------------------------------------------------------------------
// feature-group-end: accepted features
"internal implementation detail",
cfg_fn!(rustc_attrs))),
+ (sym::rustc_allocator, Whitelisted, template!(Word), Gated(Stability::Unstable,
+ sym::rustc_attrs,
+ "internal implementation detail",
+ cfg_fn!(rustc_attrs))),
+
+ (sym::rustc_dummy, Normal, template!(Word /* doesn't matter*/), Gated(Stability::Unstable,
+ sym::rustc_attrs,
+ "used by the test suite",
+ cfg_fn!(rustc_attrs))),
+
// FIXME: #14408 whitelist docs since rustdoc looks at them
(
sym::doc,
}
match attr_info {
- Some(&(name, _, template, _)) => self.check_builtin_attribute(
- attr,
- name,
- template
- ),
- None => if let Some(TokenTree::Token(_, token::Eq)) = attr.tokens.trees().next() {
- // All key-value attributes are restricted to meta-item syntax.
- attr.parse_meta(self.context.parse_sess).map_err(|mut err| err.emit()).ok();
+ // `rustc_dummy` doesn't have any restrictions specific to built-in attributes.
+ Some(&(name, _, template, _)) if name != sym::rustc_dummy =>
+ self.check_builtin_attribute(attr, name, template),
+ _ => if let Some(TokenTree::Token(token)) = attr.tokens.trees().next() {
+ if token == token::Eq {
+ // All key-value attributes are restricted to meta-item syntax.
+ attr.parse_meta(self.context.parse_sess).map_err(|mut err| err.emit()).ok();
+ }
}
}
}
}
}
- ast::ItemKind::Enum(..) => {
- for attr in attr::filter_by_name(&i.attrs[..], sym::repr) {
- for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
- if item.check_name(sym::align) {
- gate_feature_post!(&self, repr_align_enum, attr.span,
- "`#[repr(align(x))]` on enums is experimental");
- }
- }
- }
- }
-
ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, _) => {
if polarity == ast::ImplPolarity::Negative {
gate_feature_post!(&self, optin_builtin_traits,
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
+#![feature(bind_by_move_pattern_guards)]
#![feature(crate_visibility_modifier)]
#![feature(label_break_value)]
#![feature(nll)]
pub mod json;
-pub mod syntax {
- pub use crate::ext;
- pub use crate::parse;
- pub use crate::ast;
-}
-
pub mod ast;
pub mod attr;
pub mod source_map;
pub fn noop_visit_tt<T: MutVisitor>(tt: &mut TokenTree, vis: &mut T) {
match tt {
- TokenTree::Token(span, tok) => {
- vis.visit_span(span);
- vis.visit_token(tok);
+ TokenTree::Token(token) => {
+ vis.visit_token(token);
}
TokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
vis.visit_span(open);
})
}
-// apply ident visitor if it's an ident, apply other visits to interpolated nodes
+// Apply ident visitor if it's an ident, apply other visits to interpolated nodes.
+// In practice the ident part is not actually used by specific visitors right now,
+// but there's a test below checking that it works.
pub fn noop_visit_token<T: MutVisitor>(t: &mut Token, vis: &mut T) {
- match t {
- token::Ident(id, _is_raw) => vis.visit_ident(id),
- token::Lifetime(id) => vis.visit_ident(id),
+ let Token { kind, span } = t;
+ match kind {
+ token::Ident(name, _) | token::Lifetime(name) => {
+ let mut ident = Ident::new(*name, *span);
+ vis.visit_ident(&mut ident);
+ *name = ident.name;
+ *span = ident.span;
+ return; // avoid visiting the span for the second time
+ }
token::Interpolated(nt) => {
let mut nt = Lrc::make_mut(nt);
vis.visit_interpolated(&mut nt);
}
_ => {}
}
+ vis.visit_span(span);
}
/// Apply visitor to elements of interpolated nodes.
vis.visit_expr(rhs);
}
ExprKind::Unary(_unop, ohs) => vis.visit_expr(ohs),
- ExprKind::Lit(_lit) => {}
ExprKind::Cast(expr, ty) => {
vis.visit_expr(expr);
vis.visit_ty(ty);
}
ExprKind::Try(expr) => vis.visit_expr(expr),
ExprKind::TryBlock(body) => vis.visit_block(body),
- ExprKind::Err => {}
+ ExprKind::Lit(_) | ExprKind::Err => {}
}
vis.visit_id(id);
vis.visit_span(span);
let mut just_parsed_doc_comment = false;
loop {
debug!("parse_outer_attributes: self.token={:?}", self.token);
- match self.token {
+ match self.token.kind {
token::Pound => {
let inner_error_reason = if just_parsed_doc_comment {
"an inner attribute is not permitted following an outer doc comment"
just_parsed_doc_comment = false;
}
token::DocComment(s) => {
- let attr = attr::mk_sugared_doc_attr(attr::mk_attr_id(), s, self.span);
+ let attr = attr::mk_sugared_doc_attr(attr::mk_attr_id(), s, self.token.span);
if attr.style != ast::AttrStyle::Outer {
let mut err = self.fatal("expected outer doc comment");
err.note("inner doc comments like this (starting with \
debug!("parse_attribute_with_inner_parse_policy: inner_parse_policy={:?} self.token={:?}",
inner_parse_policy,
self.token);
- let (span, path, tokens, style) = match self.token {
+ let (span, path, tokens, style) = match self.token.kind {
token::Pound => {
- let lo = self.span;
+ let lo = self.token.span;
self.bump();
if let InnerAttributeParsePolicy::Permitted = inner_parse_policy {
self.bump();
if let InnerAttributeParsePolicy::NotPermitted { reason } = inner_parse_policy
{
- let span = self.span;
+ let span = self.token.span;
self.diagnostic()
.struct_span_err(span, reason)
.note("inner attributes, like `#![no_std]`, annotate the item \
/// PATH `=` TOKEN_TREE
/// The delimiters or `=` are still put into the resulting token stream.
crate fn parse_meta_item_unrestricted(&mut self) -> PResult<'a, (ast::Path, TokenStream)> {
- let meta = match self.token {
+ let meta = match self.token.kind {
token::Interpolated(ref nt) => match **nt {
Nonterminal::NtMeta(ref meta) => Some(meta.clone()),
_ => None,
self.check(&token::OpenDelim(DelimToken::Brace)) {
self.parse_token_tree().into()
} else if self.eat(&token::Eq) {
- let eq = TokenTree::Token(self.prev_span, token::Eq);
+ let eq = TokenTree::token(token::Eq, self.prev_span);
let mut is_interpolated_expr = false;
- if let token::Interpolated(nt) = &self.token {
+ if let token::Interpolated(nt) = &self.token.kind {
if let token::NtExpr(..) = **nt {
is_interpolated_expr = true;
}
crate fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
let mut attrs: Vec<ast::Attribute> = vec![];
loop {
- match self.token {
+ match self.token.kind {
token::Pound => {
// Don't even try to parse if it's not an inner attribute.
if !self.look_ahead(1, |t| t == &token::Not) {
}
token::DocComment(s) => {
// we need to get the position of this token before we bump.
- let attr = attr::mk_sugared_doc_attr(attr::mk_attr_id(), s, self.span);
+ let attr = attr::mk_sugared_doc_attr(attr::mk_attr_id(), s, self.token.span);
if attr.style == ast::AttrStyle::Inner {
attrs.push(attr);
self.bump();
/// meta_item : IDENT ( '=' UNSUFFIXED_LIT | '(' meta_item_inner? ')' )? ;
/// meta_item_inner : (meta_item | UNSUFFIXED_LIT) (',' meta_item_inner)? ;
pub fn parse_meta_item(&mut self) -> PResult<'a, ast::MetaItem> {
- let nt_meta = match self.token {
+ let nt_meta = match self.token.kind {
token::Interpolated(ref nt) => match **nt {
token::NtMeta(ref e) => Some(e.clone()),
_ => None,
return Ok(meta);
}
- let lo = self.span;
+ let lo = self.token.span;
let path = self.parse_path(PathStyle::Mod)?;
let node = self.parse_meta_item_kind()?;
let span = lo.to(self.prev_span);
let found = self.this_token_to_string();
let msg = format!("expected unsuffixed literal or identifier, found `{}`", found);
- Err(self.diagnostic().struct_span_err(self.span, &msg))
+ Err(self.diagnostic().struct_span_err(self.token.span, &msg))
}
/// matches meta_seq = ( COMMASEP(meta_item_inner) )
self, Arg, BinOpKind, BindingMode, BlockCheckMode, Expr, ExprKind, Ident, Item, ItemKind,
Mutability, Pat, PatKind, PathSegment, QSelf, Ty, TyKind, VariantData,
};
-use crate::parse::{SeqSep, token, PResult, Parser};
+use crate::parse::{SeqSep, PResult, Parser};
use crate::parse::parser::{BlockMode, PathStyle, SemiColonMode, TokenType, TokenExpectType};
+use crate::parse::token::{self, TokenKind};
use crate::print::pprust;
use crate::ptr::P;
use crate::source_map::Spanned;
impl<'a> Parser<'a> {
pub fn fatal(&self, m: &str) -> DiagnosticBuilder<'a> {
- self.span_fatal(self.span, m)
+ self.span_fatal(self.token.span, m)
}
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> {
}
pub fn bug(&self, m: &str) -> ! {
- self.sess.span_diagnostic.span_bug(self.span, m)
+ self.sess.span_diagnostic.span_bug(self.token.span, m)
}
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) {
crate fn expected_ident_found(&self) -> DiagnosticBuilder<'a> {
let mut err = self.struct_span_err(
- self.span,
+ self.token.span,
&format!("expected identifier, found {}", self.this_token_descr()),
);
- if let token::Ident(ident, false) = &self.token {
- if ident.is_raw_guess() {
+ if let token::Ident(name, false) = self.token.kind {
+ if Ident::new(name, self.token.span).is_raw_guess() {
err.span_suggestion(
- self.span,
+ self.token.span,
"you can escape reserved keywords to use them as identifiers",
- format!("r#{}", ident),
+ format!("r#{}", name),
Applicability::MaybeIncorrect,
);
}
}
if let Some(token_descr) = self.token_descr() {
- err.span_label(self.span, format!("expected identifier, found {}", token_descr));
+ err.span_label(self.token.span, format!("expected identifier, found {}", token_descr));
} else {
- err.span_label(self.span, "expected identifier");
+ err.span_label(self.token.span, "expected identifier");
if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
err.span_suggestion(
- self.span,
+ self.token.span,
"remove this comma",
String::new(),
Applicability::MachineApplicable,
pub fn expected_one_of_not_found(
&mut self,
- edible: &[token::Token],
- inedible: &[token::Token],
+ edible: &[TokenKind],
+ inedible: &[TokenKind],
) -> PResult<'a, bool /* recovered */> {
fn tokens_to_string(tokens: &[TokenType]) -> String {
let mut i = tokens.iter();
(self.sess.source_map().next_point(self.prev_span),
format!("expected {} here", expect)))
};
- self.last_unexpected_token_span = Some(self.span);
+ self.last_unexpected_token_span = Some(self.token.span);
let mut err = self.fatal(&msg_exp);
if self.token.is_ident_named(sym::and) {
err.span_suggestion_short(
- self.span,
+ self.token.span,
"use `&&` instead of `and` for the boolean operator",
"&&".to_string(),
Applicability::MaybeIncorrect,
}
if self.token.is_ident_named(sym::or) {
err.span_suggestion_short(
- self.span,
+ self.token.span,
"use `||` instead of `or` for the boolean operator",
"||".to_string(),
Applicability::MaybeIncorrect,
);
}
- let sp = if self.token == token::Token::Eof {
+ let sp = if self.token == token::Eof {
// This is EOF, don't want to point at the following char, but rather the last token
self.prev_span
} else {
self.token.is_keyword(kw::While)
);
let cm = self.sess.source_map();
- match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
+ match (cm.lookup_line(self.token.span.lo()), cm.lookup_line(sp.lo())) {
(Ok(ref a), Ok(ref b)) if a.line != b.line && is_semi_suggestable => {
// The spans are in different lines, expected `;` and found `let` or `return`.
// High likelihood that it is only a missing `;`.
// | -^^^^^ unexpected token
// | |
// | expected one of 8 possible tokens here
- err.span_label(self.span, label_exp);
+ err.span_label(self.token.span, label_exp);
}
_ if self.prev_span == syntax_pos::DUMMY_SP => {
// Account for macro context where the previous span might not be
// available to avoid incorrect output (#54841).
- err.span_label(self.span, "unexpected token");
+ err.span_label(self.token.span, "unexpected token");
}
_ => {
err.span_label(sp, label_exp);
- err.span_label(self.span, "unexpected token");
+ err.span_label(self.token.span, "unexpected token");
}
}
Err(err)
/// Eats and discards tokens until one of `kets` is encountered. Respects token trees,
/// passes through any errors encountered. Used for error recovery.
- crate fn eat_to_tokens(&mut self, kets: &[&token::Token]) {
+ crate fn eat_to_tokens(&mut self, kets: &[&TokenKind]) {
let handler = self.diagnostic();
if let Err(ref mut err) = self.parse_seq_to_before_tokens(
/// let _ = vec![1, 2, 3].into_iter().collect::<Vec<usize>>>>();
/// ^^ help: remove extra angle brackets
/// ```
- crate fn check_trailing_angle_brackets(&mut self, segment: &PathSegment, end: token::Token) {
+ crate fn check_trailing_angle_brackets(&mut self, segment: &PathSegment, end: TokenKind) {
// This function is intended to be invoked after parsing a path segment where there are two
// cases:
//
// Keep the span at the start so we can highlight the sequence of `>` characters to be
// removed.
- let lo = self.span;
+ let lo = self.token.span;
// We need to look-ahead to see if we have `>` characters without moving the cursor forward
// (since we might have the field access case and the characters we're eating are
// Eat from where we started until the end token so that parsing can continue
// as if we didn't have those extra angle brackets.
self.eat_to_tokens(&[&end]);
- let span = lo.until(self.span);
+ let span = lo.until(self.token.span);
let plural = number_of_gt > 1 || number_of_shr >= 1;
self.diagnostic()
match lhs.node {
ExprKind::Binary(op, _, _) if op.node.is_comparison() => {
// respan to include both operators
- let op_span = op.span.to(self.span);
+ let op_span = op.span.to(self.token.span);
let mut err = self.diagnostic().struct_span_err(op_span,
"chained comparison operators require parentheses");
if op.node == BinOpKind::Lt &&
/// closing delimiter.
pub fn unexpected_try_recover(
&mut self,
- t: &token::Token,
+ t: &TokenKind,
) -> PResult<'a, bool /* recovered */> {
- let token_str = pprust::token_to_string(t);
+ let token_str = pprust::token_kind_to_string(t);
let this_token_str = self.this_token_descr();
- let (prev_sp, sp) = match (&self.token, self.subparser_name) {
+ let (prev_sp, sp) = match (&self.token.kind, self.subparser_name) {
// Point at the end of the macro call when reaching end of macro arguments.
- (token::Token::Eof, Some(_)) => {
- let sp = self.sess.source_map().next_point(self.span);
+ (token::Eof, Some(_)) => {
+ let sp = self.sess.source_map().next_point(self.token.span);
(sp, sp)
}
// We don't want to point at the following span after DUMMY_SP.
// This happens when the parser finds an empty TokenStream.
- _ if self.prev_span == DUMMY_SP => (self.span, self.span),
+ _ if self.prev_span == DUMMY_SP => (self.token.span, self.token.span),
// EOF, don't want to point at the following char, but rather the last token.
- (token::Token::Eof, None) => (self.prev_span, self.span),
- _ => (self.sess.source_map().next_point(self.prev_span), self.span),
+ (token::Eof, None) => (self.prev_span, self.token.span),
+ _ => (self.sess.source_map().next_point(self.prev_span), self.token.span),
};
let msg = format!(
"expected `{}`, found {}",
token_str,
- match (&self.token, self.subparser_name) {
- (token::Token::Eof, Some(origin)) => format!("end of {}", origin),
+ match (&self.token.kind, self.subparser_name) {
+ (token::Eof, Some(origin)) => format!("end of {}", origin),
_ => this_token_str,
},
);
// interpreting `await { <expr> }?` as `<expr>?.await`.
self.parse_block_expr(
None,
- self.span,
+ self.token.span,
BlockCheckMode::Default,
ThinVec::new(),
)
self.look_ahead(1, |t| t == &token::CloseDelim(token::Paren))
{
// future.await()
- let lo = self.span;
+ let lo = self.token.span;
self.bump(); // (
- let sp = lo.to(self.span);
+ let sp = lo.to(self.token.span);
self.bump(); // )
self.struct_span_err(sp, "incorrect use of `await`")
.span_suggestion(
next_sp: Span,
maybe_path: bool,
) {
- err.span_label(self.span, "expecting a type here because of type ascription");
+ err.span_label(self.token.span, "expecting a type here because of type ascription");
let cm = self.sess.source_map();
let next_pos = cm.lookup_char_pos(next_sp.lo());
let op_pos = cm.lookup_char_pos(cur_op_span.hi());
crate fn recover_closing_delimiter(
&mut self,
- tokens: &[token::Token],
+ tokens: &[TokenKind],
mut err: DiagnosticBuilder<'a>,
) -> PResult<'a, bool> {
let mut pos = None;
// we want to use the last closing delim that would apply
for (i, unmatched) in self.unclosed_delims.iter().enumerate().rev() {
if tokens.contains(&token::CloseDelim(unmatched.expected_delim))
- && Some(self.span) > unmatched.unclosed_span
+ && Some(self.token.span) > unmatched.unclosed_span
{
pos = Some(i);
}
break_on_semi, break_on_block);
loop {
debug!("recover_stmt_ loop {:?}", self.token);
- match self.token {
+ match self.token.kind {
token::OpenDelim(token::DelimToken::Brace) => {
brace_depth += 1;
self.bump();
crate fn expected_semi_or_open_brace(&mut self) -> PResult<'a, ast::TraitItem> {
let token_str = self.this_token_descr();
let mut err = self.fatal(&format!("expected `;` or `{{`, found {}", token_str));
- err.span_label(self.span, "expected `;` or `{`");
+ err.span_label(self.token.span, "expected `;` or `{`");
Err(err)
}
crate fn eat_incorrect_doc_comment(&mut self, applied_to: &str) {
- if let token::DocComment(_) = self.token {
+ if let token::DocComment(_) = self.token.kind {
let mut err = self.diagnostic().struct_span_err(
- self.span,
+ self.token.span,
&format!("documentation comments cannot be applied to {}", applied_to),
);
- err.span_label(self.span, "doc comments are not allowed here");
+ err.span_label(self.token.span, "doc comments are not allowed here");
err.emit();
self.bump();
} else if self.token == token::Pound && self.look_ahead(1, |t| {
*t == token::OpenDelim(token::Bracket)
}) {
- let lo = self.span;
+ let lo = self.token.span;
// Skip every token until next possible arg.
while self.token != token::CloseDelim(token::Bracket) {
self.bump();
}
- let sp = lo.to(self.span);
+ let sp = lo.to(self.token.span);
self.bump();
let mut err = self.diagnostic().struct_span_err(
sp,
}
crate fn expected_expression_found(&self) -> DiagnosticBuilder<'a> {
- let (span, msg) = match (&self.token, self.subparser_name) {
- (&token::Token::Eof, Some(origin)) => {
- let sp = self.sess.source_map().next_point(self.span);
+ let (span, msg) = match (&self.token.kind, self.subparser_name) {
+ (&token::Eof, Some(origin)) => {
+ let sp = self.sess.source_map().next_point(self.token.span);
(sp, format!("expected expression, found end of {}", origin))
}
- _ => (self.span, format!(
+ _ => (self.token.span, format!(
"expected expression, found {}",
self.this_token_descr(),
)),
};
let mut err = self.struct_span_err(span, &msg);
- let sp = self.sess.source_map().start_point(self.span);
+ let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
self.sess.expr_parentheses_needed(&mut err, *sp, None);
}
-use crate::ast::{self, Ident};
+use crate::ast;
use crate::parse::ParseSess;
-use crate::parse::token::{self, Token};
+use crate::parse::token::{self, Token, TokenKind};
use crate::symbol::{sym, Symbol};
use crate::parse::unescape;
use crate::parse::unescape_error_reporting::{emit_unescape_error, push_escaped_char};
use std::borrow::Cow;
use std::char;
use std::iter;
-use std::mem::replace;
use rustc_data_structures::sync::Lrc;
use log::debug;
mod tokentrees;
mod unicode_chars;
-#[derive(Clone, Debug)]
-pub struct TokenAndSpan {
- pub tok: Token,
- pub sp: Span,
-}
-
-impl Default for TokenAndSpan {
- fn default() -> Self {
- TokenAndSpan {
- tok: token::Whitespace,
- sp: syntax_pos::DUMMY_SP,
- }
- }
-}
-
#[derive(Clone, Debug)]
pub struct UnmatchedBrace {
pub expected_delim: token::DelimToken,
/// Stop reading src at this index.
crate end_src_index: usize,
// cached:
- peek_tok: Token,
- peek_span: Span,
+ peek_token: Token,
peek_span_src_raw: Span,
fatal_errs: Vec<DiagnosticBuilder<'a>>,
// cache a direct reference to the source text, so that we don't have to
(real, raw)
}
- fn mk_ident(&self, string: &str) -> Ident {
- let mut ident = Ident::from_str(string);
- if let Some(span) = self.override_span {
- ident.span = span;
- }
-
- ident
- }
-
- fn unwrap_or_abort(&mut self, res: Result<TokenAndSpan, ()>) -> TokenAndSpan {
+ fn unwrap_or_abort(&mut self, res: Result<Token, ()>) -> Token {
match res {
Ok(tok) => tok,
Err(_) => {
}
}
- fn next_token(&mut self) -> TokenAndSpan where Self: Sized {
+ fn next_token(&mut self) -> Token where Self: Sized {
let res = self.try_next_token();
self.unwrap_or_abort(res)
}
/// Returns the next token. EFFECT: advances the string_reader.
- pub fn try_next_token(&mut self) -> Result<TokenAndSpan, ()> {
+ pub fn try_next_token(&mut self) -> Result<Token, ()> {
assert!(self.fatal_errs.is_empty());
- let ret_val = TokenAndSpan {
- tok: replace(&mut self.peek_tok, token::Whitespace),
- sp: self.peek_span,
- };
+ let ret_val = self.peek_token.take();
self.advance_token()?;
Ok(ret_val)
}
return None;
}
- fn try_real_token(&mut self) -> Result<TokenAndSpan, ()> {
+ fn try_real_token(&mut self) -> Result<Token, ()> {
let mut t = self.try_next_token()?;
loop {
- match t.tok {
+ match t.kind {
token::Whitespace | token::Comment | token::Shebang(_) => {
t = self.try_next_token()?;
}
Ok(t)
}
- pub fn real_token(&mut self) -> TokenAndSpan {
+ pub fn real_token(&mut self) -> Token {
let res = self.try_real_token();
self.unwrap_or_abort(res)
}
self.ch.is_none()
}
- fn fail_unterminated_raw_string(&self, pos: BytePos, hash_count: u16) {
+ fn fail_unterminated_raw_string(&self, pos: BytePos, hash_count: u16) -> ! {
let mut err = self.struct_span_fatal(pos, pos, "unterminated raw string");
err.span_label(self.mk_sp(pos, pos), "unterminated raw string");
}
fn fatal(&self, m: &str) -> FatalError {
- self.fatal_span(self.peek_span, m)
+ self.fatal_span(self.peek_token.span, m)
}
crate fn emit_fatal_errors(&mut self) {
buffer
}
- pub fn peek(&self) -> TokenAndSpan {
- // FIXME(pcwalton): Bad copy!
- TokenAndSpan {
- tok: self.peek_tok.clone(),
- sp: self.peek_span,
- }
+ pub fn peek(&self) -> &Token {
+ &self.peek_token
}
/// For comments.rs, which hackily pokes into next_pos and ch
ch: Some('\n'),
source_file,
end_src_index: src.len(),
- // dummy values; not read
- peek_tok: token::Eof,
- peek_span: syntax_pos::DUMMY_SP,
+ peek_token: Token::dummy(),
peek_span_src_raw: syntax_pos::DUMMY_SP,
src,
fatal_errs: Vec::new(),
self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), &m[..])
}
- /// Report a lexical error spanning [`from_pos`, `to_pos`), appending an
- /// escaped character to the error message
- fn err_span_char(&self, from_pos: BytePos, to_pos: BytePos, m: &str, c: char) {
- let mut m = m.to_string();
- m.push_str(": ");
- push_escaped_char(&mut m, c);
- self.err_span_(from_pos, to_pos, &m[..]);
- }
-
- /// Advance peek_tok and peek_span to refer to the next token, and
+ /// Advance peek_token to refer to the next token, and
/// possibly update the interner.
fn advance_token(&mut self) -> Result<(), ()> {
match self.scan_whitespace_or_comment() {
Some(comment) => {
- self.peek_span_src_raw = comment.sp;
- self.peek_span = comment.sp;
- self.peek_tok = comment.tok;
+ self.peek_span_src_raw = comment.span;
+ self.peek_token = comment;
}
None => {
- if self.is_eof() {
- self.peek_tok = token::Eof;
- let (real, raw) = self.mk_sp_and_raw(
- self.source_file.end_pos,
- self.source_file.end_pos,
- );
- self.peek_span = real;
- self.peek_span_src_raw = raw;
+ let (kind, start_pos, end_pos) = if self.is_eof() {
+ (token::Eof, self.source_file.end_pos, self.source_file.end_pos)
} else {
- let start_bytepos = self.pos;
- self.peek_tok = self.next_token_inner()?;
- let (real, raw) = self.mk_sp_and_raw(start_bytepos, self.pos);
- self.peek_span = real;
- self.peek_span_src_raw = raw;
+ let start_pos = self.pos;
+ (self.next_token_inner()?, start_pos, self.pos)
};
+ let (real, raw) = self.mk_sp_and_raw(start_pos, end_pos);
+ self.peek_token = Token::new(kind, real);
+ self.peek_span_src_raw = raw;
}
}
/// PRECONDITION: self.ch is not whitespace
/// Eats any kind of comment.
- fn scan_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_comment(&mut self) -> Option<Token> {
if let Some(c) = self.ch {
if c.is_whitespace() {
let msg = "called consume_any_line_comment, but there was whitespace";
self.bump();
}
- let tok = if doc_comment {
+ let kind = if doc_comment {
self.with_str_from(start_bpos, |string| {
token::DocComment(Symbol::intern(string))
})
} else {
token::Comment
};
- Some(TokenAndSpan { tok, sp: self.mk_sp(start_bpos, self.pos) })
+ Some(Token::new(kind, self.mk_sp(start_bpos, self.pos)))
}
Some('*') => {
self.bump();
while !self.ch_is('\n') && !self.is_eof() {
self.bump();
}
- return Some(TokenAndSpan {
- tok: token::Shebang(self.name_from(start)),
- sp: self.mk_sp(start, self.pos),
- });
+ return Some(Token::new(
+ token::Shebang(self.name_from(start)),
+ self.mk_sp(start, self.pos),
+ ));
}
}
None
/// If there is whitespace, shebang, or a comment, scan it. Otherwise,
/// return `None`.
- fn scan_whitespace_or_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_whitespace_or_comment(&mut self) -> Option<Token> {
match self.ch.unwrap_or('\0') {
// # to handle shebang at start of file -- this is the entry point
// for skipping over all "junk"
while is_pattern_whitespace(self.ch) {
self.bump();
}
- let c = Some(TokenAndSpan {
- tok: token::Whitespace,
- sp: self.mk_sp(start_bpos, self.pos),
- });
+ let c = Some(Token::new(token::Whitespace, self.mk_sp(start_bpos, self.pos)));
debug!("scanning whitespace: {:?}", c);
c
}
}
/// Might return a sugared-doc-attr
- fn scan_block_comment(&mut self) -> Option<TokenAndSpan> {
+ fn scan_block_comment(&mut self) -> Option<Token> {
// block comments starting with "/**" or "/*!" are doc-comments
let is_doc_comment = self.ch_is('*') || self.ch_is('!');
let start_bpos = self.pos - BytePos(2);
self.with_str_from(start_bpos, |string| {
// but comments with only "*"s between two "/"s are not
- let tok = if is_block_doc_comment(string) {
+ let kind = if is_block_doc_comment(string) {
let string = if has_cr {
self.translate_crlf(start_bpos,
string,
token::Comment
};
- Some(TokenAndSpan {
- tok,
- sp: self.mk_sp(start_bpos, self.pos),
- })
+ Some(Token::new(kind, self.mk_sp(start_bpos, self.pos)))
})
}
}
}
- fn binop(&mut self, op: token::BinOpToken) -> Token {
+ fn binop(&mut self, op: token::BinOpToken) -> TokenKind {
self.bump();
if self.ch_is('=') {
self.bump();
/// Returns the next token from the string, advances the input past that
/// token, and updates the interner
- fn next_token_inner(&mut self) -> Result<Token, ()> {
+ fn next_token_inner(&mut self) -> Result<TokenKind, ()> {
let c = self.ch;
if ident_start(c) {
return Ok(self.with_str_from(start, |string| {
// FIXME: perform NFKC normalization here. (Issue #2253)
- let ident = self.mk_ident(string);
+ let name = ast::Name::intern(string);
if is_raw_ident {
let span = self.mk_sp(raw_start, self.pos);
- if !ident.can_be_raw() {
- self.err_span(span, &format!("`{}` cannot be a raw identifier", ident));
+ if !name.can_be_raw() {
+ self.err_span(span, &format!("`{}` cannot be a raw identifier", name));
}
self.sess.raw_identifier_spans.borrow_mut().push(span);
}
- token::Ident(ident, is_raw_ident)
+ token::Ident(name, is_raw_ident)
}));
}
}
let (kind, symbol) = self.scan_number(c.unwrap());
let suffix = self.scan_optional_raw_name();
debug!("next_token_inner: scanned number {:?}, {:?}, {:?}", kind, symbol, suffix);
- return Ok(Token::lit(kind, symbol, suffix));
+ return Ok(TokenKind::lit(kind, symbol, suffix));
}
match c.expect("next_token_inner called at EOF") {
let symbol = self.name_from(start);
self.bump();
self.validate_char_escape(start_with_quote);
- return Ok(Token::lit(token::Char, symbol, None));
+ return Ok(TokenKind::lit(token::Char, symbol, None));
}
- // Include the leading `'` in the real identifier, for macro
- // expansion purposes. See #12512 for the gory details of why
- // this is necessary.
- let ident = self.with_str_from(start_with_quote, |lifetime_name| {
- self.mk_ident(lifetime_name)
- });
-
if starts_with_number {
// this is a recovered lifetime written `'1`, error but accept it
self.err_span_(
);
}
- return Ok(token::Lifetime(ident));
+ // Include the leading `'` in the real identifier, for macro
+ // expansion purposes. See #12512 for the gory details of why
+ // this is necessary.
+ return Ok(token::Lifetime(self.name_from(start_with_quote)));
}
let msg = "unterminated character literal";
let symbol = self.scan_single_quoted_string(start_with_quote, msg);
self.validate_char_escape(start_with_quote);
let suffix = self.scan_optional_raw_name();
- Ok(Token::lit(token::Char, symbol, suffix))
+ Ok(TokenKind::lit(token::Char, symbol, suffix))
}
'b' => {
self.bump();
self.validate_byte_str_escape(start_with_quote);
(token::ByteStr, symbol)
},
- Some('r') => self.scan_raw_byte_string(),
+ Some('r') => {
+ let (start, end, hash_count) = self.scan_raw_string();
+ let symbol = self.name_from_to(start, end);
+ self.validate_raw_byte_str_escape(start, end);
+
+ (token::ByteStrRaw(hash_count), symbol)
+ }
_ => unreachable!(), // Should have been a token::Ident above.
};
let suffix = self.scan_optional_raw_name();
- Ok(Token::lit(kind, symbol, suffix))
+ Ok(TokenKind::lit(kind, symbol, suffix))
}
'"' => {
let start_with_quote = self.pos;
let symbol = self.scan_double_quoted_string(msg);
self.validate_str_escape(start_with_quote);
let suffix = self.scan_optional_raw_name();
- Ok(Token::lit(token::Str, symbol, suffix))
+ Ok(TokenKind::lit(token::Str, symbol, suffix))
}
'r' => {
- let start_bpos = self.pos;
- self.bump();
- let mut hash_count: u16 = 0;
- while self.ch_is('#') {
- if hash_count == 65535 {
- let bpos = self.next_pos;
- self.fatal_span_(start_bpos,
- bpos,
- "too many `#` symbols: raw strings may be \
- delimited by up to 65535 `#` symbols").raise();
- }
- self.bump();
- hash_count += 1;
- }
-
- if self.is_eof() {
- self.fail_unterminated_raw_string(start_bpos, hash_count);
- } else if !self.ch_is('"') {
- let last_bpos = self.pos;
- let curr_char = self.ch.unwrap();
- self.fatal_span_char(start_bpos,
- last_bpos,
- "found invalid character; only `#` is allowed \
- in raw string delimitation",
- curr_char).raise();
- }
- self.bump();
- let content_start_bpos = self.pos;
- let mut content_end_bpos;
- let mut valid = true;
- 'outer: loop {
- if self.is_eof() {
- self.fail_unterminated_raw_string(start_bpos, hash_count);
- }
- // if self.ch_is('"') {
- // content_end_bpos = self.pos;
- // for _ in 0..hash_count {
- // self.bump();
- // if !self.ch_is('#') {
- // continue 'outer;
- let c = self.ch.unwrap();
- match c {
- '"' => {
- content_end_bpos = self.pos;
- for _ in 0..hash_count {
- self.bump();
- if !self.ch_is('#') {
- continue 'outer;
- }
- }
- break;
- }
- '\r' => {
- if !self.nextch_is('\n') {
- let last_bpos = self.pos;
- self.err_span_(start_bpos,
- last_bpos,
- "bare CR not allowed in raw string, use \\r \
- instead");
- valid = false;
- }
- }
- _ => (),
- }
- self.bump();
- }
-
- self.bump();
- let symbol = if valid {
- self.name_from_to(content_start_bpos, content_end_bpos)
- } else {
- Symbol::intern("??")
- };
+ let (start, end, hash_count) = self.scan_raw_string();
+ let symbol = self.name_from_to(start, end);
+ self.validate_raw_str_escape(start, end);
let suffix = self.scan_optional_raw_name();
- Ok(Token::lit(token::StrRaw(hash_count), symbol, suffix))
+ Ok(TokenKind::lit(token::StrRaw(hash_count), symbol, suffix))
}
'-' => {
if self.nextch_is('>') {
id
}
- fn scan_raw_byte_string(&mut self) -> (token::LitKind, Symbol) {
+ /// Scans a raw (byte) string, returning byte position range for `"<literal>"`
+ /// (including quotes) along with `#` character count in `(b)r##..."<literal>"##...`;
+ fn scan_raw_string(&mut self) -> (BytePos, BytePos, u16) {
let start_bpos = self.pos;
self.bump();
- let mut hash_count = 0;
+ let mut hash_count: u16 = 0;
while self.ch_is('#') {
if hash_count == 65535 {
let bpos = self.next_pos;
self.fatal_span_(start_bpos,
bpos,
- "too many `#` symbols: raw byte strings may be \
+ "too many `#` symbols: raw strings may be \
delimited by up to 65535 `#` symbols").raise();
}
self.bump();
if self.is_eof() {
self.fail_unterminated_raw_string(start_bpos, hash_count);
} else if !self.ch_is('"') {
- let pos = self.pos;
- let ch = self.ch.unwrap();
+ let last_bpos = self.pos;
+ let curr_char = self.ch.unwrap();
self.fatal_span_char(start_bpos,
- pos,
- "found invalid character; only `#` is allowed in raw \
- string delimitation",
- ch).raise();
+ last_bpos,
+ "found invalid character; only `#` is allowed \
+ in raw string delimitation",
+ curr_char).raise();
}
self.bump();
let content_start_bpos = self.pos;
}
break;
}
- Some(c) => {
- if c > '\x7F' {
- let pos = self.pos;
- self.err_span_char(pos, pos, "raw byte string must be ASCII", c);
- }
- }
+ _ => (),
}
self.bump();
}
self.bump();
- (token::ByteStrRaw(hash_count), self.name_from_to(content_start_bpos, content_end_bpos))
+ (content_start_bpos, content_end_bpos, hash_count)
}
fn validate_char_escape(&self, start_with_quote: BytePos) {
});
}
+ fn validate_raw_str_escape(&self, content_start: BytePos, content_end: BytePos) {
+ self.with_str_from_to(content_start, content_end, |lit: &str| {
+ unescape::unescape_raw_str(lit, &mut |range, c| {
+ if let Err(err) = c {
+ emit_unescape_error(
+ &self.sess.span_diagnostic,
+ lit,
+ self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
+ unescape::Mode::Str,
+ range,
+ err,
+ )
+ }
+ })
+ });
+ }
+
+ fn validate_raw_byte_str_escape(&self, content_start: BytePos, content_end: BytePos) {
+ self.with_str_from_to(content_start, content_end, |lit: &str| {
+ unescape::unescape_raw_byte_str(lit, &mut |range, c| {
+ if let Err(err) = c {
+ emit_unescape_error(
+ &self.sess.span_diagnostic,
+ lit,
+ self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
+ unescape::Mode::ByteStr,
+ range,
+ err,
+ )
+ }
+ })
+ });
+ }
+
fn validate_byte_str_escape(&self, start_with_quote: BytePos) {
self.with_str_from_to(start_with_quote + BytePos(1), self.pos - BytePos(1), |lit| {
unescape::unescape_byte_str(lit, &mut |range, c| {
mod tests {
use super::*;
- use crate::ast::{Ident, CrateConfig};
+ use crate::ast::CrateConfig;
use crate::symbol::Symbol;
use crate::source_map::{SourceMap, FilePathMapping};
use crate::feature_gate::UnstableFeatures;
&sh,
"/* my source file */ fn main() { println!(\"zebra\"); }\n"
.to_string());
- let id = Ident::from_str("fn");
- assert_eq!(string_reader.next_token().tok, token::Comment);
- assert_eq!(string_reader.next_token().tok, token::Whitespace);
+ assert_eq!(string_reader.next_token(), token::Comment);
+ assert_eq!(string_reader.next_token(), token::Whitespace);
let tok1 = string_reader.next_token();
- let tok2 = TokenAndSpan {
- tok: token::Ident(id, false),
- sp: Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
- };
- assert_eq!(tok1.tok, tok2.tok);
- assert_eq!(tok1.sp, tok2.sp);
- assert_eq!(string_reader.next_token().tok, token::Whitespace);
+ let tok2 = Token::new(
+ mk_ident("fn"),
+ Span::new(BytePos(21), BytePos(23), NO_EXPANSION),
+ );
+ assert_eq!(tok1.kind, tok2.kind);
+ assert_eq!(tok1.span, tok2.span);
+ assert_eq!(string_reader.next_token(), token::Whitespace);
// the 'main' id is already read:
assert_eq!(string_reader.pos.clone(), BytePos(28));
// read another token:
let tok3 = string_reader.next_token();
- let tok4 = TokenAndSpan {
- tok: mk_ident("main"),
- sp: Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
- };
- assert_eq!(tok3.tok, tok4.tok);
- assert_eq!(tok3.sp, tok4.sp);
+ let tok4 = Token::new(
+ mk_ident("main"),
+ Span::new(BytePos(24), BytePos(28), NO_EXPANSION),
+ );
+ assert_eq!(tok3.kind, tok4.kind);
+ assert_eq!(tok3.span, tok4.span);
// the lparen is already read:
assert_eq!(string_reader.pos.clone(), BytePos(29))
})
// check that the given reader produces the desired stream
// of tokens (stop checking after exhausting the expected vec)
- fn check_tokenization(mut string_reader: StringReader<'_>, expected: Vec<Token>) {
+ fn check_tokenization(mut string_reader: StringReader<'_>, expected: Vec<TokenKind>) {
for expected_tok in &expected {
- assert_eq!(&string_reader.next_token().tok, expected_tok);
+ assert_eq!(&string_reader.next_token(), expected_tok);
}
}
// make the identifier by looking up the string in the interner
- fn mk_ident(id: &str) -> Token {
- Token::from_ast_ident(Ident::from_str(id))
+ fn mk_ident(id: &str) -> TokenKind {
+ token::Ident(Symbol::intern(id), false)
}
- fn mk_lit(kind: token::LitKind, symbol: &str, suffix: Option<&str>) -> Token {
- Token::lit(kind, Symbol::intern(symbol), suffix.map(Symbol::intern))
+ fn mk_lit(kind: token::LitKind, symbol: &str, suffix: Option<&str>) -> TokenKind {
+ TokenKind::lit(kind, Symbol::intern(symbol), suffix.map(Symbol::intern))
}
#[test]
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "'a'".to_string()).next_token(),
mk_lit(token::Char, "a", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "' '".to_string()).next_token(),
mk_lit(token::Char, " ", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "'\\n'".to_string()).next_token(),
mk_lit(token::Char, "\\n", None));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token().tok,
- token::Lifetime(Ident::from_str("'abc")));
+ assert_eq!(setup(&sm, &sh, "'abc".to_string()).next_token(),
+ token::Lifetime(Symbol::intern("'abc")));
})
}
with_default_globals(|| {
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
- assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token(),
mk_lit(token::StrRaw(3), "\"#a\\b\x00c\"", None));
})
}
let sh = mk_sess(sm.clone());
macro_rules! test {
($input: expr, $tok_type: ident, $tok_contents: expr) => {{
- assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{}suffix", $input)).next_token(),
mk_lit(token::$tok_type, $tok_contents, Some("suffix")));
// with a whitespace separator:
- assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token().tok,
+ assert_eq!(setup(&sm, &sh, format!("{} suffix", $input)).next_token(),
mk_lit(token::$tok_type, $tok_contents, None));
}}
}
test!("1.0", Float, "1.0");
test!("1.0e10", Float, "1.0e10");
- assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "2us".to_string()).next_token(),
mk_lit(token::Integer, "2", Some("us")));
- assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "r###\"raw\"###suffix".to_string()).next_token(),
mk_lit(token::StrRaw(3), "raw", Some("suffix")));
- assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token().tok,
+ assert_eq!(setup(&sm, &sh, "br###\"raw\"###suffix".to_string()).next_token(),
mk_lit(token::ByteStrRaw(3), "raw", Some("suffix")));
})
}
let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
let sh = mk_sess(sm.clone());
let mut lexer = setup(&sm, &sh, "/* /* */ */'a'".to_string());
- match lexer.next_token().tok {
- token::Comment => {}
- _ => panic!("expected a comment!"),
- }
- assert_eq!(lexer.next_token().tok, mk_lit(token::Char, "a", None));
+ assert_eq!(lexer.next_token(), token::Comment);
+ assert_eq!(lexer.next_token(), mk_lit(token::Char, "a", None));
})
}
let sh = mk_sess(sm.clone());
let mut lexer = setup(&sm, &sh, "// test\r\n/// test\r\n".to_string());
let comment = lexer.next_token();
- assert_eq!(comment.tok, token::Comment);
- assert_eq!((comment.sp.lo(), comment.sp.hi()), (BytePos(0), BytePos(7)));
- assert_eq!(lexer.next_token().tok, token::Whitespace);
- assert_eq!(lexer.next_token().tok,
- token::DocComment(Symbol::intern("/// test")));
+ assert_eq!(comment.kind, token::Comment);
+ assert_eq!((comment.span.lo(), comment.span.hi()), (BytePos(0), BytePos(7)));
+ assert_eq!(lexer.next_token(), token::Whitespace);
+ assert_eq!(lexer.next_token(), token::DocComment(Symbol::intern("/// test")));
})
}
}
use crate::print::pprust::token_to_string;
use crate::parse::lexer::{StringReader, UnmatchedBrace};
-use crate::parse::{token, PResult};
+use crate::parse::token::{self, Token};
+use crate::parse::PResult;
use crate::tokenstream::{DelimSpan, IsJoint::*, TokenStream, TokenTree, TreeAndJoint};
impl<'a> StringReader<'a> {
crate fn into_token_trees(self) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
let mut tt_reader = TokenTreesReader {
string_reader: self,
- token: token::Eof,
- span: syntax_pos::DUMMY_SP,
+ token: Token::dummy(),
open_braces: Vec::new(),
unmatched_braces: Vec::new(),
matching_delim_spans: Vec::new(),
struct TokenTreesReader<'a> {
string_reader: StringReader<'a>,
- token: token::Token,
- span: Span,
+ token: Token,
/// Stack of open delimiters and their spans. Used for error message.
open_braces: Vec<(token::DelimToken, Span)>,
unmatched_braces: Vec<UnmatchedBrace>,
fn parse_token_trees_until_close_delim(&mut self) -> TokenStream {
let mut tts = vec![];
loop {
- if let token::CloseDelim(..) = self.token {
+ if let token::CloseDelim(..) = self.token.kind {
return TokenStream::new(tts);
}
fn parse_token_tree(&mut self) -> PResult<'a, TreeAndJoint> {
let sm = self.string_reader.sess.source_map();
- match self.token {
+ match self.token.kind {
token::Eof => {
let msg = "this file contains an un-closed delimiter";
let mut err = self.string_reader.sess.span_diagnostic
- .struct_span_err(self.span, msg);
+ .struct_span_err(self.token.span, msg);
for &(_, sp) in &self.open_braces {
err.span_label(sp, "un-closed delimiter");
}
},
token::OpenDelim(delim) => {
// The span for beginning of the delimited section
- let pre_span = self.span;
+ let pre_span = self.token.span;
// Parse the open delimiter.
- self.open_braces.push((delim, self.span));
+ self.open_braces.push((delim, self.token.span));
self.real_token();
// Parse the token trees within the delimiters.
let tts = self.parse_token_trees_until_close_delim();
// Expand to cover the entire delimited token tree
- let delim_span = DelimSpan::from_pair(pre_span, self.span);
+ let delim_span = DelimSpan::from_pair(pre_span, self.token.span);
- match self.token {
+ match self.token.kind {
// Correct delimiter.
token::CloseDelim(d) if d == delim => {
let (open_brace, open_brace_span) = self.open_braces.pop().unwrap();
self.matching_delim_spans.clear();
} else {
self.matching_delim_spans.push(
- (open_brace, open_brace_span, self.span),
+ (open_brace, open_brace_span, self.token.span),
);
}
// Parse the close delimiter.
token::CloseDelim(other) => {
let mut unclosed_delimiter = None;
let mut candidate = None;
- if self.last_unclosed_found_span != Some(self.span) {
+ if self.last_unclosed_found_span != Some(self.token.span) {
// do not complain about the same unclosed delimiter multiple times
- self.last_unclosed_found_span = Some(self.span);
+ self.last_unclosed_found_span = Some(self.token.span);
// This is a conservative error: only report the last unclosed
// delimiter. The previous unclosed delimiters could actually be
// closed! The parser just hasn't gotten to them yet.
if let Some(&(_, sp)) = self.open_braces.last() {
unclosed_delimiter = Some(sp);
};
- if let Some(current_padding) = sm.span_to_margin(self.span) {
+ if let Some(current_padding) = sm.span_to_margin(self.token.span) {
for (brace, brace_span) in &self.open_braces {
if let Some(padding) = sm.span_to_margin(*brace_span) {
// high likelihood of these two corresponding
self.unmatched_braces.push(UnmatchedBrace {
expected_delim: tok,
found_delim: other,
- found_span: self.span,
+ found_span: self.token.span,
unclosed_span: unclosed_delimiter,
candidate_span: candidate,
});
let token_str = token_to_string(&self.token);
let msg = format!("unexpected close delimiter: `{}`", token_str);
let mut err = self.string_reader.sess.span_diagnostic
- .struct_span_err(self.span, &msg);
- err.span_label(self.span, "unexpected close delimiter");
+ .struct_span_err(self.token.span, &msg);
+ err.span_label(self.token.span, "unexpected close delimiter");
Err(err)
},
_ => {
- let tt = TokenTree::Token(self.span, self.token.clone());
+ let tt = TokenTree::Token(self.token.take());
// Note that testing for joint-ness here is done via the raw
// source span as the joint-ness is a property of the raw source
// rather than wanting to take `override_span` into account.
let raw = self.string_reader.peek_span_src_raw;
self.real_token();
let is_joint = raw.hi() == self.string_reader.peek_span_src_raw.lo()
- && token::is_op(&self.token);
+ && self.token.is_op();
Ok((tt, if is_joint { Joint } else { NonJoint }))
}
}
}
fn real_token(&mut self) {
- let t = self.string_reader.real_token();
- self.token = t.tok;
- self.span = t.sp;
+ self.token = self.string_reader.real_token();
}
}
//! Code related to parsing literals.
-use crate::ast::{self, Ident, Lit, LitKind};
+use crate::ast::{self, Lit, LitKind};
use crate::parse::parser::Parser;
use crate::parse::PResult;
-use crate::parse::token::{self, Token};
-use crate::parse::unescape::{unescape_str, unescape_char, unescape_byte_str, unescape_byte};
+use crate::parse::token::{self, Token, TokenKind};
+use crate::parse::unescape::{unescape_char, unescape_byte};
+use crate::parse::unescape::{unescape_str, unescape_byte_str};
+use crate::parse::unescape::{unescape_raw_str, unescape_raw_byte_str};
use crate::print::pprust;
use crate::symbol::{kw, sym, Symbol};
use crate::tokenstream::{TokenStream, TokenTree};
// Ditto.
let s = symbol.as_str();
let symbol = if s.contains('\r') {
- Symbol::intern(&raw_str_lit(&s))
+ let mut buf = String::with_capacity(s.len());
+ let mut error = Ok(());
+ unescape_raw_str(&s, &mut |_, unescaped_char| {
+ match unescaped_char {
+ Ok(c) => buf.push(c),
+ Err(_) => error = Err(LitError::LexerError),
+ }
+ });
+ error?;
+ buf.shrink_to_fit();
+ Symbol::intern(&buf)
} else {
symbol
};
buf.shrink_to_fit();
LitKind::ByteStr(Lrc::new(buf))
}
- token::ByteStrRaw(_) => LitKind::ByteStr(Lrc::new(symbol.to_string().into_bytes())),
+ token::ByteStrRaw(_) => {
+ let s = symbol.as_str();
+ let bytes = if s.contains('\r') {
+ let mut buf = Vec::with_capacity(s.len());
+ let mut error = Ok(());
+ unescape_raw_byte_str(&s, &mut |_, unescaped_byte| {
+ match unescaped_byte {
+ Ok(c) => buf.push(c),
+ Err(_) => error = Err(LitError::LexerError),
+ }
+ });
+ error?;
+ buf.shrink_to_fit();
+ buf
+ } else {
+ symbol.to_string().into_bytes()
+ };
+
+ LitKind::ByteStr(Lrc::new(bytes))
+ },
token::Err => LitKind::Err(symbol),
})
}
}
/// Converts arbitrary token into an AST literal.
- crate fn from_token(token: &Token, span: Span) -> Result<Lit, LitError> {
- let lit = match *token {
- token::Ident(ident, false) if ident.name == kw::True || ident.name == kw::False =>
- token::Lit::new(token::Bool, ident.name, None),
+ crate fn from_token(token: &Token) -> Result<Lit, LitError> {
+ let lit = match token.kind {
+ token::Ident(name, false) if name == kw::True || name == kw::False =>
+ token::Lit::new(token::Bool, name, None),
token::Literal(lit) =>
lit,
token::Interpolated(ref nt) => {
_ => return Err(LitError::NotLiteral)
};
- Lit::from_lit_token(lit, span)
+ Lit::from_lit_token(lit, token.span)
}
/// Attempts to recover an AST literal from semantic literal.
/// Losslessly convert an AST literal into a token stream.
crate fn tokens(&self) -> TokenStream {
let token = match self.token.kind {
- token::Bool => token::Ident(Ident::new(self.token.symbol, self.span), false),
+ token::Bool => token::Ident(self.token.symbol, false),
_ => token::Literal(self.token),
};
- TokenTree::Token(self.span, token).into()
+ TokenTree::token(token, self.span).into()
}
}
let mut recovered = None;
if self.token == token::Dot {
// Attempt to recover `.4` as `0.4`.
- recovered = self.look_ahead(1, |t| {
- if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = *t {
- let next_span = self.look_ahead_span(1);
- if self.span.hi() == next_span.lo() {
+ recovered = self.look_ahead(1, |next_token| {
+ if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix })
+ = next_token.kind {
+ if self.token.span.hi() == next_token.span.lo() {
let s = String::from("0.") + &symbol.as_str();
- let token = Token::lit(token::Float, Symbol::intern(&s), suffix);
- return Some((token, self.span.to(next_span)));
+ let kind = TokenKind::lit(token::Float, Symbol::intern(&s), suffix);
+ return Some(Token::new(kind, self.token.span.to(next_token.span)));
}
}
None
});
- if let Some((ref token, span)) = recovered {
+ if let Some(token) = &recovered {
self.bump();
self.diagnostic()
- .struct_span_err(span, "float literals must have an integer part")
+ .struct_span_err(token.span, "float literals must have an integer part")
.span_suggestion(
- span,
+ token.span,
"must have an integer part",
- pprust::token_to_string(&token),
+ pprust::token_to_string(token),
Applicability::MachineApplicable,
)
.emit();
}
}
- let (token, span) = recovered.as_ref().map_or((&self.token, self.span),
- |(token, span)| (token, *span));
-
- match Lit::from_token(token, span) {
+ let token = recovered.as_ref().unwrap_or(&self.token);
+ match Lit::from_token(token) {
Ok(lit) => {
self.bump();
Ok(lit)
}
Err(LitError::NotLiteral) => {
let msg = format!("unexpected token: {}", self.this_token_descr());
- Err(self.span_fatal(span, &msg))
+ Err(self.span_fatal(token.span, &msg))
}
Err(err) => {
- let lit = token.expect_lit();
+ let (lit, span) = (token.expect_lit(), token.span);
self.bump();
err.report(&self.sess.span_diagnostic, lit, span);
- let lit = token::Lit::new(token::Err, lit.symbol, lit.suffix);
+ // Pack possible quotes and prefixes from the original literal into
+ // the error literal's symbol so they can be pretty-printed faithfully.
+ let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None);
+ let symbol = Symbol::intern(&pprust::literal_to_string(suffixless_lit));
+ let lit = token::Lit::new(token::Err, symbol, lit.suffix);
Lit::from_lit_token(lit, span).map_err(|_| unreachable!())
}
}
}
}
-/// Parses a string representing a raw string literal into its final form. The
-/// only operation this does is convert embedded CRLF into a single LF.
-fn raw_str_lit(lit: &str) -> String {
- debug!("raw_str_lit: {:?}", lit);
- let mut res = String::with_capacity(lit.len());
-
- let mut chars = lit.chars().peekable();
- while let Some(c) = chars.next() {
- if c == '\r' {
- if *chars.peek().unwrap() != '\n' {
- panic!("lexer accepted bare CR");
- }
- chars.next();
- res.push('\n');
- } else {
- res.push(c);
- }
- }
-
- res.shrink_to_fit();
- res
-}
-
// Checks if `s` looks like i32 or u1234 etc.
fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
s.len() > 1 && s.starts_with(first_chars) && s[1..].chars().all(|c| c.is_ascii_digit())
use crate::source_map::{SourceMap, FilePathMapping};
use crate::feature_gate::UnstableFeatures;
use crate::parse::parser::Parser;
-use crate::syntax::parse::parser::emit_unclosed_delims;
+use crate::parse::parser::emit_unclosed_delims;
+use crate::parse::token::TokenKind;
use crate::tokenstream::{TokenStream, TokenTree};
use crate::diagnostics::plugin::ErrorMap;
-use crate::print::pprust::token_to_string;
+use crate::print::pprust;
use errors::{Applicability, FatalError, Level, Handler, ColorConfig, Diagnostic, DiagnosticBuilder};
use rustc_data_structures::sync::{Lrc, Lock};
let (stream, unclosed_delims) = maybe_file_to_stream(sess, source_file, None)?;
let mut parser = stream_to_parser(sess, stream, None);
parser.unclosed_delims = unclosed_delims;
- if parser.token == token::Eof && parser.span.is_dummy() {
- parser.span = Span::new(end_pos, end_pos, parser.span.ctxt());
+ if parser.token == token::Eof && parser.token.span.is_dummy() {
+ parser.token.span = Span::new(end_pos, end_pos, parser.token.span.ctxt());
}
Ok(parser)
for unmatched in unmatched_braces {
let mut db = sess.span_diagnostic.struct_span_err(unmatched.found_span, &format!(
"incorrect close delimiter: `{}`",
- token_to_string(&token::Token::CloseDelim(unmatched.found_delim)),
+ pprust::token_kind_to_string(&token::CloseDelim(unmatched.found_delim)),
));
db.span_label(unmatched.found_span, "incorrect close delimiter");
if let Some(sp) = unmatched.candidate_span {
/// A sequence separator.
pub struct SeqSep {
/// The separator token.
- pub sep: Option<token::Token>,
+ pub sep: Option<TokenKind>,
/// `true` if a trailing separator is allowed.
pub trailing_sep_allowed: bool,
}
impl SeqSep {
- pub fn trailing_allowed(t: token::Token) -> SeqSep {
+ pub fn trailing_allowed(t: TokenKind) -> SeqSep {
SeqSep {
sep: Some(t),
trailing_sep_allowed: true,
#[cfg(test)]
mod tests {
use super::*;
- use crate::ast::{self, Ident, PatKind};
+ use crate::ast::{self, Name, PatKind};
use crate::attr::first_attr_value_str_by_name;
use crate::ptr::P;
+ use crate::parse::token::Token;
use crate::print::pprust::item_to_string;
+ use crate::symbol::{kw, sym};
use crate::tokenstream::{DelimSpan, TokenTree};
use crate::util::parser_testing::string_to_stream;
use crate::util::parser_testing::{string_to_expr, string_to_item};
#[test]
fn string_to_tts_macro () {
with_default_globals(|| {
- use crate::symbol::sym;
-
let tts: Vec<_> =
string_to_stream("macro_rules! zip (($a)=>($a))".to_string()).trees().collect();
let tts: &[TokenTree] = &tts[..];
- match (tts.len(), tts.get(0), tts.get(1), tts.get(2), tts.get(3)) {
- (
- 4,
- Some(&TokenTree::Token(_, token::Ident(name_macro_rules, false))),
- Some(&TokenTree::Token(_, token::Not)),
- Some(&TokenTree::Token(_, token::Ident(name_zip, false))),
- Some(&TokenTree::Delimited(_, macro_delim, ref macro_tts)),
- )
- if name_macro_rules.name == sym::macro_rules
- && name_zip.name.as_str() == "zip" => {
+ match tts {
+ [
+ TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }),
+ TokenTree::Token(Token { kind: token::Not, .. }),
+ TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }),
+ TokenTree::Delimited(_, macro_delim, macro_tts)
+ ]
+ if name_macro_rules == &sym::macro_rules && name_zip.as_str() == "zip" => {
let tts = ¯o_tts.trees().collect::<Vec<_>>();
- match (tts.len(), tts.get(0), tts.get(1), tts.get(2)) {
- (
- 3,
- Some(&TokenTree::Delimited(_, first_delim, ref first_tts)),
- Some(&TokenTree::Token(_, token::FatArrow)),
- Some(&TokenTree::Delimited(_, second_delim, ref second_tts)),
- )
- if macro_delim == token::Paren => {
+ match &tts[..] {
+ [
+ TokenTree::Delimited(_, first_delim, first_tts),
+ TokenTree::Token(Token { kind: token::FatArrow, .. }),
+ TokenTree::Delimited(_, second_delim, second_tts),
+ ]
+ if macro_delim == &token::Paren => {
let tts = &first_tts.trees().collect::<Vec<_>>();
- match (tts.len(), tts.get(0), tts.get(1)) {
- (
- 2,
- Some(&TokenTree::Token(_, token::Dollar)),
- Some(&TokenTree::Token(_, token::Ident(ident, false))),
- )
- if first_delim == token::Paren && ident.name.as_str() == "a" => {},
+ match &tts[..] {
+ [
+ TokenTree::Token(Token { kind: token::Dollar, .. }),
+ TokenTree::Token(Token { kind: token::Ident(name, false), .. }),
+ ]
+ if first_delim == &token::Paren && name.as_str() == "a" => {},
_ => panic!("value 3: {:?} {:?}", first_delim, first_tts),
}
let tts = &second_tts.trees().collect::<Vec<_>>();
- match (tts.len(), tts.get(0), tts.get(1)) {
- (
- 2,
- Some(&TokenTree::Token(_, token::Dollar)),
- Some(&TokenTree::Token(_, token::Ident(ident, false))),
- )
- if second_delim == token::Paren && ident.name.as_str() == "a" => {},
+ match &tts[..] {
+ [
+ TokenTree::Token(Token { kind: token::Dollar, .. }),
+ TokenTree::Token(Token { kind: token::Ident(name, false), .. }),
+ ]
+ if second_delim == &token::Paren && name.as_str() == "a" => {},
_ => panic!("value 4: {:?} {:?}", second_delim, second_tts),
}
},
let tts = string_to_stream("fn a (b : i32) { b; }".to_string());
let expected = TokenStream::new(vec![
- TokenTree::Token(sp(0, 2), token::Ident(Ident::from_str("fn"), false)).into(),
- TokenTree::Token(sp(3, 4), token::Ident(Ident::from_str("a"), false)).into(),
+ TokenTree::token(token::Ident(kw::Fn, false), sp(0, 2)).into(),
+ TokenTree::token(token::Ident(Name::intern("a"), false), sp(3, 4)).into(),
TokenTree::Delimited(
DelimSpan::from_pair(sp(5, 6), sp(13, 14)),
token::DelimToken::Paren,
TokenStream::new(vec![
- TokenTree::Token(sp(6, 7),
- token::Ident(Ident::from_str("b"), false)).into(),
- TokenTree::Token(sp(8, 9), token::Colon).into(),
- TokenTree::Token(sp(10, 13),
- token::Ident(Ident::from_str("i32"), false)).into(),
+ TokenTree::token(token::Ident(Name::intern("b"), false), sp(6, 7)).into(),
+ TokenTree::token(token::Colon, sp(8, 9)).into(),
+ TokenTree::token(token::Ident(sym::i32, false), sp(10, 13)).into(),
]).into(),
).into(),
TokenTree::Delimited(
DelimSpan::from_pair(sp(15, 16), sp(20, 21)),
token::DelimToken::Brace,
TokenStream::new(vec![
- TokenTree::Token(sp(17, 18),
- token::Ident(Ident::from_str("b"), false)).into(),
- TokenTree::Token(sp(18, 19), token::Semi).into(),
+ TokenTree::token(token::Ident(Name::intern("b"), false), sp(17, 18)).into(),
+ TokenTree::token(token::Semi, sp(18, 19)).into(),
]).into(),
).into()
]);
#[test] fn crlf_doc_comments() {
with_default_globals(|| {
- use crate::symbol::sym;
-
let sess = ParseSess::new(FilePathMapping::empty());
let name_1 = FileName::Custom("crlf_source_1".to_string());
use crate::ast::{RangeEnd, RangeSyntax};
use crate::{ast, attr};
use crate::ext::base::DummyResult;
+use crate::ext::hygiene::SyntaxContext;
use crate::source_map::{self, SourceMap, Spanned, respan};
use crate::parse::{SeqSep, classify, literal, token};
-use crate::parse::lexer::{TokenAndSpan, UnmatchedBrace};
+use crate::parse::lexer::UnmatchedBrace;
use crate::parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
-use crate::parse::token::DelimToken;
+use crate::parse::token::{Token, TokenKind, DelimToken};
use crate::parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership};
use crate::util::parser::{AssocOp, Fixity};
use crate::print::pprust;
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
($p:expr) => {
- if let token::Interpolated(nt) = &$p.token {
+ if let token::Interpolated(nt) = &$p.token.kind {
match &**nt {
token::NtExpr(e) | token::NtLiteral(e) => {
let e = e.clone();
token::NtPath(path) => {
let path = path.clone();
$p.bump();
- return Ok($p.mk_expr($p.span, ExprKind::Path(None, path), ThinVec::new()));
+ return Ok($p.mk_expr(
+ $p.token.span, ExprKind::Path(None, path), ThinVec::new()
+ ));
}
token::NtBlock(block) => {
let block = block.clone();
$p.bump();
- return Ok($p.mk_expr($p.span, ExprKind::Block(block, None), ThinVec::new()));
+ return Ok($p.mk_expr(
+ $p.token.span, ExprKind::Block(block, None), ThinVec::new()
+ ));
}
_ => {},
};
/// As maybe_whole_expr, but for things other than expressions
macro_rules! maybe_whole {
($p:expr, $constructor:ident, |$x:ident| $e:expr) => {
- if let token::Interpolated(nt) = &$p.token {
+ if let token::Interpolated(nt) = &$p.token.kind {
if let token::$constructor(x) = &**nt {
let $x = x.clone();
$p.bump();
macro_rules! maybe_recover_from_interpolated_ty_qpath {
($self: expr, $allow_qpath_recovery: expr) => {
if $allow_qpath_recovery && $self.look_ahead(1, |t| t == &token::ModSep) {
- if let token::Interpolated(nt) = &$self.token {
+ if let token::Interpolated(nt) = &$self.token.kind {
if let token::NtTy(ty) = &**nt {
let ty = ty.clone();
$self.bump();
#[derive(Clone)]
pub struct Parser<'a> {
pub sess: &'a ParseSess,
- /// The current token.
- pub token: token::Token,
- /// The span of the current token.
- pub span: Span,
+ /// The current normalized token.
+ /// "Normalized" means that some interpolated tokens
+ /// (`$i: ident` and `$l: lifetime` meta-variables) are replaced
+ /// with non-interpolated identifier and lifetime tokens they refer to.
+ /// Perhaps the normalized / non-normalized setup can be simplified somehow.
+ pub token: Token,
+ /// Span of the current non-normalized token.
meta_var_span: Option<Span>,
- /// The span of the previous token.
+ /// Span of the previous non-normalized token.
pub prev_span: Span,
- /// The kind of the previous troken.
+ /// Kind of the previous normalized token (in simplified form).
prev_token_kind: PrevTokenKind,
restrictions: Restrictions,
/// Used to determine the path to externally loaded source files.
}
impl TokenCursor {
- fn next(&mut self) -> TokenAndSpan {
+ fn next(&mut self) -> Token {
loop {
let tree = if !self.frame.open_delim {
self.frame.open_delim = true;
self.frame = frame;
continue
} else {
- return TokenAndSpan { tok: token::Eof, sp: DUMMY_SP }
+ return Token::new(token::Eof, DUMMY_SP);
};
match self.frame.last_token {
}
match tree {
- TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
+ TokenTree::Token(token) => return token,
TokenTree::Delimited(sp, delim, tts) => {
let frame = TokenCursorFrame::new(sp, delim, &tts);
self.stack.push(mem::replace(&mut self.frame, frame));
}
}
- fn next_desugared(&mut self) -> TokenAndSpan {
- let (sp, name) = match self.next() {
- TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
+ fn next_desugared(&mut self) -> Token {
+ let (name, sp) = match self.next() {
+ Token { kind: token::DocComment(name), span } => (name, span),
tok => return tok,
};
delim_span,
token::Bracket,
[
- TokenTree::Token(sp, token::Ident(ast::Ident::with_empty_ctxt(sym::doc), false)),
- TokenTree::Token(sp, token::Eq),
- TokenTree::Token(sp, token::Token::lit(
+ TokenTree::token(token::Ident(sym::doc, false), sp),
+ TokenTree::token(token::Eq, sp),
+ TokenTree::token(TokenKind::lit(
token::StrRaw(num_of_hashes), Symbol::intern(&stripped), None
- )),
+ ), sp),
]
.iter().cloned().collect::<TokenStream>().into(),
);
delim_span,
token::NoDelim,
&if doc_comment_style(&name.as_str()) == AttrStyle::Inner {
- [TokenTree::Token(sp, token::Pound), TokenTree::Token(sp, token::Not), body]
+ [TokenTree::token(token::Pound, sp), TokenTree::token(token::Not, sp), body]
.iter().cloned().collect::<TokenStream>().into()
} else {
- [TokenTree::Token(sp, token::Pound), body]
+ [TokenTree::token(token::Pound, sp), body]
.iter().cloned().collect::<TokenStream>().into()
},
)));
#[derive(Clone, PartialEq)]
crate enum TokenType {
- Token(token::Token),
+ Token(TokenKind),
Keyword(Symbol),
Operator,
Lifetime,
impl TokenType {
crate fn to_string(&self) -> String {
match *self {
- TokenType::Token(ref t) => format!("`{}`", pprust::token_to_string(t)),
+ TokenType::Token(ref t) => format!("`{}`", pprust::token_kind_to_string(t)),
TokenType::Keyword(kw) => format!("`{}`", kw),
TokenType::Operator => "an operator".to_string(),
TokenType::Lifetime => "lifetime".to_string(),
///
/// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes
/// that `IDENT` is not the ident of a fn trait.
-fn can_continue_type_after_non_fn_ident(t: &token::Token) -> bool {
+fn can_continue_type_after_non_fn_ident(t: &Token) -> bool {
t == &token::ModSep || t == &token::Lt ||
t == &token::BinOp(token::Shl)
}
) -> Self {
let mut parser = Parser {
sess,
- token: token::Whitespace,
- span: DUMMY_SP,
+ token: Token::dummy(),
prev_span: DUMMY_SP,
meta_var_span: None,
prev_token_kind: PrevTokenKind::Other,
subparser_name,
};
- let tok = parser.next_tok();
- parser.token = tok.tok;
- parser.span = tok.sp;
+ parser.token = parser.next_tok();
if let Some(directory) = directory {
parser.directory = directory;
- } else if !parser.span.is_dummy() {
- if let FileName::Real(mut path) = sess.source_map().span_to_unmapped_path(parser.span) {
+ } else if !parser.token.span.is_dummy() {
+ if let FileName::Real(mut path) =
+ sess.source_map().span_to_unmapped_path(parser.token.span) {
path.pop();
parser.directory.path = Cow::from(path);
}
parser
}
- fn next_tok(&mut self) -> TokenAndSpan {
+ fn next_tok(&mut self) -> Token {
let mut next = if self.desugar_doc_comments {
self.token_cursor.next_desugared()
} else {
self.token_cursor.next()
};
- if next.sp.is_dummy() {
+ if next.span.is_dummy() {
// Tweak the location for better diagnostics, but keep syntactic context intact.
- next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
+ next.span = self.prev_span.with_ctxt(next.span.ctxt());
}
next
}
}
crate fn token_descr(&self) -> Option<&'static str> {
- Some(match &self.token {
- t if t.is_special_ident() => "reserved identifier",
- t if t.is_used_keyword() => "keyword",
- t if t.is_unused_keyword() => "reserved keyword",
+ Some(match &self.token.kind {
+ _ if self.token.is_special_ident() => "reserved identifier",
+ _ if self.token.is_used_keyword() => "keyword",
+ _ if self.token.is_unused_keyword() => "reserved keyword",
token::DocComment(..) => "doc comment",
_ => return None,
})
}
/// Expects and consumes the token `t`. Signals an error if the next token is not `t`.
- pub fn expect(&mut self, t: &token::Token) -> PResult<'a, bool /* recovered */> {
+ pub fn expect(&mut self, t: &TokenKind) -> PResult<'a, bool /* recovered */> {
if self.expected_tokens.is_empty() {
if self.token == *t {
self.bump();
/// anything. Signal a fatal error if next token is unexpected.
pub fn expect_one_of(
&mut self,
- edible: &[token::Token],
- inedible: &[token::Token],
+ edible: &[TokenKind],
+ inedible: &[TokenKind],
) -> PResult<'a, bool /* recovered */> {
- if edible.contains(&self.token) {
+ if edible.contains(&self.token.kind) {
self.bump();
Ok(false)
- } else if inedible.contains(&self.token) {
+ } else if inedible.contains(&self.token.kind) {
// leave it in the input
Ok(false)
- } else if self.last_unexpected_token_span == Some(self.span) {
+ } else if self.last_unexpected_token_span == Some(self.token.span) {
FatalError.raise();
} else {
self.expected_one_of_not_found(edible, inedible)
}
fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, ast::Ident> {
- match self.token {
- token::Ident(ident, _) => {
+ match self.token.kind {
+ token::Ident(name, _) => {
if self.token.is_reserved_ident() {
let mut err = self.expected_ident_found();
if recover {
return Err(err);
}
}
- let span = self.span;
+ let span = self.token.span;
self.bump();
- Ok(Ident::new(ident.name, span))
+ Ok(Ident::new(name, span))
}
_ => {
Err(if self.prev_token_kind == PrevTokenKind::DocComment {
///
/// This method will automatically add `tok` to `expected_tokens` if `tok` is not
/// encountered.
- crate fn check(&mut self, tok: &token::Token) -> bool {
+ crate fn check(&mut self, tok: &TokenKind) -> bool {
let is_present = self.token == *tok;
if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); }
is_present
}
/// Consumes a token 'tok' if it exists. Returns whether the given token was present.
- pub fn eat(&mut self, tok: &token::Token) -> bool {
+ pub fn eat(&mut self, tok: &TokenKind) -> bool {
let is_present = self.check(tok);
if is_present { self.bump() }
is_present
/// See issue #47856 for an example of when this may occur.
fn eat_plus(&mut self) -> bool {
self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus)));
- match self.token {
+ match self.token.kind {
token::BinOp(token::Plus) => {
self.bump();
true
}
token::BinOpEq(token::Plus) => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
self.bump_with(token::Eq, span);
true
}
/// `&` and continues. If an `&` is not seen, signals an error.
fn expect_and(&mut self) -> PResult<'a, ()> {
self.expected_tokens.push(TokenType::Token(token::BinOp(token::And)));
- match self.token {
+ match self.token.kind {
token::BinOp(token::And) => {
self.bump();
Ok(())
}
token::AndAnd => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
Ok(self.bump_with(token::BinOp(token::And), span))
}
_ => self.unexpected()
/// `|` and continues. If an `|` is not seen, signals an error.
fn expect_or(&mut self) -> PResult<'a, ()> {
self.expected_tokens.push(TokenType::Token(token::BinOp(token::Or)));
- match self.token {
+ match self.token.kind {
token::BinOp(token::Or) => {
self.bump();
Ok(())
}
token::OrOr => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
Ok(self.bump_with(token::BinOp(token::Or), span))
}
_ => self.unexpected()
/// starting token.
fn eat_lt(&mut self) -> bool {
self.expected_tokens.push(TokenType::Token(token::Lt));
- let ate = match self.token {
+ let ate = match self.token.kind {
token::Lt => {
self.bump();
true
}
token::BinOp(token::Shl) => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
self.bump_with(token::Lt, span);
true
}
token::LArrow => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
self.bump_with(token::BinOp(token::Minus), span);
true
}
/// with a single `>` and continues. If a `>` is not seen, signals an error.
fn expect_gt(&mut self) -> PResult<'a, ()> {
self.expected_tokens.push(TokenType::Token(token::Gt));
- let ate = match self.token {
+ let ate = match self.token.kind {
token::Gt => {
self.bump();
Some(())
}
token::BinOp(token::Shr) => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
Some(self.bump_with(token::Gt, span))
}
token::BinOpEq(token::Shr) => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
Some(self.bump_with(token::Ge, span))
}
token::Ge => {
- let span = self.span.with_lo(self.span.lo() + BytePos(1));
+ let span = self.token.span.with_lo(self.token.span.lo() + BytePos(1));
Some(self.bump_with(token::Eq, span))
}
_ => None,
/// `f` must consume tokens until reaching the next separator or
/// closing bracket.
pub fn parse_seq_to_end<T, F>(&mut self,
- ket: &token::Token,
+ ket: &TokenKind,
sep: SeqSep,
f: F)
-> PResult<'a, Vec<T>> where
/// closing bracket.
pub fn parse_seq_to_before_end<T, F>(
&mut self,
- ket: &token::Token,
+ ket: &TokenKind,
sep: SeqSep,
f: F,
) -> PResult<'a, (Vec<T>, bool)>
crate fn parse_seq_to_before_tokens<T, F>(
&mut self,
- kets: &[&token::Token],
+ kets: &[&TokenKind],
sep: SeqSep,
expect: TokenExpectType,
mut f: F,
TokenExpectType::NoExpect => self.token == **k,
}
}) {
- match self.token {
+ match self.token.kind {
token::CloseDelim(..) | token::Eof => break,
_ => {}
};
Err(mut e) => {
// Attempt to keep parsing if it was a similar separator
if let Some(ref tokens) = t.similar_tokens() {
- if tokens.contains(&self.token) {
+ if tokens.contains(&self.token.kind) {
self.bump();
}
}
/// closing bracket.
fn parse_unspanned_seq<T, F>(
&mut self,
- bra: &token::Token,
- ket: &token::Token,
+ bra: &TokenKind,
+ ket: &TokenKind,
sep: SeqSep,
f: F,
) -> PResult<'a, Vec<T>> where
self.bug("attempted to bump the parser past EOF (may be stuck in a loop)");
}
- self.prev_span = self.meta_var_span.take().unwrap_or(self.span);
+ self.prev_span = self.meta_var_span.take().unwrap_or(self.token.span);
// Record last token kind for possible error recovery.
- self.prev_token_kind = match self.token {
+ self.prev_token_kind = match self.token.kind {
token::DocComment(..) => PrevTokenKind::DocComment,
token::Comma => PrevTokenKind::Comma,
token::BinOp(token::Plus) => PrevTokenKind::Plus,
_ => PrevTokenKind::Other,
};
- let next = self.next_tok();
- self.span = next.sp;
- self.token = next.tok;
+ self.token = self.next_tok();
self.expected_tokens.clear();
// check after each token
self.process_potential_macro_variable();
/// Advance the parser using provided token as a next one. Use this when
/// consuming a part of a token. For example a single `<` from `<<`.
- fn bump_with(&mut self, next: token::Token, span: Span) {
- self.prev_span = self.span.with_hi(span.lo());
+ fn bump_with(&mut self, next: TokenKind, span: Span) {
+ self.prev_span = self.token.span.with_hi(span.lo());
// It would be incorrect to record the kind of the current token, but
// fortunately for tokens currently using `bump_with`, the
// prev_token_kind will be of no use anyway.
self.prev_token_kind = PrevTokenKind::Other;
- self.span = span;
- self.token = next;
+ self.token = Token::new(next, span);
self.expected_tokens.clear();
}
pub fn look_ahead<R, F>(&self, dist: usize, f: F) -> R where
- F: FnOnce(&token::Token) -> R,
+ F: FnOnce(&Token) -> R,
{
if dist == 0 {
- return f(&self.token)
+ return f(&self.token);
}
- f(&match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
+ let frame = &self.token_cursor.frame;
+ f(&match frame.tree_cursor.look_ahead(dist - 1) {
Some(tree) => match tree {
- TokenTree::Token(_, tok) => tok,
- TokenTree::Delimited(_, delim, _) => token::OpenDelim(delim),
- },
- None => token::CloseDelim(self.token_cursor.frame.delim),
+ TokenTree::Token(token) => token,
+ TokenTree::Delimited(dspan, delim, _) =>
+ Token::new(token::OpenDelim(delim), dspan.open),
+ }
+ None => Token::new(token::CloseDelim(frame.delim), frame.span.close)
})
}
- crate fn look_ahead_span(&self, dist: usize) -> Span {
- if dist == 0 {
- return self.span
- }
-
- match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
- Some(TokenTree::Token(span, _)) => span,
- Some(TokenTree::Delimited(span, ..)) => span.entire(),
- None => self.look_ahead_span(dist - 1),
- }
- }
-
/// Returns whether any of the given keywords are `dist` tokens ahead of the current one.
fn is_keyword_ahead(&self, dist: usize, kws: &[Symbol]) -> bool {
self.look_ahead(dist, |t| kws.iter().any(|&kw| t.is_keyword(kw)))
fn parse_trait_item_(&mut self,
at_end: &mut bool,
mut attrs: Vec<Attribute>) -> PResult<'a, TraitItem> {
- let lo = self.span;
+ let lo = self.token.span;
self.eat_bad_pub();
let (name, node, generics) = if self.eat_keyword(kw::Type) {
self.parse_trait_item_assoc_ty()?
// definition...
// We don't allow argument names to be left off in edition 2018.
- p.parse_arg_general(p.span.rust_2018(), true, false)
+ p.parse_arg_general(p.token.span.rust_2018(), true, false)
})?;
generics.where_clause = self.parse_where_clause()?;
decl,
};
- let body = match self.token {
+ let body = match self.token.kind {
token::Semi => {
self.bump();
*at_end = true;
if self.eat(&token::RArrow) {
Ok(FunctionRetTy::Ty(self.parse_ty_common(allow_plus, true, false)?))
} else {
- Ok(FunctionRetTy::Default(self.span.shrink_to_lo()))
+ Ok(FunctionRetTy::Default(self.token.span.shrink_to_lo()))
}
}
maybe_recover_from_interpolated_ty_qpath!(self, allow_qpath_recovery);
maybe_whole!(self, NtTy, |x| x);
- let lo = self.span;
+ let lo = self.token.span;
let mut impl_dyn_multi = false;
let node = if self.eat(&token::OpenDelim(token::Paren)) {
// `(TYPE)` is a parenthesized type.
// Function pointer type or bound list (trait object type) starting with a poly-trait.
// `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
// `for<'lt> Trait1<'lt> + Trait2 + 'a`
- let lo = self.span;
+ let lo = self.token.span;
let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
if self.token_is_bare_fn_keyword() {
self.parse_ty_bare_fn(lifetime_defs)?
impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
} else if self.check_keyword(kw::Dyn) &&
- (self.span.rust_2018() ||
+ (self.token.span.rust_2018() ||
self.look_ahead(1, |t| t.can_begin_bound() &&
!can_continue_type_after_non_fn_ident(t))) {
self.bump(); // `dyn`
}
fn is_named_argument(&self) -> bool {
- let offset = match self.token {
+ let offset = match self.token.kind {
token::Interpolated(ref nt) => match **nt {
token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon),
_ => 0,
crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
maybe_whole_expr!(self);
- let minus_lo = self.span;
+ let minus_lo = self.token.span;
let minus_present = self.eat(&token::BinOp(token::Minus));
- let lo = self.span;
+ let lo = self.token.span;
let literal = self.parse_lit()?;
let hi = self.prev_span;
let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new());
}
fn parse_path_segment_ident(&mut self) -> PResult<'a, ast::Ident> {
- match self.token {
- token::Ident(ident, _) if self.token.is_path_segment_keyword() => {
- let span = self.span;
+ match self.token.kind {
+ token::Ident(name, _) if name.is_path_segment_keyword() => {
+ let span = self.token.span;
self.bump();
- Ok(Ident::new(ident.name, span))
+ Ok(Ident::new(name, span))
}
_ => self.parse_ident(),
}
}
fn parse_ident_or_underscore(&mut self) -> PResult<'a, ast::Ident> {
- match self.token {
- token::Ident(ident, false) if ident.name == kw::Underscore => {
- let span = self.span;
+ match self.token.kind {
+ token::Ident(name, false) if name == kw::Underscore => {
+ let span = self.token.span;
self.bump();
- Ok(Ident::new(ident.name, span))
+ Ok(Ident::new(name, span))
}
_ => self.parse_ident(),
}
// span in the case of something like `<T>::Bar`.
let (mut path, path_span);
if self.eat_keyword(kw::As) {
- let path_lo = self.span;
+ let path_lo = self.token.span;
path = self.parse_path(PathStyle::Type)?;
path_span = path_lo.to(self.prev_span);
} else {
- path_span = self.span.to(self.span);
+ path_span = self.token.span.to(self.token.span);
path = ast::Path { segments: Vec::new(), span: path_span };
}
path
});
- let lo = self.meta_var_span.unwrap_or(self.span);
+ let lo = self.meta_var_span.unwrap_or(self.token.span);
let mut segments = Vec::new();
- let mod_sep_ctxt = self.span.ctxt();
+ let mod_sep_ctxt = self.token.span.ctxt();
if self.eat(&token::ModSep) {
segments.push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt)));
}
/// backwards-compatibility. This is used when parsing derive macro paths in `#[derive]`
/// attributes.
pub fn parse_path_allowing_meta(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
- let meta_ident = match self.token {
+ let meta_ident = match self.token.kind {
token::Interpolated(ref nt) => match **nt {
token::NtMeta(ref meta) => match meta.node {
ast::MetaItemKind::Word => Some(meta.path.clone()),
fn parse_path_segment(&mut self, style: PathStyle) -> PResult<'a, PathSegment> {
let ident = self.parse_path_segment_ident()?;
- let is_args_start = |token: &token::Token| match *token {
+ let is_args_start = |token: &Token| match token.kind {
token::Lt | token::BinOp(token::Shl) | token::OpenDelim(token::Paren)
| token::LArrow => true,
_ => false,
// Generic arguments are found - `<`, `(`, `::<` or `::(`.
self.eat(&token::ModSep);
- let lo = self.span;
+ let lo = self.token.span;
let args = if self.eat_lt() {
// `<'a, T, A = U>`
let (args, constraints) =
/// Parses a single lifetime `'a` or panics.
crate fn expect_lifetime(&mut self) -> Lifetime {
if let Some(ident) = self.token.lifetime() {
- let span = self.span;
+ let span = self.token.span;
self.bump();
Lifetime { ident: Ident::new(ident.name, span), id: ast::DUMMY_NODE_ID }
} else {
- self.span_bug(self.span, "not a lifetime")
+ self.span_bug(self.token.span, "not a lifetime")
}
}
fn eat_label(&mut self) -> Option<Label> {
if let Some(ident) = self.token.lifetime() {
- let span = self.span;
+ let span = self.token.span;
self.bump();
Some(Label { ident: Ident::new(ident.name, span) })
} else {
}
fn parse_field_name(&mut self) -> PResult<'a, Ident> {
- if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = self.token {
- self.expect_no_suffix(self.span, "a tuple index", suffix);
+ if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) =
+ self.token.kind {
+ self.expect_no_suffix(self.token.span, "a tuple index", suffix);
self.bump();
Ok(Ident::new(symbol, self.prev_span))
} else {
/// Parse ident (COLON expr)?
fn parse_field(&mut self) -> PResult<'a, Field> {
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
// Check if a colon exists one ahead. This means we're parsing a fieldname.
let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| {
// initialize a field with an eq rather than a colon.
if self.token == token::Eq {
self.diagnostic()
- .struct_span_err(self.span, "expected `:`, found `=`")
+ .struct_span_err(self.token.span, "expected `:`, found `=`")
.span_suggestion(
- fieldname.span.shrink_to_hi().to(self.span),
+ fieldname.span.shrink_to_hi().to(self.token.span),
"replace equals symbol with a colon",
":".to_string(),
Applicability::MachineApplicable,
limits: RangeLimits)
-> PResult<'a, ast::ExprKind> {
if end.is_none() && limits == RangeLimits::Closed {
- Err(self.span_fatal_err(self.span, Error::InclusiveRangeWithNoEnd))
+ Err(self.span_fatal_err(self.token.span, Error::InclusiveRangeWithNoEnd))
} else {
Ok(ExprKind::Range(start, end, limits))
}
}
fn expect_delimited_token_tree(&mut self) -> PResult<'a, (MacDelimiter, TokenStream)> {
- let delim = match self.token {
+ let delim = match self.token.kind {
token::OpenDelim(delim) => delim,
_ => {
let msg = "expected open delimiter";
let mut err = self.fatal(msg);
- err.span_label(self.span, msg);
+ err.span_label(self.token.span, msg);
return Err(err)
}
};
// attributes by giving them a empty "already parsed" list.
let mut attrs = ThinVec::new();
- let lo = self.span;
- let mut hi = self.span;
+ let lo = self.token.span;
+ let mut hi = self.token.span;
let ex: ExprKind;
- // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
- match self.token {
+ // Note: when adding new syntax here, don't forget to adjust TokenKind::can_begin_expr().
+ match self.token.kind {
token::OpenDelim(token::Paren) => {
self.bump();
}
let msg = "expected `while`, `for`, `loop` or `{` after a label";
let mut err = self.fatal(msg);
- err.span_label(self.span, msg);
+ err.span_label(self.token.span, msg);
return Err(err);
}
if self.eat_keyword(kw::Loop) {
return Err(db);
}
if self.is_try_block() {
- let lo = self.span;
+ let lo = self.token.span;
assert!(self.eat_keyword(kw::Try));
return self.parse_try_block(lo, attrs);
}
// Span::rust_2018() is somewhat expensive; don't get it repeatedly.
- let is_span_rust_2018 = self.span.rust_2018();
+ let is_span_rust_2018 = self.token.span.rust_2018();
if is_span_rust_2018 && self.check_keyword(kw::Async) {
return if self.is_async_block() { // check for `async {` and `async move {`
self.parse_async_block(attrs)
// Catch this syntax error here, instead of in `parse_ident`, so
// that we can explicitly mention that let is not to be used as an expression
let mut db = self.fatal("expected expression, found statement (`let`)");
- db.span_label(self.span, "expected expression");
+ db.span_label(self.token.span, "expected expression");
db.note("variable declaration using `let` is a statement");
return Err(db);
} else if is_span_rust_2018 && self.eat_keyword(kw::Await) {
// | ^ expected expression
// ```
self.bump();
- return Ok(self.mk_expr(self.span, ExprKind::Err, ThinVec::new()));
+ return Ok(self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new()));
}
match self.parse_literal_maybe_minus() {
Ok(expr) => {
"cannot use a comma after the base struct",
);
err.span_suggestion_short(
- self.span,
+ self.token.span,
"remove this comma",
String::new(),
Applicability::MachineApplicable
}
let mut recovery_field = None;
- if let token::Ident(ident, _) = self.token {
+ if let token::Ident(name, _) = self.token.kind {
if !self.token.is_reserved_ident() && self.look_ahead(1, |t| *t == token::Colon) {
// Use in case of error after field-looking code: `S { foo: () with a }`
- let mut ident = ident.clone();
- ident.span = self.span;
recovery_field = Some(ast::Field {
- ident,
- span: self.span,
- expr: self.mk_expr(self.span, ExprKind::Err, ThinVec::new()),
+ ident: Ident::new(name, self.token.span),
+ span: self.token.span,
+ expr: self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new()),
is_shorthand: false,
attrs: ThinVec::new(),
});
}
}
- let span = lo.to(self.span);
+ let span = lo.to(self.token.span);
self.expect(&token::CloseDelim(token::Brace))?;
return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs));
}
// Assuming we have just parsed `.`, continue parsing into an expression.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
- if self.span.rust_2018() && self.eat_keyword(kw::Await) {
+ if self.token.span.rust_2018() && self.eat_keyword(kw::Await) {
let span = lo.to(self.prev_span);
let await_expr = self.mk_expr(
span,
let segment = self.parse_path_segment(PathStyle::Expr)?;
self.check_trailing_angle_brackets(&segment, token::OpenDelim(token::Paren));
- Ok(match self.token {
+ Ok(match self.token.kind {
token::OpenDelim(token::Paren) => {
// Method call `expr.f()`
let mut args = self.parse_unspanned_seq(
// expr.f
if self.eat(&token::Dot) {
- match self.token {
+ match self.token.kind {
token::Ident(..) => {
e = self.parse_dot_suffix(e, lo)?;
}
token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => {
- let span = self.span;
+ let span = self.token.span;
self.bump();
let field = ExprKind::Field(e, Ident::new(symbol, span));
e = self.mk_expr(lo.to(span), field, ThinVec::new());
_ => {
// FIXME Could factor this out into non_fatal_unexpected or something.
let actual = self.this_token_to_string();
- self.span_err(self.span, &format!("unexpected token: `{}`", actual));
+ self.span_err(self.token.span, &format!("unexpected token: `{}`", actual));
}
}
continue;
}
if self.expr_is_complete(&e) { break; }
- match self.token {
+ match self.token.kind {
// expr(...)
token::OpenDelim(token::Paren) => {
let seq = self.parse_unspanned_seq(
token::OpenDelim(token::Bracket) => {
self.bump();
let ix = self.parse_expr()?;
- hi = self.span;
+ hi = self.token.span;
self.expect(&token::CloseDelim(token::Bracket))?;
let index = self.mk_index(e, ix);
e = self.mk_expr(lo.to(hi), index, ThinVec::new())
}
crate fn process_potential_macro_variable(&mut self) {
- let (token, span) = match self.token {
- token::Dollar if self.span.ctxt() != syntax_pos::hygiene::SyntaxContext::empty() &&
+ self.token = match self.token.kind {
+ token::Dollar if self.token.span.ctxt() != SyntaxContext::empty() &&
self.look_ahead(1, |t| t.is_ident()) => {
self.bump();
- let name = match self.token {
- token::Ident(ident, _) => ident,
+ let name = match self.token.kind {
+ token::Ident(name, _) => name,
_ => unreachable!()
};
- let mut err = self.fatal(&format!("unknown macro variable `{}`", name));
- err.span_label(self.span, "unknown macro variable");
- err.emit();
+ let span = self.prev_span.to(self.token.span);
+ self.diagnostic()
+ .struct_span_fatal(span, &format!("unknown macro variable `{}`", name))
+ .span_label(span, "unknown macro variable")
+ .emit();
self.bump();
return
}
token::Interpolated(ref nt) => {
- self.meta_var_span = Some(self.span);
+ self.meta_var_span = Some(self.token.span);
// Interpolated identifier and lifetime tokens are replaced with usual identifier
// and lifetime tokens, so the former are never encountered during normal parsing.
match **nt {
- token::NtIdent(ident, is_raw) => (token::Ident(ident, is_raw), ident.span),
- token::NtLifetime(ident) => (token::Lifetime(ident), ident.span),
+ token::NtIdent(ident, is_raw) =>
+ Token::new(token::Ident(ident.name, is_raw), ident.span),
+ token::NtLifetime(ident) =>
+ Token::new(token::Lifetime(ident.name), ident.span),
_ => return,
}
}
_ => return,
};
- self.token = token;
- self.span = span;
}
/// Parses a single token tree from the input.
crate fn parse_token_tree(&mut self) -> TokenTree {
- match self.token {
+ match self.token.kind {
token::OpenDelim(..) => {
let frame = mem::replace(&mut self.token_cursor.frame,
self.token_cursor.stack.pop().unwrap());
- self.span = frame.span.entire();
+ self.token.span = frame.span.entire();
self.bump();
TokenTree::Delimited(
frame.span,
},
token::CloseDelim(_) | token::Eof => unreachable!(),
_ => {
- let (token, span) = (mem::replace(&mut self.token, token::Whitespace), self.span);
+ let token = self.token.take();
self.bump();
- TokenTree::Token(span, token)
+ TokenTree::Token(token)
}
}
}
pub fn parse_tokens(&mut self) -> TokenStream {
let mut result = Vec::new();
loop {
- match self.token {
+ match self.token.kind {
token::Eof | token::CloseDelim(..) => break,
_ => result.push(self.parse_token_tree().into()),
}
already_parsed_attrs: Option<ThinVec<Attribute>>)
-> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
- let lo = self.span;
- // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
- let (hi, ex) = match self.token {
+ let lo = self.token.span;
+ // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
+ let (hi, ex) = match self.token.kind {
token::Not => {
self.bump();
let e = self.parse_prefix_expr(None);
// `not` is just an ordinary identifier in Rust-the-language,
// but as `rustc`-the-compiler, we can issue clever diagnostics
// for confused users who really want to say `!`
- let token_cannot_continue_expr = |t: &token::Token| match *t {
+ let token_cannot_continue_expr = |t: &Token| match t.kind {
// These tokens can start an expression after `!`, but
// can't continue an expression after an ident
- token::Ident(ident, is_raw) => token::ident_can_begin_expr(ident, is_raw),
+ token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw),
token::Literal(..) | token::Pound => true,
token::Interpolated(ref nt) => match **nt {
token::NtIdent(..) | token::NtExpr(..) |
self.bump();
// Emit the error ...
let mut err = self.diagnostic()
- .struct_span_err(self.span,
+ .struct_span_err(self.token.span,
&format!("unexpected {} after identifier",
self.this_token_descr()));
// span the `not` plus trailing whitespace to avoid
// trailing whitespace after the `!` in our suggestion
let to_replace = self.sess.source_map()
- .span_until_non_whitespace(lo.to(self.span));
+ .span_until_non_whitespace(lo.to(self.token.span));
err.span_suggestion_short(
to_replace,
"use `!` to perform logical negation",
LhsExpr::AttributesParsed(attrs) => Some(attrs),
_ => None,
};
- if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token) {
+ if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) {
return self.parse_prefix_range_expr(attrs);
} else {
self.parse_prefix_expr(attrs)?
// `if x { a } else { b } && if y { c } else { d }`
if !self.look_ahead(1, |t| t.is_reserved_ident()) => {
// These cases are ambiguous and can't be identified in the parser alone
- let sp = self.sess.source_map().start_point(self.span);
+ let sp = self.sess.source_map().start_point(self.token.span);
self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
return Ok(lhs);
}
// We've found an expression that would be parsed as a statement, but the next
// token implies this should be parsed as an expression.
// For example: `if let Some(x) = x { x } else { 0 } / 2`
- let mut err = self.sess.span_diagnostic.struct_span_err(self.span, &format!(
+ let mut err = self.sess.span_diagnostic.struct_span_err(self.token.span, &format!(
"expected expression, found `{}`",
pprust::token_to_string(&self.token),
));
- err.span_label(self.span, "expected expression");
+ err.span_label(self.token.span, "expected expression");
self.sess.expr_parentheses_needed(
&mut err,
lhs.span,
_ => lhs.span,
};
- let cur_op_span = self.span;
+ let cur_op_span = self.token.span;
let restrictions = if op.is_assign_like() {
self.restrictions & Restrictions::NO_STRUCT_LITERAL
} else {
}
// Check for deprecated `...` syntax
if self.token == token::DotDotDot && op == AssocOp::DotDotEq {
- self.err_dotdotdot_syntax(self.span);
+ self.err_dotdotdot_syntax(self.token.span);
}
self.bump();
continue
} else if op == AssocOp::Colon {
let maybe_path = self.could_ascription_be_path(&lhs.node);
- let next_sp = self.span;
+ let next_sp = self.token.span;
lhs = match self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type) {
Ok(lhs) => lhs,
match self.parse_path(PathStyle::Expr) {
Ok(path) => {
- let (op_noun, op_verb) = match self.token {
+ let (op_noun, op_verb) = match self.token.kind {
token::Lt => ("comparison", "comparing"),
token::BinOp(token::Shl) => ("shift", "shifting"),
_ => {
// in AST and continue parsing.
let msg = format!("`<` is interpreted as a start of generic \
arguments for `{}`, not a {}", path, op_noun);
- let mut err = self.sess.span_diagnostic.struct_span_err(self.span, &msg);
- err.span_label(self.look_ahead_span(1).to(parser_snapshot_after_type.span),
+ let mut err =
+ self.sess.span_diagnostic.struct_span_err(self.token.span, &msg);
+ let span_after_type = parser_snapshot_after_type.token.span;
+ err.span_label(self.look_ahead(1, |t| t.span).to(span_after_type),
"interpreted as generic arguments");
- err.span_label(self.span, format!("not interpreted as {}", op_noun));
+ err.span_label(self.token.span, format!("not interpreted as {}", op_noun));
let expr = mk_expr(self, P(Ty {
span: path.span,
-> PResult<'a, P<Expr>> {
// Check for deprecated `...` syntax
if self.token == token::DotDotDot {
- self.err_dotdotdot_syntax(self.span);
+ self.err_dotdotdot_syntax(self.token.span);
}
- debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token),
+ debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind),
"parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq",
self.token);
let tok = self.token.clone();
let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
- let lo = self.span;
- let mut hi = self.span;
+ let lo = self.token.span;
+ let mut hi = self.token.span;
self.bump();
let opt_end = if self.is_at_start_of_range_notation_rhs() {
// RHS must be parsed with more associativity than the dots.
attrs: ThinVec<Attribute>)
-> PResult<'a, P<Expr>>
{
- let lo = self.span;
+ let lo = self.token.span;
let movability = if self.eat_keyword(kw::Static) {
Movability::Static
} else {
Movability::Movable
};
- let asyncness = if self.span.rust_2018() {
+ let asyncness = if self.token.span.rust_2018() {
self.parse_asyncness()
} else {
IsAsync::NotAsync
_ => {
// If an explicit return type is given, require a
// block to appear (RFC 968).
- let body_lo = self.span;
+ let body_lo = self.token.span;
self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())?
}
};
let pat = self.parse_top_level_pat()?;
if !self.eat_keyword(kw::In) {
- let in_span = self.prev_span.between(self.span);
+ let in_span = self.prev_span.between(self.token.span);
let mut err = self.sess.span_diagnostic
.struct_span_err(in_span, "missing `in` in `for` loop");
err.span_suggestion_short(
pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>)
-> PResult<'a, P<Expr>>
{
- let span_lo = self.span;
+ let span_lo = self.token.span;
self.expect_keyword(kw::Async)?;
let capture_clause = if self.eat_keyword(kw::Move) {
CaptureBy::Value
let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL,
None)?;
if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) {
- if self.token == token::Token::Semi {
+ if self.token == token::Semi {
e.span_suggestion_short(
match_span,
"try removing this `match`",
// Recover by skipping to the end of the block.
e.emit();
self.recover_stmt();
- let span = lo.to(self.span);
+ let span = lo.to(self.token.span);
if self.token == token::CloseDelim(token::Brace) {
self.bump();
}
}
}
}
- let hi = self.span;
+ let hi = self.token.span;
self.bump();
return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs));
}
crate fn parse_arm(&mut self) -> PResult<'a, Arm> {
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
let pats = self.parse_pats()?;
let guard = if self.eat_keyword(kw::If) {
Some(Guard::If(self.parse_expr()?))
} else {
None
};
- let arrow_span = self.span;
+ let arrow_span = self.token.span;
self.expect(&token::FatArrow)?;
- let arm_start_span = self.span;
+ let arm_start_span = self.token.span;
let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None)
.map_err(|mut err| {
let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
&& self.token != token::CloseDelim(token::Brace);
- let hi = self.span;
+ let hi = self.token.span;
if require_comma {
let cm = self.sess.source_map();
// | |
// | arrow_span
// X | &X => "x"
- // | - ^^ self.span
+ // | - ^^ self.token.span
// | |
// | parsed until here as `"y" & X`
err.span_suggestion_short(
pats.push(self.parse_top_level_pat()?);
if self.token == token::OrOr {
- let mut err = self.struct_span_err(self.span,
+ let mut err = self.struct_span_err(self.token.span,
"unexpected token `||` after pattern");
err.span_suggestion(
- self.span,
+ self.token.span,
"use a single `|` to specify multiple patterns",
"|".to_owned(),
Applicability::MachineApplicable
} else {
// Parsing a pattern of the form "(box) (ref) (mut) fieldname"
let is_box = self.eat_keyword(kw::Box);
- let boxed_span = self.span;
+ let boxed_span = self.token.span;
let is_ref = self.eat_keyword(kw::Ref);
let is_mut = self.eat_keyword(kw::Mut);
let fieldname = self.parse_ident()?;
while self.token != token::CloseDelim(token::Brace) {
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
// check that a comma comes after every field
if !ate_comma {
if self.check(&token::DotDot) || self.token == token::DotDotDot {
etc = true;
- let mut etc_sp = self.span;
+ let mut etc_sp = self.token.span;
if self.token == token::DotDotDot { // Issue #46718
// Accept `...` as if it were `..` to avoid further errors
- let mut err = self.struct_span_err(self.span,
+ let mut err = self.struct_span_err(self.token.span,
"expected field pattern, found `...`");
err.span_suggestion(
- self.span,
+ self.token.span,
"to omit remaining fields, use one fewer `.`",
"..".to_owned(),
Applicability::MachineApplicable
let token_str = self.this_token_descr();
let mut err = self.fatal(&format!("expected `}}`, found {}", token_str));
- err.span_label(self.span, "expected `}`");
+ err.span_label(self.token.span, "expected `}`");
let mut comma_sp = None;
if self.token == token::Comma { // Issue #49257
- etc_sp = etc_sp.to(self.sess.source_map().span_until_non_whitespace(self.span));
+ let nw_span = self.sess.source_map().span_until_non_whitespace(self.token.span);
+ etc_sp = etc_sp.to(nw_span);
err.span_label(etc_sp,
"`..` must be at the end and cannot have a trailing comma");
- comma_sp = Some(self.span);
+ comma_sp = Some(self.token.span);
self.bump();
ate_comma = true;
}
- etc_span = Some(etc_sp.until(self.span));
+ etc_span = Some(etc_sp.until(self.token.span));
if self.token == token::CloseDelim(token::Brace) {
// If the struct looks otherwise well formed, recover and continue.
if let Some(sp) = comma_sp {
"move the `..` to the end of the field list",
vec![
(etc_span, String::new()),
- (self.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
+ (self.token.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
],
Applicability::MachineApplicable,
);
fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
if self.token.is_path_start() {
- let lo = self.span;
+ let lo = self.token.span;
let (qself, path) = if self.eat_lt() {
// Parse a qualified path
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
// helper function to decide whether to parse as ident binding or to try to do
// something more complex like range patterns
fn parse_as_ident(&mut self) -> bool {
- self.look_ahead(1, |t| match *t {
+ self.look_ahead(1, |t| match t.kind {
token::OpenDelim(token::Paren) | token::OpenDelim(token::Brace) |
token::DotDotDot | token::DotDotEq | token::ModSep | token::Not => Some(false),
// ensure slice patterns [a, b.., c] and [a, b, c..] don't go into the
// range pattern branch
token::DotDot => None,
_ => Some(true),
- }).unwrap_or_else(|| self.look_ahead(2, |t| match *t {
+ }).unwrap_or_else(|| self.look_ahead(2, |t| match t.kind {
token::Comma | token::CloseDelim(token::Bracket) => true,
_ => false,
}))
// parentheses in what should have been a tuple pattern; return a
// suggestion-enhanced error here rather than choking on the comma
// later.
- let comma_span = self.span;
+ let comma_span = self.token.span;
self.bump();
if let Err(mut err) = self.parse_pat_list() {
// We didn't expect this to work anyway; we just wanted
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole!(self, NtPat, |x| x);
- let lo = self.span;
+ let lo = self.token.span;
let pat;
- match self.token {
+ match self.token.kind {
token::BinOp(token::And) | token::AndAnd => {
// Parse &pat / &mut pat
self.expect_and()?;
let mutbl = self.parse_mutability();
- if let token::Lifetime(ident) = self.token {
- let mut err = self.fatal(&format!("unexpected lifetime `{}` in pattern",
- ident));
- err.span_label(self.span, "unexpected lifetime");
+ if let token::Lifetime(name) = self.token.kind {
+ let mut err = self.fatal(&format!("unexpected lifetime `{}` in pattern", name));
+ err.span_label(self.token.span, "unexpected lifetime");
return Err(err);
}
let subpat = self.parse_pat_with_range_pat(false, expected)?;
pat = PatKind::Wild;
} else if self.eat_keyword(kw::Mut) {
// Parse mut ident @ pat / mut ref ident @ pat
- let mutref_span = self.prev_span.to(self.span);
+ let mutref_span = self.prev_span.to(self.token.span);
let binding_mode = if self.eat_keyword(kw::Ref) {
self.diagnostic()
.struct_span_err(mutref_span, "the order of `mut` and `ref` is incorrect")
// Parse an unqualified path
(None, self.parse_path(PathStyle::Expr)?)
};
- match self.token {
+ match self.token.kind {
token::Not if qself.is_none() => {
// Parse macro invocation
self.bump();
pat = PatKind::Mac(mac);
}
token::DotDotDot | token::DotDotEq | token::DotDot => {
- let end_kind = match self.token {
+ let end_kind = match self.token.kind {
token::DotDot => RangeEnd::Excluded,
token::DotDotDot => RangeEnd::Included(RangeSyntax::DotDotDot),
token::DotDotEq => RangeEnd::Included(RangeSyntax::DotDotEq),
_ => panic!("can only parse `..`/`...`/`..=` for ranges \
(checked above)"),
};
- let op_span = self.span;
+ let op_span = self.token.span;
// Parse range
let span = lo.to(self.prev_span);
let begin = self.mk_expr(span, ExprKind::Path(qself, path), ThinVec::new());
if qself.is_some() {
let msg = "unexpected `{` after qualified path";
let mut err = self.fatal(msg);
- err.span_label(self.span, msg);
+ err.span_label(self.token.span, msg);
return Err(err);
}
// Parse struct pattern
if qself.is_some() {
let msg = "unexpected `(` after qualified path";
let mut err = self.fatal(msg);
- err.span_label(self.span, msg);
+ err.span_label(self.token.span, msg);
return Err(err);
}
// Parse tuple struct or enum pattern
// Try to parse everything else as literal with optional minus
match self.parse_literal_maybe_minus() {
Ok(begin) => {
- let op_span = self.span;
+ let op_span = self.token.span;
if self.check(&token::DotDot) || self.check(&token::DotDotEq) ||
self.check(&token::DotDotDot) {
let end_kind = if self.eat(&token::DotDotDot) {
self.this_token_descr(),
);
let mut err = self.fatal(&msg);
- err.span_label(self.span, format!("expected {}", expected));
- let sp = self.sess.source_map().start_point(self.span);
+ err.span_label(self.token.span, format!("expected {}", expected));
+ let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
self.sess.expr_parentheses_needed(&mut err, *sp, None);
}
}
};
let hi = if self.token == token::Semi {
- self.span
+ self.token.span
} else {
self.prev_span
};
fn is_try_block(&self) -> bool {
self.token.is_keyword(kw::Try) &&
self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) &&
- self.span.rust_2018() &&
+ self.token.span.rust_2018() &&
// prevent `while try {} {}`, `if try {} {} else {}`, etc.
!self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
fn eat_macro_def(&mut self, attrs: &[Attribute], vis: &Visibility, lo: Span)
-> PResult<'a, Option<P<Item>>> {
- let token_lo = self.span;
- let (ident, def) = match self.token {
- token::Ident(ident, false) if ident.name == kw::Macro => {
+ let token_lo = self.token.span;
+ let (ident, def) = match self.token.kind {
+ token::Ident(name, false) if name == kw::Macro => {
self.bump();
let ident = self.parse_ident()?;
let tokens = if self.check(&token::OpenDelim(token::Brace)) {
};
TokenStream::new(vec![
args.into(),
- TokenTree::Token(token_lo.to(self.prev_span), token::FatArrow).into(),
+ TokenTree::token(token::FatArrow, token_lo.to(self.prev_span)).into(),
body.into(),
])
} else {
(ident, ast::MacroDef { tokens: tokens.into(), legacy: false })
}
- token::Ident(ident, _) if ident.name == sym::macro_rules &&
- self.look_ahead(1, |t| *t == token::Not) => {
+ token::Ident(name, _) if name == sym::macro_rules &&
+ self.look_ahead(1, |t| *t == token::Not) => {
let prev_span = self.prev_span;
self.complain_if_pub_macro(&vis.node, prev_span);
self.bump();
maybe_whole!(self, NtStmt, |x| Some(x));
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
Ok(Some(if self.eat_keyword(kw::Let) {
Stmt {
}
// it's a macro invocation
- let id = match self.token {
+ let id = match self.token.kind {
token::OpenDelim(_) => Ident::invalid(), // no special identifier
_ => self.parse_ident()?,
};
// check that we're pointing at delimiters (need to check
// again after the `if`, because of `parse_ident`
// consuming more tokens).
- match self.token {
+ match self.token.kind {
token::OpenDelim(_) => {}
_ => {
// we only expect an ident if we didn't parse one
let mut err = self.fatal(&format!("expected {}`(` or `{{`, found {}",
ident_str,
tok_str));
- err.span_label(self.span, format!("expected {}`(` or `{{`", ident_str));
+ err.span_label(self.token.span, format!("expected {}`(` or `{{`", ident_str));
return Err(err)
},
}
// We used to incorrectly stop parsing macro-expanded statements here.
// If the next token will be an error anyway but could have parsed with the
// earlier behavior, stop parsing here and emit a warning to avoid breakage.
- else if macro_legacy_warnings && self.token.can_begin_expr() && match self.token {
+ else if macro_legacy_warnings &&
+ self.token.can_begin_expr() &&
+ match self.token.kind {
// These can continue an expression, so we can't stop parsing and warn.
token::OpenDelim(token::Paren) | token::OpenDelim(token::Bracket) |
token::BinOp(token::Minus) | token::BinOp(token::Star) |
if s.prev_token_kind == PrevTokenKind::DocComment {
s.span_fatal_err(s.prev_span, Error::UselessDocComment).emit();
} else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
- s.span_err(s.span, "expected statement after outer attribute");
+ s.span_err(
+ s.token.span, "expected statement after outer attribute"
+ );
}
}
};
pub fn parse_block(&mut self) -> PResult<'a, P<Block>> {
maybe_whole!(self, NtBlock, |x| x);
- let lo = self.span;
+ let lo = self.token.span;
if !self.eat(&token::OpenDelim(token::Brace)) {
- let sp = self.span;
+ let sp = self.token.span;
let tok = self.this_token_descr();
let mut e = self.span_fatal(sp, &format!("expected `{{`, found {}", tok));
let do_not_suggest_help =
if self.token.is_ident_named(sym::and) {
e.span_suggestion_short(
- self.span,
+ self.token.span,
"use `&&` instead of `and` for the boolean operator",
"&&".to_string(),
Applicability::MaybeIncorrect,
}
if self.token.is_ident_named(sym::or) {
e.span_suggestion_short(
- self.span,
+ self.token.span,
"use `||` instead of `or` for the boolean operator",
"||".to_string(),
Applicability::MaybeIncorrect,
fn parse_inner_attrs_and_block(&mut self) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
maybe_whole!(self, NtBlock, |x| (Vec::new(), x));
- let lo = self.span;
+ let lo = self.token.span;
self.expect(&token::OpenDelim(token::Brace))?;
Ok((self.parse_inner_attributes()?,
self.parse_block_tail(lo, BlockCheckMode::Default)?))
self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore);
Some(Stmt {
id: ast::DUMMY_NODE_ID,
- node: StmtKind::Expr(DummyResult::raw_expr(self.span, true)),
- span: self.span,
+ node: StmtKind::Expr(DummyResult::raw_expr(self.token.span, true)),
+ span: self.token.span,
})
}
Ok(stmt) => stmt,
}
fn warn_missing_semicolon(&self) {
- self.diagnostic().struct_span_warn(self.span, {
+ self.diagnostic().struct_span_warn(self.token.span, {
&format!("expected `;`, found {}", self.this_token_descr())
}).note({
"This was erroneously allowed and will become a hard error in a future release"
let mut last_plus_span = None;
let mut was_negative = false;
loop {
- // This needs to be synchronized with `Token::can_begin_bound`.
+ // This needs to be synchronized with `TokenKind::can_begin_bound`.
let is_bound_start = self.check_path() || self.check_lifetime() ||
self.check(&token::Not) || // used for error reporting only
self.check(&token::Question) ||
self.check_keyword(kw::For) ||
self.check(&token::OpenDelim(token::Paren));
if is_bound_start {
- let lo = self.span;
+ let lo = self.token.span;
let has_parens = self.eat(&token::OpenDelim(token::Paren));
- let inner_lo = self.span;
+ let inner_lo = self.token.span;
let is_negative = self.eat(&token::Not);
let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
if self.token.is_lifetime() {
/// | ( < lifetimes , typaramseq ( , )? > )
/// where typaramseq = ( typaram ) | ( typaram , typaramseq )
fn parse_generics(&mut self) -> PResult<'a, ast::Generics> {
- let span_lo = self.span;
+ let span_lo = self.token.span;
let (params, span) = if self.eat_lt() {
let params = self.parse_generic_params()?;
self.expect_gt()?;
(params, span_lo.to(self.prev_span))
} else {
- (vec![], self.prev_span.between(self.span))
+ (vec![], self.prev_span.between(self.token.span))
};
Ok(ast::Generics {
params,
let mut misplaced_assoc_ty_constraints: Vec<Span> = Vec::new();
let mut assoc_ty_constraints: Vec<Span> = Vec::new();
- let args_lo = self.span;
+ let args_lo = self.token.span;
loop {
if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
} else if self.check_ident() && self.look_ahead(1,
|t| t == &token::Eq || t == &token::Colon) {
// Parse associated type constraint.
- let lo = self.span;
+ let lo = self.token.span;
let ident = self.parse_ident()?;
let kind = if self.eat(&token::Eq) {
AssocTyConstraintKind::Equality {
assoc_ty_constraints.push(span);
} else if self.check_const_arg() {
// Parse const argument.
- let expr = if let token::OpenDelim(token::Brace) = self.token {
- self.parse_block_expr(None, self.span, BlockCheckMode::Default, ThinVec::new())?
+ let expr = if let token::OpenDelim(token::Brace) = self.token.kind {
+ self.parse_block_expr(
+ None, self.token.span, BlockCheckMode::Default, ThinVec::new()
+ )?
} else if self.token.is_ident() {
// FIXME(const_generics): to distinguish between idents for types and consts,
// we should introduce a GenericArg::Ident in the AST and distinguish when
}
loop {
- let lo = self.span;
+ let lo = self.token.span;
if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
let lifetime = self.expect_lifetime();
// Bounds starting with a colon are mandatory, but possibly empty.
-> PResult<'a, (Vec<Arg> , bool)> {
self.expect(&token::OpenDelim(token::Paren))?;
- let sp = self.span;
+ let sp = self.token.span;
let mut c_variadic = false;
let (args, recovered): (Vec<Option<Arg>>, bool) =
self.parse_seq_to_before_end(
if let TyKind::CVarArgs = arg.ty.node {
c_variadic = true;
if p.token != token::CloseDelim(token::Paren) {
- let span = p.span;
+ let span = p.token.span;
p.span_err(span,
"`...` must be the last argument of a C-variadic function");
Ok(None)
/// Returns the parsed optional self argument and whether a self shortcut was used.
fn parse_self_arg(&mut self) -> PResult<'a, Option<Arg>> {
- let expect_ident = |this: &mut Self| match this.token {
+ let expect_ident = |this: &mut Self| match this.token.kind {
// Preserve hygienic context.
- token::Ident(ident, _) =>
- { let span = this.span; this.bump(); Ident::new(ident.name, span) }
+ token::Ident(name, _) =>
+ { let span = this.token.span; this.bump(); Ident::new(name, span) }
_ => unreachable!()
};
let isolated_self = |this: &mut Self, n| {
// Parse optional `self` parameter of a method.
// Only a limited set of initial token sequences is considered `self` parameters; anything
// else is parsed as a normal function parameter list, so some lookahead is required.
- let eself_lo = self.span;
- let (eself, eself_ident, eself_hi) = match self.token {
+ let eself_lo = self.token.span;
+ let (eself, eself_ident, eself_hi) = match self.token.kind {
token::BinOp(token::And) => {
// `&self`
// `&mut self`
let msg = "cannot pass `self` by raw pointer";
(if isolated_self(self, 1) {
self.bump();
- self.struct_span_err(self.span, msg)
- .span_label(self.span, msg)
+ self.struct_span_err(self.token.span, msg)
+ .span_label(self.token.span, msg)
.emit();
SelfKind::Value(Mutability::Immutable)
} else if self.look_ahead(1, |t| t.is_mutability()) &&
isolated_self(self, 2) {
self.bump();
self.bump();
- self.struct_span_err(self.span, msg)
- .span_label(self.span, msg)
+ self.struct_span_err(self.token.span, msg)
+ .span_label(self.token.span, msg)
.emit();
SelfKind::Value(Mutability::Immutable)
} else {
fn parse_impl_item_(&mut self,
at_end: &mut bool,
mut attrs: Vec<Attribute>) -> PResult<'a, ImplItem> {
- let lo = self.span;
+ let lo = self.token.span;
let vis = self.parse_visibility(false)?;
let defaultness = self.parse_defaultness();
let (name, node, generics) = if let Some(type_) = self.eat_type() {
match *vis {
VisibilityKind::Inherited => {}
_ => {
- let is_macro_rules: bool = match self.token {
- token::Ident(sid, _) => sid.name == sym::macro_rules,
- _ => false,
- };
- let mut err = if is_macro_rules {
+ let mut err = if self.token.is_keyword(sym::macro_rules) {
let mut err = self.diagnostic()
.struct_span_err(sp, "can't qualify macro_rules invocation with `pub`");
err.span_suggestion(
self.expect(&token::OpenDelim(token::Brace))?;
let mut trait_items = vec![];
while !self.eat(&token::CloseDelim(token::Brace)) {
- if let token::DocComment(_) = self.token {
+ if let token::DocComment(_) = self.token.kind {
if self.look_ahead(1,
- |tok| tok == &token::Token::CloseDelim(token::Brace)) {
+ |tok| tok == &token::CloseDelim(token::Brace)) {
let mut err = self.diagnostic().struct_span_err_with_code(
- self.span,
+ self.token.span,
"found a documentation comment that doesn't document anything",
DiagnosticId::Error("E0584".into()),
);
let err_path = |span| ast::Path::from_ident(Ident::new(kw::Invalid, span));
let ty_first = if self.token.is_keyword(kw::For) &&
self.look_ahead(1, |t| t != &token::Lt) {
- let span = self.prev_span.between(self.span);
+ let span = self.prev_span.between(self.token.span);
self.struct_span_err(span, "missing trait in a trait impl").emit();
P(Ty { node: TyKind::Path(None, err_path(span)), span, id: ast::DUMMY_NODE_ID })
} else {
// If `for` is missing we try to recover.
let has_for = self.eat_keyword(kw::For);
- let missing_for_span = self.prev_span.between(self.span);
+ let missing_for_span = self.prev_span.between(self.token.span);
let ty_second = if self.token == token::DotDot {
// We need to report this error after `cfg` expansion for compatibility reasons
"expected `where`, `{{`, `(`, or `;` after struct name, found {}",
token_str
));
- err.span_label(self.span, "expected `where`, `{`, `(`, or `;` after struct name");
+ err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name");
return Err(err);
};
let token_str = self.this_token_descr();
let mut err = self.fatal(&format!(
"expected `where` or `{{` after union name, found {}", token_str));
- err.span_label(self.span, "expected `where` or `{` after union name");
+ err.span_label(self.token.span, "expected `where` or `{` after union name");
return Err(err);
};
let token_str = self.this_token_descr();
let mut err = self.fatal(&format!(
"expected `where`, or `{{` after struct name, found {}", token_str));
- err.span_label(self.span, "expected `where`, or `{` after struct name");
+ err.span_label(self.token.span, "expected `where`, or `{` after struct name");
return Err(err);
}
SeqSep::trailing_allowed(token::Comma),
|p| {
let attrs = p.parse_outer_attributes()?;
- let lo = p.span;
+ let lo = p.token.span;
let vis = p.parse_visibility(true)?;
let ty = p.parse_ty()?;
Ok(StructField {
if self.token == token::Comma {
seen_comma = true;
}
- match self.token {
+ match self.token.kind {
token::Comma => {
self.bump();
}
token::CloseDelim(token::Brace) => {}
token::DocComment(_) => {
let previous_span = self.prev_span;
- let mut err = self.span_fatal_err(self.span, Error::UselessDocComment);
+ let mut err = self.span_fatal_err(self.token.span, Error::UselessDocComment);
self.bump(); // consume the doc comment
let comma_after_doc_seen = self.eat(&token::Comma);
// `seen_comma` is always false, because we are inside doc block
/// Parses an element of a struct declaration.
fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> {
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
let vis = self.parse_visibility(false)?;
self.parse_single_struct_field(lo, vis, attrs)
}
// We need a span for our `Spanned<VisibilityKind>`, but there's inherently no
// keyword to grab a span from for inherited visibility; an empty span at the
// beginning of the current token would seem to be the "Schelling span".
- return Ok(respan(self.span.shrink_to_lo(), VisibilityKind::Inherited))
+ return Ok(respan(self.token.span.shrink_to_lo(), VisibilityKind::Inherited))
}
let lo = self.prev_span;
}
/// Given a termination token, parses all of the items in a module.
- fn parse_mod_items(&mut self, term: &token::Token, inner_lo: Span) -> PResult<'a, Mod> {
+ fn parse_mod_items(&mut self, term: &TokenKind, inner_lo: Span) -> PResult<'a, Mod> {
let mut items = vec![];
while let Some(item) = self.parse_item()? {
items.push(item);
let token_str = self.this_token_descr();
if !self.maybe_consume_incorrect_semicolon(&items) {
let mut err = self.fatal(&format!("expected item, found {}", token_str));
- err.span_label(self.span, "expected item");
+ err.span_label(self.token.span, "expected item");
return Err(err);
}
}
- let hi = if self.span.is_dummy() {
+ let hi = if self.token.span.is_dummy() {
inner_lo
} else {
self.prev_span
(!self.cfg_mods || strip_unconfigured.in_cfg(&outer_attrs), outer_attrs)
};
- let id_span = self.span;
+ let id_span = self.token.span;
let id = self.parse_ident()?;
if self.eat(&token::Semi) {
if in_cfg && self.recurse_into_file_modules {
self.push_directory(id, &outer_attrs);
self.expect(&token::OpenDelim(token::Brace))?;
- let mod_inner_lo = self.span;
+ let mod_inner_lo = self.token.span;
let attrs = self.parse_inner_attributes()?;
let module = self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)?;
let mut p0 =
new_sub_parser_from_file(self.sess, &path, directory_ownership, Some(name), id_sp);
p0.cfg_mods = self.cfg_mods;
- let mod_inner_lo = p0.span;
+ let mod_inner_lo = p0.token.span;
let mod_attrs = p0.parse_inner_attributes()?;
let mut m0 = p0.parse_mod_items(&token::Eof, mod_inner_lo)?;
m0.inline = false;
let (ident, mut generics) = self.parse_fn_header()?;
let decl = self.parse_fn_decl(true)?;
generics.where_clause = self.parse_where_clause()?;
- let hi = self.span;
+ let hi = self.token.span;
self.expect(&token::Semi)?;
Ok(ast::ForeignItem {
ident,
let ident = self.parse_ident()?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
- let hi = self.span;
+ let hi = self.token.span;
self.expect(&token::Semi)?;
Ok(ForeignItem {
ident,
self.expect_keyword(kw::Type)?;
let ident = self.parse_ident()?;
- let hi = self.span;
+ let hi = self.token.span;
self.expect(&token::Semi)?;
Ok(ast::ForeignItem {
ident: ident,
let mut replacement = vec![];
let mut fixed_crate_name = false;
// Accept `extern crate name-like-this` for better diagnostics
- let dash = token::Token::BinOp(token::BinOpToken::Minus);
+ let dash = token::BinOp(token::BinOpToken::Minus);
if self.token == dash { // Do not include `-` as part of the expected tokens list
while self.eat(&dash) {
fixed_crate_name = true;
let mut any_disr = vec![];
while self.token != token::CloseDelim(token::Brace) {
let variant_attrs = self.parse_outer_attributes()?;
- let vlo = self.span;
+ let vlo = self.token.span;
let struct_def;
let mut disr_expr = None;
/// Parses a string as an ABI spec on an extern type or module. Consumes
/// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> PResult<'a, Option<Abi>> {
- match self.token {
+ match self.token.kind {
token::Literal(token::Lit { kind: token::Str, symbol, suffix }) |
token::Literal(token::Lit { kind: token::StrRaw(..), symbol, suffix }) => {
- let sp = self.span;
+ let sp = self.token.span;
self.expect_no_suffix(sp, "an ABI spec", suffix);
self.bump();
match abi::lookup(&symbol.as_str()) {
if token.is_keyword(kw::Move) {
return true;
}
- match *token {
+ match token.kind {
token::BinOp(token::Or) | token::OrOr => true,
_ => false,
}
Some(P(item))
});
- let lo = self.span;
+ let lo = self.token.span;
let visibility = self.parse_visibility(false)?;
// Parse `async unsafe? fn`.
if self.check_keyword(kw::Async) {
- let async_span = self.span;
+ let async_span = self.token.span;
if self.is_keyword_ahead(1, &[kw::Fn])
|| self.is_keyword_ahead(2, &[kw::Fn])
{
item_,
visibility,
maybe_append(attrs, extra_attrs));
- if self.span.rust_2015() {
+ if self.token.span.rust_2015() {
self.diagnostic().struct_span_err_with_code(
async_span,
"`async fn` is not permitted in the 2015 edition",
//
// pub S {}
// ^^^ `sp` points here
- let sp = self.prev_span.between(self.span);
- let full_sp = self.prev_span.to(self.span);
- let ident_sp = self.span;
+ let sp = self.prev_span.between(self.token.span);
+ let full_sp = self.prev_span.to(self.token.span);
+ let ident_sp = self.token.span;
if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) {
// possible public struct definition where `struct` was forgotten
let ident = self.parse_ident().unwrap();
maybe_whole!(self, NtForeignItem, |ni| ni);
let attrs = self.parse_outer_attributes()?;
- let lo = self.span;
+ let lo = self.token.span;
let visibility = self.parse_visibility(false)?;
// FOREIGN STATIC ITEM
if self.check_keyword(kw::Static) || self.token.is_keyword(kw::Const) {
if self.token.is_keyword(kw::Const) {
self.diagnostic()
- .struct_span_err(self.span, "extern items cannot be `const`")
+ .struct_span_err(self.token.span, "extern items cannot be `const`")
.span_suggestion(
- self.span,
+ self.token.span,
"try using a static value",
"static".to_owned(),
Applicability::MachineApplicable
visibility: Visibility
) -> PResult<'a, Option<P<Item>>> {
if macros_allowed && self.token.is_path_start() &&
- !(self.is_async_fn() && self.span.rust_2015()) {
+ !(self.is_async_fn() && self.token.span.rust_2015()) {
// MACRO INVOCATION ITEM
let prev_span = self.prev_span;
self.complain_if_pub_macro(&visibility.node, prev_span);
- let mac_lo = self.span;
+ let mac_lo = self.token.span;
// item macro.
let pth = self.parse_path(PathStyle::Mod)?;
at_end: &mut bool) -> PResult<'a, Option<Mac>>
{
if self.token.is_path_start() &&
- !(self.is_async_fn() && self.span.rust_2015()) {
+ !(self.is_async_fn() && self.token.span.rust_2015()) {
let prev_span = self.prev_span;
- let lo = self.span;
+ let lo = self.token.span;
let pth = self.parse_path(PathStyle::Mod)?;
if pth.segments.len() == 1 {
/// PATH [`as` IDENT]
/// ```
fn parse_use_tree(&mut self) -> PResult<'a, UseTree> {
- let lo = self.span;
+ let lo = self.token.span;
let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo() };
let kind = if self.check(&token::OpenDelim(token::Brace)) ||
self.check(&token::BinOp(token::Star)) ||
self.is_import_coupler() {
// `use *;` or `use ::*;` or `use {...};` or `use ::{...};`
- let mod_sep_ctxt = self.span.ctxt();
+ let mod_sep_ctxt = self.token.span.ctxt();
if self.eat(&token::ModSep) {
prefix.segments.push(
PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt))
/// Parses a source module as a crate. This is the main entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<'a, Crate> {
- let lo = self.span;
+ let lo = self.token.span;
let krate = Ok(ast::Crate {
attrs: self.parse_inner_attributes()?,
module: self.parse_mod_items(&token::Eof, lo)?,
- span: lo.to(self.span),
+ span: lo.to(self.token.span),
});
krate
}
pub fn parse_optional_str(&mut self) -> Option<(Symbol, ast::StrStyle, Option<ast::Name>)> {
- let ret = match self.token {
+ let ret = match self.token.kind {
token::Literal(token::Lit { kind: token::Str, symbol, suffix }) =>
(symbol, ast::StrStyle::Cooked, suffix),
token::Literal(token::Lit { kind: token::StrRaw(n), symbol, suffix }) =>
_ => {
let msg = "expected string literal";
let mut err = self.fatal(msg);
- err.span_label(self.span, msg);
+ err.span_label(self.token.span, msg);
Err(err)
}
}
for unmatched in unclosed_delims.iter() {
let mut err = handler.struct_span_err(unmatched.found_span, &format!(
"incorrect close delimiter: `{}`",
- pprust::token_to_string(&token::Token::CloseDelim(unmatched.found_delim)),
+ pprust::token_kind_to_string(&token::CloseDelim(unmatched.found_delim)),
));
err.span_label(unmatched.found_span, "incorrect close delimiter");
if let Some(sp) = unmatched.candidate_span {
pub use Nonterminal::*;
pub use DelimToken::*;
pub use LitKind::*;
-pub use Token::*;
+pub use TokenKind::*;
use crate::ast::{self};
-use crate::parse::ParseSess;
+use crate::parse::{parse_stream_from_source_str, ParseSess};
use crate::print::pprust;
use crate::ptr::P;
use crate::symbol::kw;
-use crate::syntax::parse::parse_stream_from_source_str;
use crate::tokenstream::{self, DelimSpan, TokenStream, TokenTree};
-use syntax_pos::symbol::{self, Symbol};
-use syntax_pos::{self, Span, FileName};
+use syntax_pos::symbol::Symbol;
+use syntax_pos::{self, Span, FileName, DUMMY_SP};
use log::info;
use std::fmt;
}
}
-pub(crate) fn ident_can_begin_expr(ident: ast::Ident, is_raw: bool) -> bool {
- let ident_token: Token = Ident(ident, is_raw);
+pub(crate) fn ident_can_begin_expr(name: ast::Name, span: Span, is_raw: bool) -> bool {
+ let ident_token = Token::new(Ident(name, is_raw), span);
!ident_token.is_reserved_ident() ||
ident_token.is_path_segment_keyword() ||
kw::While,
kw::Yield,
kw::Static,
- ].contains(&ident.name)
+ ].contains(&name)
}
-fn ident_can_begin_type(ident: ast::Ident, is_raw: bool) -> bool {
- let ident_token: Token = Ident(ident, is_raw);
+fn ident_can_begin_type(name: ast::Name, span: Span, is_raw: bool) -> bool {
+ let ident_token = Token::new(Ident(name, is_raw), span);
!ident_token.is_reserved_ident() ||
ident_token.is_path_segment_keyword() ||
kw::Extern,
kw::Typeof,
kw::Dyn,
- ].contains(&ident.name)
+ ].contains(&name)
}
-#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)]
-pub enum Token {
+#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug)]
+pub enum TokenKind {
/* Expression-operator symbols. */
Eq,
Lt,
Literal(Lit),
/* Name components */
- Ident(ast::Ident, /* is_raw */ bool),
- Lifetime(ast::Ident),
+ Ident(ast::Name, /* is_raw */ bool),
+ Lifetime(ast::Name),
Interpolated(Lrc<Nonterminal>),
Eof,
}
-// `Token` is used a lot. Make sure it doesn't unintentionally get bigger.
+// `TokenKind` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(target_arch = "x86_64")]
-static_assert_size!(Token, 16);
+static_assert_size!(TokenKind, 16);
+
+#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug)]
+pub struct Token {
+ pub kind: TokenKind,
+ pub span: Span,
+}
+
+impl TokenKind {
+ pub fn lit(kind: LitKind, symbol: Symbol, suffix: Option<Symbol>) -> TokenKind {
+ Literal(Lit::new(kind, symbol, suffix))
+ }
+
+ /// Returns tokens that are likely to be typed accidentally instead of the current token.
+ /// Enables better error recovery when the wrong token is found.
+ crate fn similar_tokens(&self) -> Option<Vec<TokenKind>> {
+ match *self {
+ Comma => Some(vec![Dot, Lt, Semi]),
+ Semi => Some(vec![Colon, Comma]),
+ _ => None
+ }
+ }
+}
impl Token {
+ crate fn new(kind: TokenKind, span: Span) -> Self {
+ Token { kind, span }
+ }
+
+ /// Some token that will be thrown away later.
+ crate fn dummy() -> Self {
+ Token::new(TokenKind::Whitespace, DUMMY_SP)
+ }
+
/// Recovers a `Token` from an `ast::Ident`. This creates a raw identifier if necessary.
- pub fn from_ast_ident(ident: ast::Ident) -> Token {
- Ident(ident, ident.is_raw_guess())
+ crate fn from_ast_ident(ident: ast::Ident) -> Self {
+ Token::new(Ident(ident.name, ident.is_raw_guess()), ident.span)
+ }
+
+ /// Return this token by value and leave a dummy token in its place.
+ crate fn take(&mut self) -> Self {
+ mem::replace(self, Token::dummy())
+ }
+
+ crate fn is_op(&self) -> bool {
+ match self.kind {
+ OpenDelim(..) | CloseDelim(..) | Literal(..) | DocComment(..) |
+ Ident(..) | Lifetime(..) | Interpolated(..) |
+ Whitespace | Comment | Shebang(..) | Eof => false,
+ _ => true,
+ }
}
crate fn is_like_plus(&self) -> bool {
- match *self {
+ match self.kind {
BinOp(Plus) | BinOpEq(Plus) => true,
_ => false,
}
/// Returns `true` if the token can appear at the start of an expression.
crate fn can_begin_expr(&self) -> bool {
- match *self {
- Ident(ident, is_raw) =>
- ident_can_begin_expr(ident, is_raw), // value name or keyword
+ match self.kind {
+ Ident(name, is_raw) =>
+ ident_can_begin_expr(name, self.span, is_raw), // value name or keyword
OpenDelim(..) | // tuple, array or block
Literal(..) | // literal
Not | // operator not
/// Returns `true` if the token can appear at the start of a type.
crate fn can_begin_type(&self) -> bool {
- match *self {
- Ident(ident, is_raw) =>
- ident_can_begin_type(ident, is_raw), // type name or keyword
+ match self.kind {
+ Ident(name, is_raw) =>
+ ident_can_begin_type(name, self.span, is_raw), // type name or keyword
OpenDelim(Paren) | // tuple
OpenDelim(Bracket) | // array
Not | // never
}
/// Returns `true` if the token can appear at the start of a const param.
- pub fn can_begin_const_arg(&self) -> bool {
- match self {
+ crate fn can_begin_const_arg(&self) -> bool {
+ match self.kind {
OpenDelim(Brace) => true,
Interpolated(ref nt) => match **nt {
NtExpr(..) => true,
self == &Question || self == &OpenDelim(Paren)
}
- pub fn lit(kind: LitKind, symbol: Symbol, suffix: Option<Symbol>) -> Token {
- Literal(Lit::new(kind, symbol, suffix))
- }
-
/// Returns `true` if the token is any literal
crate fn is_lit(&self) -> bool {
- match *self {
+ match self.kind {
Literal(..) => true,
_ => false,
}
}
crate fn expect_lit(&self) -> Lit {
- match *self {
+ match self.kind {
Literal(lit) => lit,
_=> panic!("`expect_lit` called on non-literal"),
}
/// Returns `true` if the token is any literal, a minus (which can prefix a literal,
/// for example a '-42', or one of the boolean idents).
crate fn can_begin_literal_or_bool(&self) -> bool {
- match *self {
+ match self.kind {
Literal(..) => true,
BinOp(Minus) => true,
- Ident(ident, false) if ident.name == kw::True => true,
- Ident(ident, false) if ident.name == kw::False => true,
+ Ident(name, false) if name == kw::True => true,
+ Ident(name, false) if name == kw::False => true,
Interpolated(ref nt) => match **nt {
NtLiteral(..) => true,
_ => false,
/// Returns an identifier if this token is an identifier.
pub fn ident(&self) -> Option<(ast::Ident, /* is_raw */ bool)> {
- match *self {
- Ident(ident, is_raw) => Some((ident, is_raw)),
+ match self.kind {
+ Ident(name, is_raw) => Some((ast::Ident::new(name, self.span), is_raw)),
Interpolated(ref nt) => match **nt {
NtIdent(ident, is_raw) => Some((ident, is_raw)),
_ => None,
_ => None,
}
}
+
/// Returns a lifetime identifier if this token is a lifetime.
pub fn lifetime(&self) -> Option<ast::Ident> {
- match *self {
- Lifetime(ident) => Some(ident),
+ match self.kind {
+ Lifetime(name) => Some(ast::Ident::new(name, self.span)),
Interpolated(ref nt) => match **nt {
NtLifetime(ident) => Some(ident),
_ => None,
_ => None,
}
}
+
/// Returns `true` if the token is an identifier.
pub fn is_ident(&self) -> bool {
self.ident().is_some()
/// Returns `true` if the token is a identifier whose name is the given
/// string slice.
crate fn is_ident_named(&self, name: Symbol) -> bool {
- match self.ident() {
- Some((ident, _)) => ident.name == name,
- None => false
- }
+ self.ident().map_or(false, |(ident, _)| ident.name == name)
}
/// Returns `true` if the token is an interpolated path.
fn is_path(&self) -> bool {
- if let Interpolated(ref nt) = *self {
+ if let Interpolated(ref nt) = self.kind {
if let NtPath(..) = **nt {
return true;
}
/// Returns `true` if the token is a given keyword, `kw`.
pub fn is_keyword(&self, kw: Symbol) -> bool {
- self.ident().map(|(ident, is_raw)| ident.name == kw && !is_raw).unwrap_or(false)
+ self.ident().map(|(id, is_raw)| id.name == kw && !is_raw).unwrap_or(false)
}
- pub fn is_path_segment_keyword(&self) -> bool {
+ crate fn is_path_segment_keyword(&self) -> bool {
match self.ident() {
Some((id, false)) => id.is_path_segment_keyword(),
_ => false,
// Returns true for reserved identifiers used internally for elided lifetimes,
// unnamed method parameters, crate root module, error recovery etc.
- pub fn is_special_ident(&self) -> bool {
+ crate fn is_special_ident(&self) -> bool {
match self.ident() {
Some((id, false)) => id.is_special(),
_ => false,
}
crate fn glue(self, joint: Token) -> Option<Token> {
- Some(match self {
- Eq => match joint {
+ let kind = match self.kind {
+ Eq => match joint.kind {
Eq => EqEq,
Gt => FatArrow,
_ => return None,
},
- Lt => match joint {
+ Lt => match joint.kind {
Eq => Le,
Lt => BinOp(Shl),
Le => BinOpEq(Shl),
BinOp(Minus) => LArrow,
_ => return None,
},
- Gt => match joint {
+ Gt => match joint.kind {
Eq => Ge,
Gt => BinOp(Shr),
Ge => BinOpEq(Shr),
_ => return None,
},
- Not => match joint {
+ Not => match joint.kind {
Eq => Ne,
_ => return None,
},
- BinOp(op) => match joint {
+ BinOp(op) => match joint.kind {
Eq => BinOpEq(op),
BinOp(And) if op == And => AndAnd,
BinOp(Or) if op == Or => OrOr,
Gt if op == Minus => RArrow,
_ => return None,
},
- Dot => match joint {
+ Dot => match joint.kind {
Dot => DotDot,
DotDot => DotDotDot,
_ => return None,
},
- DotDot => match joint {
+ DotDot => match joint.kind {
Dot => DotDotDot,
Eq => DotDotEq,
_ => return None,
},
- Colon => match joint {
+ Colon => match joint.kind {
Colon => ModSep,
_ => return None,
},
- SingleQuote => match joint {
- Ident(ident, false) => {
- let name = Symbol::intern(&format!("'{}", ident));
- Lifetime(symbol::Ident {
- name,
- span: ident.span,
- })
- }
+ SingleQuote => match joint.kind {
+ Ident(name, false) => Lifetime(Symbol::intern(&format!("'{}", name))),
_ => return None,
},
Question | OpenDelim(..) | CloseDelim(..) |
Literal(..) | Ident(..) | Lifetime(..) | Interpolated(..) | DocComment(..) |
Whitespace | Comment | Shebang(..) | Eof => return None,
- })
- }
+ };
- /// Returns tokens that are likely to be typed accidentally instead of the current token.
- /// Enables better error recovery when the wrong token is found.
- crate fn similar_tokens(&self) -> Option<Vec<Token>> {
- match *self {
- Comma => Some(vec![Dot, Lt, Semi]),
- Semi => Some(vec![Colon, Comma]),
- _ => None
- }
+ Some(Token::new(kind, self.span.to(joint.span)))
}
// See comments in `Nonterminal::to_tokenstream` for why we care about
// *probably* equal here rather than actual equality
crate fn probably_equal_for_proc_macro(&self, other: &Token) -> bool {
- if mem::discriminant(self) != mem::discriminant(other) {
+ if mem::discriminant(&self.kind) != mem::discriminant(&other.kind) {
return false
}
- match (self, other) {
+ match (&self.kind, &other.kind) {
(&Eq, &Eq) |
(&Lt, &Lt) |
(&Le, &Le) |
(&Literal(a), &Literal(b)) => a == b,
- (&Lifetime(a), &Lifetime(b)) => a.name == b.name,
- (&Ident(a, b), &Ident(c, d)) => b == d && (a.name == c.name ||
- a.name == kw::DollarCrate ||
- c.name == kw::DollarCrate),
+ (&Lifetime(a), &Lifetime(b)) => a == b,
+ (&Ident(a, b), &Ident(c, d)) => b == d && (a == c ||
+ a == kw::DollarCrate ||
+ c == kw::DollarCrate),
(&Interpolated(_), &Interpolated(_)) => false,
}
}
+impl PartialEq<TokenKind> for Token {
+ fn eq(&self, rhs: &TokenKind) -> bool {
+ self.kind == *rhs
+ }
+}
+
#[derive(Clone, RustcEncodable, RustcDecodable)]
/// For interpolation during macro expansion.
pub enum Nonterminal {
prepend_attrs(sess, &item.attrs, item.tokens.as_ref(), span)
}
Nonterminal::NtIdent(ident, is_raw) => {
- let token = Token::Ident(ident, is_raw);
- Some(TokenTree::Token(ident.span, token).into())
+ Some(TokenTree::token(Ident(ident.name, is_raw), ident.span).into())
}
Nonterminal::NtLifetime(ident) => {
- let token = Token::Lifetime(ident);
- Some(TokenTree::Token(ident.span, token).into())
+ Some(TokenTree::token(Lifetime(ident.name), ident.span).into())
}
Nonterminal::NtTT(ref tt) => {
Some(tt.clone().into())
}
}
-crate fn is_op(tok: &Token) -> bool {
- match *tok {
- OpenDelim(..) | CloseDelim(..) | Literal(..) | DocComment(..) |
- Ident(..) | Lifetime(..) | Interpolated(..) |
- Whitespace | Comment | Shebang(..) | Eof => false,
- _ => true,
- }
-}
-
fn prepend_attrs(sess: &ParseSess,
attrs: &[ast::Attribute],
tokens: Option<&tokenstream::TokenStream>,
// For simple paths, push the identifier directly
if attr.path.segments.len() == 1 && attr.path.segments[0].args.is_none() {
let ident = attr.path.segments[0].ident;
- let token = Ident(ident, ident.as_str().starts_with("r#"));
- brackets.push(tokenstream::TokenTree::Token(ident.span, token));
+ let token = Ident(ident.name, ident.as_str().starts_with("r#"));
+ brackets.push(tokenstream::TokenTree::token(token, ident.span));
// ... and for more complicated paths, fall back to a reparse hack that
// should eventually be removed.
// The span we list here for `#` and for `[ ... ]` are both wrong in
// that it encompasses more than each token, but it hopefully is "good
// enough" for now at least.
- builder.push(tokenstream::TokenTree::Token(attr.span, Pound));
+ builder.push(tokenstream::TokenTree::token(Pound, attr.span));
let delim_span = DelimSpan::from_single(attr.span);
builder.push(tokenstream::TokenTree::Delimited(
delim_span, DelimToken::Bracket, brackets.build().into()));
-//! Utilities for validating string and char literals and turning them into
+//! Utilities for validating string and char literals and turning them into
//! values they represent.
use std::str::Chars;
LoneSlash,
InvalidEscape,
BareCarriageReturn,
+ BareCarriageReturnInRawString,
EscapeOnlyChar,
TooShortHexEscape,
UnicodeEscapeInByte,
NonAsciiCharInByte,
+ NonAsciiCharInByteString,
}
/// Takes a contents of a char literal (without quotes), and returns an
})
}
+/// Takes the contents of a raw string literal (without quotes) and produces a
+/// sequence of characters or errors.
+/// NOTE: Raw strings do not perform any explicit character escaping, here we
+/// only translate CRLF to LF and produce errors on bare CR.
+pub(crate) fn unescape_raw_str<F>(literal_text: &str, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+{
+ unescape_raw_str_or_byte_str(literal_text, Mode::Str, callback)
+}
+
+/// Takes the contents of a raw byte string literal (without quotes) and
+/// produces a sequence of bytes or errors.
+/// NOTE: Raw byte strings do not perform any explicit character escaping, here
+/// we only translate CRLF to LF and produce errors on bare CR.
+pub(crate) fn unescape_raw_byte_str<F>(literal_text: &str, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<u8, EscapeError>),
+{
+ unescape_raw_str_or_byte_str(literal_text, Mode::ByteStr, &mut |range, char| {
+ callback(range, char.map(byte_from_char))
+ })
+}
+
#[derive(Debug, Clone, Copy)]
pub(crate) enum Mode {
Char,
}
}
+/// Takes the contents of a raw string or raw byte string literal (without
+/// quotes) and produces a sequence of characters or errors.
+/// NOTE: Raw strings do not perform any explicit character escaping, here we
+/// only translate CRLF to LF and produce errors on bare CR.
+fn unescape_raw_str_or_byte_str<F>(literal_text: &str, mode: Mode, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+{
+ assert!(mode.in_double_quotes());
+ let initial_len = literal_text.len();
+
+ let mut chars = literal_text.chars();
+ while let Some(curr) = chars.next() {
+ let start = initial_len - chars.as_str().len() - curr.len_utf8();
+
+ let result = match (curr, chars.clone().next()) {
+ ('\r', Some('\n')) => {
+ chars.next();
+ Ok('\n')
+ },
+ ('\r', _) => Err(EscapeError::BareCarriageReturnInRawString),
+ (c, _) if mode.is_bytes() && !c.is_ascii() =>
+ Err(EscapeError::NonAsciiCharInByteString),
+ (c, _) => Ok(c),
+ };
+ let end = initial_len - chars.as_str().len();
+
+ callback(start..end, result);
+ }
+}
+
fn byte_from_char(c: char) -> u8 {
let res = c as u32;
- assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::Byte");
+ assert!(res <= u8::max_value() as u32, "guaranteed because of Mode::Byte(Str)");
res as u8
}
};
handler.span_err(span, msg);
}
+ EscapeError::BareCarriageReturnInRawString => {
+ assert!(mode.in_double_quotes());
+ let msg = "bare CR not allowed in raw string";
+ handler.span_err(span, msg);
+ }
EscapeError::InvalidEscape => {
let (c, span) = last_char();
handler.span_err(span, "byte constant must be ASCII. \
Use a \\xHH escape for a non-ASCII byte")
}
+ EscapeError::NonAsciiCharInByteString => {
+ assert!(mode.is_bytes());
+ let (_c, span) = last_char();
+ handler.span_err(span, "raw byte string must be ASCII")
+ }
EscapeError::OutOfRangeHexEscape => {
handler.span_err(span, "this form of character escape may only be used \
with characters in the range [\\x00-\\x7f]")
use crate::util::parser::{self, AssocOp, Fixity};
use crate::attr;
use crate::source_map::{self, SourceMap, Spanned};
-use crate::parse::token::{self, BinOpToken, Nonterminal, Token};
+use crate::parse::token::{self, BinOpToken, Nonterminal, Token, TokenKind};
use crate::parse::lexer::comments;
use crate::parse::{self, ParseSess};
use crate::print::pp::{self, Breaks};
let mut out = match kind {
token::Byte => format!("b'{}'", symbol),
token::Char => format!("'{}'", symbol),
- token::Bool |
- token::Float |
- token::Integer => symbol.to_string(),
token::Str => format!("\"{}\"", symbol),
token::StrRaw(n) => format!("r{delim}\"{string}\"{delim}",
delim="#".repeat(n as usize),
token::ByteStrRaw(n) => format!("br{delim}\"{string}\"{delim}",
delim="#".repeat(n as usize),
string=symbol),
- token::Err => format!("'{}'", symbol),
+ token::Integer |
+ token::Float |
+ token::Bool |
+ token::Err => symbol.to_string(),
};
if let Some(suffix) = suffix {
out
}
-pub fn token_to_string(tok: &Token) -> String {
+pub fn token_kind_to_string(tok: &TokenKind) -> String {
match *tok {
token::Eq => "=".to_string(),
token::Lt => "<".to_string(),
}
}
+pub fn token_to_string(token: &Token) -> String {
+ token_kind_to_string(&token.kind)
+}
+
pub fn nonterminal_to_string(nt: &Nonterminal) -> String {
match *nt {
token::NtExpr(ref e) => expr_to_string(e),
/// expression arguments as expressions). It can be done! I think.
fn print_tt(&mut self, tt: tokenstream::TokenTree) -> io::Result<()> {
match tt {
- TokenTree::Token(_, ref tk) => {
- self.writer().word(token_to_string(tk))?;
- match *tk {
- parse::token::DocComment(..) => {
+ TokenTree::Token(ref token) => {
+ self.writer().word(token_to_string(&token))?;
+ match token.kind {
+ token::DocComment(..) => {
self.writer().hardbreak()
}
_ => Ok(())
}
}
TokenTree::Delimited(_, delim, tts) => {
- self.writer().word(token_to_string(&token::OpenDelim(delim)))?;
+ self.writer().word(token_kind_to_string(&token::OpenDelim(delim)))?;
self.writer().space()?;
self.print_tts(tts)?;
self.writer().space()?;
- self.writer().word(token_to_string(&token::CloseDelim(delim)))
+ self.writer().word(token_kind_to_string(&token::CloseDelim(delim)))
},
}
}
use crate::ext::base;
use crate::ext::tt::{macro_parser, quoted};
use crate::parse::Directory;
-use crate::parse::token::{self, DelimToken, Token};
+use crate::parse::token::{self, DelimToken, Token, TokenKind};
use crate::print::pprust;
use syntax_pos::{BytePos, Mark, Span, DUMMY_SP};
#[derive(Debug, Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum TokenTree {
/// A single token
- Token(Span, token::Token),
+ Token(Token),
/// A delimited sequence of token trees
Delimited(DelimSpan, DelimToken, TokenStream),
}
#[cfg(parallel_compiler)]
fn _dummy()
where
- Span: Send + Sync,
- token::Token: Send + Sync,
+ Token: Send + Sync,
DelimSpan: Send + Sync,
DelimToken: Send + Sync,
TokenStream: Send + Sync,
/// Checks if this TokenTree is equal to the other, regardless of span information.
pub fn eq_unspanned(&self, other: &TokenTree) -> bool {
match (self, other) {
- (&TokenTree::Token(_, ref tk), &TokenTree::Token(_, ref tk2)) => tk == tk2,
- (&TokenTree::Delimited(_, delim, ref tts),
- &TokenTree::Delimited(_, delim2, ref tts2)) => {
+ (TokenTree::Token(token), TokenTree::Token(token2)) => token.kind == token2.kind,
+ (TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
delim == delim2 && tts.eq_unspanned(&tts2)
}
- (_, _) => false,
+ _ => false,
}
}
// different method.
pub fn probably_equal_for_proc_macro(&self, other: &TokenTree) -> bool {
match (self, other) {
- (&TokenTree::Token(_, ref tk), &TokenTree::Token(_, ref tk2)) => {
- tk.probably_equal_for_proc_macro(tk2)
+ (TokenTree::Token(token), TokenTree::Token(token2)) => {
+ token.probably_equal_for_proc_macro(token2)
}
- (&TokenTree::Delimited(_, delim, ref tts),
- &TokenTree::Delimited(_, delim2, ref tts2)) => {
+ (TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
delim == delim2 && tts.probably_equal_for_proc_macro(&tts2)
}
- (_, _) => false,
+ _ => false,
}
}
/// Retrieves the TokenTree's span.
pub fn span(&self) -> Span {
- match *self {
- TokenTree::Token(sp, _) => sp,
+ match self {
+ TokenTree::Token(token) => token.span,
TokenTree::Delimited(sp, ..) => sp.entire(),
}
}
/// Modify the `TokenTree`'s span in-place.
pub fn set_span(&mut self, span: Span) {
- match *self {
- TokenTree::Token(ref mut sp, _) => *sp = span,
- TokenTree::Delimited(ref mut sp, ..) => *sp = DelimSpan::from_single(span),
- }
- }
-
- /// Indicates if the stream is a token that is equal to the provided token.
- pub fn eq_token(&self, t: Token) -> bool {
- match *self {
- TokenTree::Token(_, ref tk) => *tk == t,
- _ => false,
+ match self {
+ TokenTree::Token(token) => token.span = span,
+ TokenTree::Delimited(dspan, ..) => *dspan = DelimSpan::from_single(span),
}
}
TokenStream::new(vec![(self, Joint)])
}
+ pub fn token(kind: TokenKind, span: Span) -> TokenTree {
+ TokenTree::Token(Token::new(kind, span))
+ }
+
/// Returns the opening delimiter as a token tree.
pub fn open_tt(span: Span, delim: DelimToken) -> TokenTree {
let open_span = if span.is_dummy() {
} else {
span.with_hi(span.lo() + BytePos(delim.len() as u32))
};
- TokenTree::Token(open_span, token::OpenDelim(delim))
+ TokenTree::token(token::OpenDelim(delim), open_span)
}
/// Returns the closing delimiter as a token tree.
} else {
span.with_lo(span.hi() - BytePos(delim.len() as u32))
};
- TokenTree::Token(close_span, token::CloseDelim(delim))
+ TokenTree::token(token::CloseDelim(delim), close_span)
}
}
/// A `TokenStream` is an abstract sequence of tokens, organized into `TokenTree`s.
/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
/// instead of a representation of the abstract syntax tree.
-/// Today's `TokenTree`s can still contain AST via `Token::Interpolated` for back-compat.
+/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for back-compat.
///
/// The use of `Option` is an optimization that avoids the need for an
/// allocation when the stream is empty. However, it is not guaranteed that an
while let Some((pos, ts)) = iter.next() {
if let Some((_, next)) = iter.peek() {
let sp = match (&ts, &next) {
- (_, (TokenTree::Token(_, token::Token::Comma), _)) => continue,
- ((TokenTree::Token(sp, token_left), NonJoint),
- (TokenTree::Token(_, token_right), _))
+ (_, (TokenTree::Token(Token { kind: token::Comma, .. }), _)) => continue,
+ ((TokenTree::Token(token_left), NonJoint),
+ (TokenTree::Token(token_right), _))
if ((token_left.is_ident() && !token_left.is_reserved_ident())
|| token_left.is_lit()) &&
((token_right.is_ident() && !token_right.is_reserved_ident())
- || token_right.is_lit()) => *sp,
+ || token_right.is_lit()) => token_left.span,
((TokenTree::Delimited(sp, ..), NonJoint), _) => sp.entire(),
_ => continue,
};
let sp = sp.shrink_to_hi();
- let comma = (TokenTree::Token(sp, token::Comma), NonJoint);
+ let comma = (TokenTree::token(token::Comma, sp), NonJoint);
suggestion = Some((pos, comma, sp));
}
}
}
}
-impl From<Token> for TokenStream {
- fn from(token: Token) -> TokenStream {
- TokenTree::Token(DUMMY_SP, token).into()
- }
-}
-
impl<T: Into<TokenStream>> iter::FromIterator<T> for TokenStream {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
TokenStream::from_streams(iter.into_iter().map(Into::into).collect::<SmallVec<_>>())
// streams, making a comparison between a token stream generated from an
// AST and a token stream which was parsed into an AST more reliable.
fn semantic_tree(tree: &TokenTree) -> bool {
- match tree {
- // The pretty printer tends to add trailing commas to
- // everything, and in particular, after struct fields.
- | TokenTree::Token(_, Token::Comma)
- // The pretty printer emits `NoDelim` as whitespace.
- | TokenTree::Token(_, Token::OpenDelim(DelimToken::NoDelim))
- | TokenTree::Token(_, Token::CloseDelim(DelimToken::NoDelim))
- // The pretty printer collapses many semicolons into one.
- | TokenTree::Token(_, Token::Semi)
- // The pretty printer collapses whitespace arbitrarily and can
- // introduce whitespace from `NoDelim`.
- | TokenTree::Token(_, Token::Whitespace)
- // The pretty printer can turn `$crate` into `::crate_name`
- | TokenTree::Token(_, Token::ModSep) => false,
- _ => true
+ if let TokenTree::Token(token) = tree {
+ if let
+ // The pretty printer tends to add trailing commas to
+ // everything, and in particular, after struct fields.
+ | token::Comma
+ // The pretty printer emits `NoDelim` as whitespace.
+ | token::OpenDelim(DelimToken::NoDelim)
+ | token::CloseDelim(DelimToken::NoDelim)
+ // The pretty printer collapses many semicolons into one.
+ | token::Semi
+ // The pretty printer collapses whitespace arbitrarily and can
+ // introduce whitespace from `NoDelim`.
+ | token::Whitespace
+ // The pretty printer can turn `$crate` into `::crate_name`
+ | token::ModSep = token.kind {
+ return false;
+ }
}
+ true
}
let mut t1 = self.trees().filter(semantic_tree);
pub fn push<T: Into<TokenStream>>(&mut self, stream: T) {
let stream = stream.into();
let last_tree_if_joint = self.0.last().and_then(TokenStream::last_tree_if_joint);
- if let Some(TokenTree::Token(last_span, last_tok)) = last_tree_if_joint {
- if let Some((TokenTree::Token(span, tok), is_joint)) = stream.first_tree_and_joint() {
- if let Some(glued_tok) = last_tok.glue(tok) {
+ if let Some(TokenTree::Token(last_token)) = last_tree_if_joint {
+ if let Some((TokenTree::Token(token), is_joint)) = stream.first_tree_and_joint() {
+ if let Some(glued_tok) = last_token.glue(token) {
let last_stream = self.0.pop().unwrap();
self.push_all_but_last_tree(&last_stream);
- let glued_span = last_span.to(span);
- let glued_tt = TokenTree::Token(glued_span, glued_tok);
+ let glued_tt = TokenTree::Token(glued_tok);
let glued_tokenstream = TokenStream::new(vec![(glued_tt, is_joint)]);
self.0.push(glued_tokenstream);
self.push_all_but_first_tree(&stream);
#[cfg(test)]
mod tests {
use super::*;
- use crate::syntax::ast::Ident;
+ use crate::ast::Name;
use crate::with_default_globals;
- use crate::parse::token::Token;
use crate::util::parser_testing::string_to_stream;
use syntax_pos::{Span, BytePos, NO_EXPANSION};
with_default_globals(|| {
let test0: TokenStream = Vec::<TokenTree>::new().into_iter().collect();
let test1: TokenStream =
- TokenTree::Token(sp(0, 1), Token::Ident(Ident::from_str("a"), false)).into();
+ TokenTree::token(token::Ident(Name::intern("a"), false), sp(0, 1)).into();
let test2 = string_to_ts("foo(bar::baz)");
assert_eq!(test0.is_empty(), true);
fn test_dotdotdot() {
with_default_globals(|| {
let mut builder = TokenStreamBuilder::new();
- builder.push(TokenTree::Token(sp(0, 1), Token::Dot).joint());
- builder.push(TokenTree::Token(sp(1, 2), Token::Dot).joint());
- builder.push(TokenTree::Token(sp(2, 3), Token::Dot));
+ builder.push(TokenTree::token(token::Dot, sp(0, 1)).joint());
+ builder.push(TokenTree::token(token::Dot, sp(1, 2)).joint());
+ builder.push(TokenTree::token(token::Dot, sp(2, 3)));
let stream = builder.build();
assert!(stream.eq_unspanned(&string_to_ts("...")));
assert_eq!(stream.trees().count(), 1);
-use crate::parse::token::{Token, BinOpToken};
+use crate::parse::token::{self, Token, BinOpToken};
use crate::symbol::kw;
use crate::ast::{self, BinOpKind};
/// Creates a new AssocOP from a token
pub fn from_token(t: &Token) -> Option<AssocOp> {
use AssocOp::*;
- match *t {
- Token::BinOpEq(k) => Some(AssignOp(k)),
- Token::Eq => Some(Assign),
- Token::BinOp(BinOpToken::Star) => Some(Multiply),
- Token::BinOp(BinOpToken::Slash) => Some(Divide),
- Token::BinOp(BinOpToken::Percent) => Some(Modulus),
- Token::BinOp(BinOpToken::Plus) => Some(Add),
- Token::BinOp(BinOpToken::Minus) => Some(Subtract),
- Token::BinOp(BinOpToken::Shl) => Some(ShiftLeft),
- Token::BinOp(BinOpToken::Shr) => Some(ShiftRight),
- Token::BinOp(BinOpToken::And) => Some(BitAnd),
- Token::BinOp(BinOpToken::Caret) => Some(BitXor),
- Token::BinOp(BinOpToken::Or) => Some(BitOr),
- Token::Lt => Some(Less),
- Token::Le => Some(LessEqual),
- Token::Ge => Some(GreaterEqual),
- Token::Gt => Some(Greater),
- Token::EqEq => Some(Equal),
- Token::Ne => Some(NotEqual),
- Token::AndAnd => Some(LAnd),
- Token::OrOr => Some(LOr),
- Token::DotDot => Some(DotDot),
- Token::DotDotEq => Some(DotDotEq),
+ match t.kind {
+ token::BinOpEq(k) => Some(AssignOp(k)),
+ token::Eq => Some(Assign),
+ token::BinOp(BinOpToken::Star) => Some(Multiply),
+ token::BinOp(BinOpToken::Slash) => Some(Divide),
+ token::BinOp(BinOpToken::Percent) => Some(Modulus),
+ token::BinOp(BinOpToken::Plus) => Some(Add),
+ token::BinOp(BinOpToken::Minus) => Some(Subtract),
+ token::BinOp(BinOpToken::Shl) => Some(ShiftLeft),
+ token::BinOp(BinOpToken::Shr) => Some(ShiftRight),
+ token::BinOp(BinOpToken::And) => Some(BitAnd),
+ token::BinOp(BinOpToken::Caret) => Some(BitXor),
+ token::BinOp(BinOpToken::Or) => Some(BitOr),
+ token::Lt => Some(Less),
+ token::Le => Some(LessEqual),
+ token::Ge => Some(GreaterEqual),
+ token::Gt => Some(Greater),
+ token::EqEq => Some(Equal),
+ token::Ne => Some(NotEqual),
+ token::AndAnd => Some(LAnd),
+ token::OrOr => Some(LOr),
+ token::DotDot => Some(DotDot),
+ token::DotDotEq => Some(DotDotEq),
// DotDotDot is no longer supported, but we need some way to display the error
- Token::DotDotDot => Some(DotDotEq),
- Token::Colon => Some(Colon),
+ token::DotDotDot => Some(DotDotEq),
+ token::Colon => Some(Colon),
_ if t.is_keyword(kw::As) => Some(As),
_ => None
}
ExprKind::AddrOf(_, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
visitor.visit_expr(subexpression)
}
- ExprKind::Lit(_) => {}
ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
visitor.visit_expr(subexpression);
visitor.visit_ty(typ)
ExprKind::TryBlock(ref body) => {
visitor.visit_block(body)
}
- ExprKind::Err => {}
+ ExprKind::Lit(_) | ExprKind::Err => {}
}
visitor.visit_expr_post(expression)
pub fn walk_tt<'a, V: Visitor<'a>>(visitor: &mut V, tt: TokenTree) {
match tt {
- TokenTree::Token(_, tok) => visitor.visit_token(tok),
+ TokenTree::Token(token) => visitor.visit_token(token),
TokenTree::Delimited(_, _, tts) => visitor.visit_tts(tts),
}
}
use syntax::ast;
use syntax::ext::base::{self, *};
use syntax::feature_gate;
-use syntax::parse::{self, token};
+use syntax::parse;
+use syntax::parse::token::{self, Token};
use syntax::ptr::P;
use syntax::symbol::{kw, sym, Symbol};
use syntax::ast::AsmDialect;
let first_colon = tts.iter()
.position(|tt| {
match *tt {
- tokenstream::TokenTree::Token(_, token::Colon) |
- tokenstream::TokenTree::Token(_, token::ModSep) => true,
+ tokenstream::TokenTree::Token(Token { kind: token::Colon, .. }) |
+ tokenstream::TokenTree::Token(Token { kind: token::ModSep, .. }) => true,
_ => false,
}
})
loop {
// MOD_SEP is a double colon '::' without space in between.
// When encountered, the state must be advanced twice.
- match (&p.token, state.next(), state.next().next()) {
+ match (&p.token.kind, state.next(), state.next().next()) {
(&token::Colon, StateNone, _) |
(&token::ModSep, _, StateNone) => {
p.bump();
use syntax::source_map::Spanned;
use syntax::ext::base::*;
use syntax::ext::build::AstBuilder;
-use syntax::parse::token::{self, Token};
+use syntax::parse::token::{self, TokenKind};
use syntax::parse::parser::Parser;
use syntax::print::pprust;
use syntax::ptr::P;
let panic_call = Mac_ {
path: Path::from_ident(Ident::new(sym::panic, sp)),
tts: custom_message.unwrap_or_else(|| {
- TokenStream::from(TokenTree::Token(
- DUMMY_SP,
- Token::lit(token::Str, Symbol::intern(&format!(
+ TokenStream::from(TokenTree::token(
+ TokenKind::lit(token::Str, Symbol::intern(&format!(
"assertion failed: {}",
pprust::expr_to_string(&cond_expr).escape_debug()
)), None),
+ DUMMY_SP,
))
}).into(),
delim: MacDelimiter::Parenthesis,
if parser.token == token::Semi {
let mut err = cx.struct_span_warn(sp, "macro requires an expression as an argument");
err.span_suggestion(
- parser.span,
+ parser.token.span,
"try removing semicolon",
String::new(),
Applicability::MaybeIncorrect
//
// Parse this as an actual message, and suggest inserting a comma. Eventually, this should be
// turned into an error.
- let custom_message = if let token::Literal(token::Lit { kind: token::Str, .. }) = parser.token {
- let mut err = cx.struct_span_warn(parser.span, "unexpected string literal");
+ let custom_message = if let token::Literal(token::Lit { kind: token::Str, .. })
+ = parser.token.kind {
+ let mut err = cx.struct_span_warn(parser.token.span, "unexpected string literal");
let comma_span = cx.source_map().next_point(parser.prev_span);
err.span_suggestion_short(
comma_span,
use syntax::parse::token;
use syntax_pos::Span;
-pub fn expand_cfg<'cx>(cx: &mut ExtCtxt<'_>,
- sp: Span,
- tts: &[tokenstream::TokenTree])
- -> Box<dyn base::MacResult + 'static> {
+pub fn expand_cfg(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: &[tokenstream::TokenTree],
+) -> Box<dyn base::MacResult + 'static> {
let sp = sp.apply_mark(cx.current_expansion.mark);
match parse_cfg(cx, sp, tts) {
match e.node {
ast::ExprKind::Lit(ref lit) => match lit.node {
ast::LitKind::Str(ref s, _)
- | ast::LitKind::Err(ref s)
| ast::LitKind::Float(ref s, _)
| ast::LitKind::FloatUnsuffixed(ref s) => {
accumulator.push_str(&s.as_str());
ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..) => {
cx.span_err(e.span, "cannot concatenate a byte string literal");
}
+ ast::LitKind::Err(_) => {
+ has_errors = true;
+ }
},
ast::ExprKind::Err => {
has_errors = true;
use syntax::ast;
use syntax::ext::base::{self, *};
use syntax::feature_gate;
-use syntax::parse::token;
+use syntax::parse::token::{self, Token};
use syntax::ptr::P;
use syntax_pos::Span;
use syntax_pos::symbol::{Symbol, sym};
for (i, e) in tts.iter().enumerate() {
if i & 1 == 1 {
match *e {
- TokenTree::Token(_, token::Comma) => {}
+ TokenTree::Token(Token { kind: token::Comma, .. }) => {}
_ => {
cx.span_err(sp, "concat_idents! expecting comma.");
return DummyResult::any(sp);
}
} else {
match *e {
- TokenTree::Token(_, token::Ident(ident, _)) =>
- res_str.push_str(&ident.as_str()),
+ TokenTree::Token(Token { kind: token::Ident(name, _), .. }) =>
+ res_str.push_str(&name.as_str()),
_ => {
cx.span_err(sp, "concat_idents! requires ident args.");
return DummyResult::any(sp);
// }
let new = {
- let other_f = match (other_fs.len(), other_fs.get(0)) {
- (1, Some(o_f)) => o_f,
+ let other_f = match other_fs {
+ [o_f] => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(Ord)`"),
};
-> P<Expr>
{
let op = |cx: &mut ExtCtxt<'_>, span: Span, self_f: P<Expr>, other_fs: &[P<Expr>]| {
- let other_f = match (other_fs.len(), other_fs.get(0)) {
- (1, Some(o_f)) => o_f,
+ let other_f = match other_fs {
+ [o_f] => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialEq)`"),
};
// }
let new = {
- let other_f = match (other_fs.len(), other_fs.get(0)) {
- (1, Some(o_f)) => o_f,
+ let other_f = match other_fs {
+ [o_f] => o_f,
_ => {
cx.span_bug(span,
"not exactly 2 arguments in `derive(PartialOrd)`")
};
let par_cmp = |cx: &mut ExtCtxt<'_>, span, self_f: P<Expr>, other_fs: &[P<Expr>], default| {
- let other_f = match (other_fs.len(), other_fs.get(0)) {
- (1, Some(o_f)) => o_f,
+ let other_f = match other_fs {
+ [o_f] => o_f,
_ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"),
};
use syntax::source_map::Span;
use syntax::ext::base::*;
use syntax::parse;
-use syntax::parse::token::{self, Token};
+use syntax::parse::token;
use syntax::tokenstream;
use syntax::visit::Visitor;
use syntax_pos::DUMMY_SP;
// Mark attributes as known, and used.
MarkAttrs(&self.attrs).visit_item(&item);
- let token = Token::Interpolated(Lrc::new(token::NtItem(item)));
- let input = tokenstream::TokenTree::Token(DUMMY_SP, token).into();
+ let token = token::Interpolated(Lrc::new(token::NtItem(item)));
+ let input = tokenstream::TokenTree::token(token, DUMMY_SP).into();
let server = proc_macro_server::Rustc::new(ecx);
let stream = match self.client.run(&EXEC_STRATEGY, server, input) {
}
fn hash_substructure(cx: &mut ExtCtxt<'_>, trait_span: Span, substr: &Substructure<'_>) -> P<Expr> {
- let state_expr = match (substr.nonself_args.len(), substr.nonself_args.get(0)) {
- (1, Some(o_f)) => o_f,
+ let state_expr = match &substr.nonself_args {
+ &[o_f] => o_f,
_ => {
cx.span_bug(trait_span,
"incorrect number of arguments in `derive(Hash)`")
//! The compiler code necessary to implement the `#[derive]` extensions.
use rustc_data_structures::sync::Lrc;
-use syntax::ast;
-use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension, Resolver};
+use syntax::ast::{self, MetaItem};
+use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension, Resolver, MultiItemModifier};
use syntax::ext::build::AstBuilder;
use syntax::ext::hygiene::{Mark, SyntaxContext};
use syntax::ptr::P;
#[path="cmp/ord.rs"]
pub mod ord;
-
pub mod generic;
+struct BuiltinDerive(
+ fn(&mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable))
+);
+
+impl MultiItemModifier for BuiltinDerive {
+ fn expand(&self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &MetaItem,
+ item: Annotatable)
+ -> Vec<Annotatable> {
+ let mut items = Vec::new();
+ (self.0)(ecx, span, meta_item, &item, &mut |a| items.push(a));
+ items
+ }
+}
+
macro_rules! derive_traits {
($( $name:expr => $func:path, )+) => {
pub fn is_builtin_trait(name: ast::Name) -> bool {
$(
resolver.add_builtin(
ast::Ident::with_empty_ctxt(Symbol::intern($name)),
- Lrc::new(SyntaxExtension::BuiltinDerive($func))
+ Lrc::new(SyntaxExtension::LegacyDerive(Box::new(BuiltinDerive($func))))
);
)*
}
enum Position {
Exact(usize),
- Named(String),
+ Named(Symbol),
}
struct Context<'a, 'b: 'a> {
/// Unique format specs seen for each argument.
arg_unique_types: Vec<Vec<ArgumentType>>,
/// Map from named arguments to their resolved indices.
- names: FxHashMap<String, usize>,
+ names: FxHashMap<Symbol, usize>,
/// The latest consecutive literal strings, or empty if there weren't any.
literal: String,
ecx: &mut ExtCtxt<'a>,
sp: Span,
tts: &[tokenstream::TokenTree]
-) -> Result<(P<ast::Expr>, Vec<P<ast::Expr>>, FxHashMap<String, usize>), DiagnosticBuilder<'a>> {
+) -> Result<(P<ast::Expr>, Vec<P<ast::Expr>>, FxHashMap<Symbol, usize>), DiagnosticBuilder<'a>> {
let mut args = Vec::<P<ast::Expr>>::new();
- let mut names = FxHashMap::<String, usize>::default();
+ let mut names = FxHashMap::<Symbol, usize>::default();
let mut p = ecx.new_parser_from_tts(tts);
while p.token != token::Eof {
if !p.eat(&token::Comma) {
- return Err(ecx.struct_span_err(p.span, "expected token: `,`"));
+ return Err(ecx.struct_span_err(p.token.span, "expected token: `,`"));
}
if p.token == token::Eof {
break;
} // accept trailing commas
if named || (p.token.is_ident() && p.look_ahead(1, |t| *t == token::Eq)) {
named = true;
- let ident = if let token::Ident(i, _) = p.token {
+ let name = if let token::Ident(name, _) = p.token.kind {
p.bump();
- i
+ name
} else {
return Err(ecx.struct_span_err(
- p.span,
+ p.token.span,
"expected ident, positional arguments cannot follow named arguments",
));
};
- let name: &str = &ident.as_str();
p.expect(&token::Eq)?;
let e = p.parse_expr()?;
- if let Some(prev) = names.get(name) {
+ if let Some(prev) = names.get(&name) {
ecx.struct_span_err(e.span, &format!("duplicate argument named `{}`", name))
.span_note(args[*prev].span, "previously here")
.emit();
// if the input is valid, we can simply append to the positional
// args. And remember the names.
let slot = args.len();
- names.insert(name.to_string(), slot);
+ names.insert(name, slot);
args.push(e);
} else {
let e = p.parse_expr()?;
fn resolve_name_inplace(&self, p: &mut parse::Piece<'_>) {
// NOTE: the `unwrap_or` branch is needed in case of invalid format
// arguments, e.g., `format_args!("{foo}")`.
- let lookup = |s| *self.names.get(s).unwrap_or(&0);
+ let lookup = |s: Symbol| *self.names.get(&s).unwrap_or(&0);
match *p {
parse::String(_) => {}
// it's written second, so it should come after width/precision.
let pos = match arg.position {
parse::ArgumentIs(i) | parse::ArgumentImplicitlyIs(i) => Exact(i),
- parse::ArgumentNamed(s) => Named(s.to_string()),
+ parse::ArgumentNamed(s) => Named(s),
};
let ty = Placeholder(arg.format.ty.to_string());
}
}
- fn verify_count(&mut self, c: parse::Count<'_>) {
+ fn verify_count(&mut self, c: parse::Count) {
match c {
parse::CountImplied |
parse::CountIs(..) => {}
self.verify_arg_type(Exact(i), Count);
}
parse::CountIsName(s) => {
- self.verify_arg_type(Named(s.to_string()), Count);
+ self.verify_arg_type(Named(s), Count);
}
}
}
ecx.std_path(&[sym::fmt, sym::rt, sym::v1, Symbol::intern(s)])
}
- fn build_count(&self, c: parse::Count<'_>) -> P<ast::Expr> {
+ fn build_count(&self, c: parse::Count) -> P<ast::Expr> {
let sp = self.macsp;
let count = |c, arg| {
let mut path = Context::rtpath(self.ecx, "Count");
sp: Span,
efmt: P<ast::Expr>,
args: Vec<P<ast::Expr>>,
- names: FxHashMap<String, usize>,
+ names: FxHashMap<Symbol, usize>,
append_newline: bool)
-> P<ast::Expr> {
// NOTE: this verbose way of initializing `Vec<Vec<ArgumentType>>` is because
if !parser.errors.is_empty() {
let err = parser.errors.remove(0);
- let sp = fmt.span.from_inner_byte_pos(err.start.unwrap(), err.end.unwrap());
+ let sp = fmt.span.from_inner(err.span);
let mut e = ecx.struct_span_err(sp, &format!("invalid format string: {}",
err.description));
e.span_label(sp, err.label + " in format string");
if let Some(note) = err.note {
e.note(¬e);
}
- if let Some((label, start, end)) = err.secondary_label {
- let sp = fmt.span.from_inner_byte_pos(start.unwrap(), end.unwrap());
+ if let Some((label, span)) = err.secondary_label {
+ let sp = fmt.span.from_inner(span);
e.span_label(sp, label);
}
e.emit();
}
let arg_spans = parser.arg_places.iter()
- .map(|&(parse::SpanIndex(start), parse::SpanIndex(end))| {
- fmt.span.from_inner_byte_pos(start, end)
- })
+ .map(|span| fmt.span.from_inner(*span))
.collect();
let mut cx = Context {
let mut show_doc_note = false;
let mut suggestions = vec![];
- for sub in foreign::$kind::iter_subs(fmt_str) {
+ // account for `"` and account for raw strings `r#`
+ let padding = str_style.map(|i| i + 2).unwrap_or(1);
+ for sub in foreign::$kind::iter_subs(fmt_str, padding) {
let trn = match sub.translate() {
Some(trn) => trn,
show_doc_note = true;
}
- if let Some((start, end)) = pos {
- // account for `"` and account for raw strings `r#`
- let padding = str_style.map(|i| i + 2).unwrap_or(1);
- let sp = fmt_sp.from_inner_byte_pos(start + padding, end + padding);
+ if let Some(inner_sp) = pos {
+ let sp = fmt_sp.from_inner(inner_sp);
suggestions.push((sp, trn));
} else {
diag.help(&format!("`{}` should be written as `{}`", sub, trn));
pub mod printf {
use super::strcursor::StrCursor as Cur;
+ use syntax_pos::InnerSpan;
/// Represents a single `printf`-style substitution.
#[derive(Clone, PartialEq, Debug)]
}
}
- pub fn position(&self) -> Option<(usize, usize)> {
+ pub fn position(&self) -> Option<InnerSpan> {
match *self {
Substitution::Format(ref fmt) => Some(fmt.position),
_ => None,
pub fn set_position(&mut self, start: usize, end: usize) {
match self {
Substitution::Format(ref mut fmt) => {
- fmt.position = (start, end);
+ fmt.position = InnerSpan::new(start, end);
}
_ => {}
}
/// Type of parameter being converted.
pub type_: &'a str,
/// Byte offset for the start and end of this formatting directive.
- pub position: (usize, usize),
+ pub position: InnerSpan,
}
impl Format<'_> {
}
/// Returns an iterator over all substitutions in a given string.
- pub fn iter_subs(s: &str) -> Substitutions<'_> {
+ pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> {
Substitutions {
s,
- pos: 0,
+ pos: start_pos,
}
}
let (mut sub, tail) = parse_next_substitution(self.s)?;
self.s = tail;
match sub {
- Substitution::Format(_) => if let Some((start, end)) = sub.position() {
- sub.set_position(start + self.pos, end + self.pos);
- self.pos += end;
+ Substitution::Format(_) => if let Some(inner_span) = sub.position() {
+ sub.set_position(inner_span.start + self.pos, inner_span.end + self.pos);
+ self.pos += inner_span.end;
}
Substitution::Escape => self.pos += 2,
}
precision: None,
length: None,
type_: at.slice_between(next).unwrap(),
- position: (start.at, next.at),
+ position: InnerSpan::new(start.at, next.at),
}),
next.slice_after()
));
drop(next);
end = at;
- let position = (start.at, end.at);
+ let position = InnerSpan::new(start.at, end.at);
let f = Format {
span: start.slice_between(end).unwrap(),
precision: $prec,
length: $len,
type_: $type_,
- position: $pos,
+ position: syntax_pos::InnerSpan::new($pos.0, $pos.1),
}),
"!"
))
#[test]
fn test_iter() {
let s = "The %d'th word %% is: `%.*s` %!\n";
- let subs: Vec<_> = iter_subs(s).map(|sub| sub.translate()).collect();
+ let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate()).collect();
assert_eq!(
subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::<Vec<_>>(),
vec![Some("{}"), None, Some("{:.*}"), None]
pub mod shell {
use super::strcursor::StrCursor as Cur;
+ use syntax_pos::InnerSpan;
#[derive(Clone, PartialEq, Debug)]
pub enum Substitution<'a> {
}
}
- pub fn position(&self) -> Option<(usize, usize)> {
+ pub fn position(&self) -> Option<InnerSpan> {
match self {
Substitution::Ordinal(_, pos) |
Substitution::Name(_, pos) |
- Substitution::Escape(pos) => Some(*pos),
+ Substitution::Escape(pos) => Some(InnerSpan::new(pos.0, pos.1)),
}
}
}
/// Returns an iterator over all substitutions in a given string.
- pub fn iter_subs(s: &str) -> Substitutions<'_> {
+ pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> {
Substitutions {
s,
- pos: 0,
+ pos: start_pos,
}
}
match parse_next_substitution(self.s) {
Some((mut sub, tail)) => {
self.s = tail;
- if let Some((start, end)) = sub.position() {
+ if let Some(InnerSpan { start, end }) = sub.position() {
sub.set_position(start + self.pos, end + self.pos);
self.pos += end;
}
fn test_iter() {
use super::iter_subs;
let s = "The $0'th word $$ is: `$WORD` $!\n";
- let subs: Vec<_> = iter_subs(s).map(|sub| sub.translate()).collect();
+ let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate()).collect();
assert_eq!(
subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::<Vec<_>>(),
vec![Some("{0}"), None, Some("{WORD}")]
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(in_band_lifetimes)]
#![feature(proc_macro_diagnostic)]
use rustc_data_structures::sync::Lrc;
use syntax::ast;
-use syntax::ext::base::{MacroExpanderFn, NormalTT, NamedSyntaxExtension, MultiModifier};
+
+use syntax::ext::base::{MacroExpanderFn, NamedSyntaxExtension, SyntaxExtension};
+use syntax::ext::hygiene::Transparency;
use syntax::edition::Edition;
use syntax::symbol::{sym, Symbol};
macro_rules! register {
($( $name:ident: $f:expr, )*) => { $(
register(Symbol::intern(stringify!($name)),
- NormalTT {
+ SyntaxExtension::LegacyBang {
expander: Box::new($f as MacroExpanderFn),
def_info: None,
+ transparency: Transparency::SemiTransparent,
allow_internal_unstable: None,
allow_internal_unsafe: false,
local_inner_macros: false,
assert: assert::expand_assert,
}
- register(sym::test_case, MultiModifier(Box::new(test_case::expand)));
- register(sym::test, MultiModifier(Box::new(test::expand_test)));
- register(sym::bench, MultiModifier(Box::new(test::expand_bench)));
+ register(sym::test_case, SyntaxExtension::LegacyAttr(Box::new(test_case::expand)));
+ register(sym::test, SyntaxExtension::LegacyAttr(Box::new(test::expand_test)));
+ register(sym::bench, SyntaxExtension::LegacyAttr(Box::new(test::expand_bench)));
// format_args uses `unstable` things internally.
register(Symbol::intern("format_args"),
- NormalTT {
+ SyntaxExtension::LegacyBang {
expander: Box::new(format::expand_format_args),
def_info: None,
+ transparency: Transparency::SemiTransparent,
allow_internal_unstable: Some(vec![sym::fmt_internals].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
edition,
});
register(sym::format_args_nl,
- NormalTT {
+ SyntaxExtension::LegacyBang {
expander: Box::new(format::expand_format_args_nl),
def_info: None,
+ transparency: Transparency::SemiTransparent,
allow_internal_unstable: Some(vec![sym::fmt_internals].into()),
allow_internal_unsafe: false,
local_inner_macros: false,
}
};
- if !trait_ident.can_be_raw() {
+ if !trait_ident.name.can_be_raw() {
self.handler.span_err(trait_attr.span,
&format!("`{}` cannot be a name of derive macro", trait_ident));
}
return None;
}
};
- if !ident.can_be_raw() {
+ if !ident.name.can_be_raw() {
self.handler.span_err(
attr.span,
&format!("`{}` cannot be a name of derive helper attribute", ident),
use syntax::parse::token::*;
let joint = is_joint == Joint;
- let (span, token) = match tree {
+ let Token { kind, span } = match tree {
tokenstream::TokenTree::Delimited(span, delim, tts) => {
let delimiter = Delimiter::from_internal(delim);
return TokenTree::Group(Group {
span,
});
}
- tokenstream::TokenTree::Token(span, token) => (span, token),
+ tokenstream::TokenTree::Token(token) => token,
};
macro_rules! tt {
}};
}
- match token {
+ match kind {
Eq => op!('='),
Lt => op!('<'),
Le => op!('<', '='),
Question => op!('?'),
SingleQuote => op!('\''),
- Ident(ident, false) if ident.name == kw::DollarCrate =>
- tt!(Ident::dollar_crate()),
- Ident(ident, is_raw) => tt!(Ident::new(ident.name, is_raw)),
- Lifetime(ident) => {
- let ident = ident.without_first_quote();
+ Ident(name, false) if name == kw::DollarCrate => tt!(Ident::dollar_crate()),
+ Ident(name, is_raw) => tt!(Ident::new(name, is_raw)),
+ Lifetime(name) => {
+ let ident = ast::Ident::new(name, span).without_first_quote();
stack.push(tt!(Ident::new(ident.name, false)));
tt!(Punct::new('\'', true))
}
escaped.extend(ch.escape_debug());
}
let stream = vec![
- Ident(ast::Ident::new(sym::doc, span), false),
+ Ident(sym::doc, false),
Eq,
- Token::lit(token::Str, Symbol::intern(&escaped), None),
+ TokenKind::lit(token::Str, Symbol::intern(&escaped), None),
]
.into_iter()
- .map(|token| tokenstream::TokenTree::Token(span, token))
+ .map(|kind| tokenstream::TokenTree::token(kind, span))
.collect();
stack.push(TokenTree::Group(Group {
delimiter: Delimiter::Bracket,
.into();
}
TokenTree::Ident(self::Ident { sym, is_raw, span }) => {
- let token = Ident(ast::Ident::new(sym, span), is_raw);
- return tokenstream::TokenTree::Token(span, token).into();
+ return tokenstream::TokenTree::token(Ident(sym, is_raw), span).into();
}
TokenTree::Literal(self::Literal {
lit: token::Lit { kind: token::Integer, symbol, suffix },
}) if symbol.as_str().starts_with("-") => {
let minus = BinOp(BinOpToken::Minus);
let symbol = Symbol::intern(&symbol.as_str()[1..]);
- let integer = Token::lit(token::Integer, symbol, suffix);
- let a = tokenstream::TokenTree::Token(span, minus);
- let b = tokenstream::TokenTree::Token(span, integer);
+ let integer = TokenKind::lit(token::Integer, symbol, suffix);
+ let a = tokenstream::TokenTree::token(minus, span);
+ let b = tokenstream::TokenTree::token(integer, span);
return vec![a, b].into_iter().collect();
}
TokenTree::Literal(self::Literal {
}) if symbol.as_str().starts_with("-") => {
let minus = BinOp(BinOpToken::Minus);
let symbol = Symbol::intern(&symbol.as_str()[1..]);
- let float = Token::lit(token::Float, symbol, suffix);
- let a = tokenstream::TokenTree::Token(span, minus);
- let b = tokenstream::TokenTree::Token(span, float);
+ let float = TokenKind::lit(token::Float, symbol, suffix);
+ let a = tokenstream::TokenTree::token(minus, span);
+ let b = tokenstream::TokenTree::token(float, span);
return vec![a, b].into_iter().collect();
}
TokenTree::Literal(self::Literal { lit, span }) => {
- return tokenstream::TokenTree::Token(span, Literal(lit)).into()
+ return tokenstream::TokenTree::token(Literal(lit), span).into()
}
};
- let token = match ch {
+ let kind = match ch {
'=' => Eq,
'<' => Lt,
'>' => Gt,
_ => unreachable!(),
};
- let tree = tokenstream::TokenTree::Token(span, token);
+ let tree = tokenstream::TokenTree::token(kind, span);
TokenStream::new(vec![(tree, if joint { Joint } else { NonJoint })])
}
}
if !Self::is_valid(&string) {
panic!("`{:?}` is not a valid identifier", string)
}
- if is_raw && !ast::Ident::from_interned_str(sym.as_interned_str()).can_be_raw() {
+ // Get rid of gensyms to conservatively check rawness on the string contents only.
+ if is_raw && !sym.as_interned_str().as_symbol().can_be_raw() {
panic!("`{}` cannot be a raw identifier", string);
}
Ident { sym, is_raw, span }
feature_gate::EXPLAIN_TRACE_MACROS);
}
- match (tt.len(), tt.first()) {
- (1, Some(&TokenTree::Token(_, ref tok))) if tok.is_keyword(kw::True) => {
+ match tt {
+ [TokenTree::Token(token)] if token.is_keyword(kw::True) => {
cx.set_trace_macros(true);
}
- (1, Some(&TokenTree::Token(_, ref tok))) if tok.is_keyword(kw::False) => {
+ [TokenTree::Token(token)] if token.is_keyword(kw::False) => {
cx.set_trace_macros(false);
}
_ => cx.span_err(sp, "trace_macros! accepts only `true` or `false`"),
#![deny(rust_2018_idioms)]
#![deny(internal)]
+#![deny(unused_lifetimes)]
#![feature(const_fn)]
#![feature(crate_visibility_modifier)]
-#![feature(custom_attribute)]
#![feature(nll)]
#![feature(non_exhaustive)]
#![feature(optin_builtin_traits)]
)
}
- pub fn from_inner_byte_pos(self, start: usize, end: usize) -> Span {
+ pub fn from_inner(self, inner: InnerSpan) -> Span {
let span = self.data();
- Span::new(span.lo + BytePos::from_usize(start),
- span.lo + BytePos::from_usize(end),
+ Span::new(span.lo + BytePos::from_usize(inner.start),
+ span.lo + BytePos::from_usize(inner.end),
span.ctxt)
}
pub end_pos: BytePos
}
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct InnerSpan {
+ pub start: usize,
+ pub end: usize,
+}
+
+impl InnerSpan {
+ pub fn new(start: usize, end: usize) -> InnerSpan {
+ InnerSpan { start, end }
+ }
+}
+
// Given a slice of line start positions and a position, returns the index of
// the line the position is on. Returns -1 if the position is located before
// the first line.
conservative_impl_trait,
console,
const_compare_raw_pointers,
+ const_constructor,
const_fn,
const_fn_union,
const_generics,
core_intrinsics,
crate_id,
crate_in_paths,
+ crate_local,
crate_name,
crate_type,
crate_visibility_modifier,
deref,
deref_mut,
derive,
+ direct,
doc,
doc_alias,
doc_cfg,
format_args_nl,
from,
From,
+ from_desugaring,
from_error,
from_generator,
+ from_method,
from_ok,
from_usize,
fundamental,
panic_impl,
panic_implementation,
panic_runtime,
+ parent_trait,
partial_cmp,
PartialOrd,
passes,
rust_2018_preview,
rust_begin_unwind,
rustc,
+ rustc_allocator,
rustc_allocator_nounwind,
rustc_allow_const_fn_ptr,
rustc_args_required_const,
rustc_diagnostic_macros,
rustc_dirty,
rustc_doc_only_macro,
+ rustc_dummy,
rustc_dump_env_program_clauses,
rustc_dump_program_clauses,
rustc_dump_user_substs,
__rust_unstable_column,
rvalue_static_promotion,
sanitizer_runtime,
+ _Self,
self_in_typedefs,
self_struct_ctor,
Send,
trait_alias,
transmute,
transparent,
+ transparent_enums,
+ transparent_unions,
trivial_bounds,
Try,
try_blocks,
impl Interner {
fn prefill(init: &[&'static str]) -> Self {
- let symbols = (0 .. init.len() as u32).map(Symbol::new);
Interner {
- strings: init.to_vec(),
- names: init.iter().copied().zip(symbols).collect(),
+ strings: init.into(),
+ names: init.iter().copied().zip((0..).map(Symbol::new)).collect(),
..Default::default()
}
}
pub fn is_doc_keyword(self) -> bool {
self <= kw::Union
}
+
+ /// A keyword or reserved identifier that can be used as a path segment.
+ pub fn is_path_segment_keyword(self) -> bool {
+ self == kw::Super ||
+ self == kw::SelfLower ||
+ self == kw::SelfUpper ||
+ self == kw::Crate ||
+ self == kw::PathRoot ||
+ self == kw::DollarCrate
+ }
+
+ /// This symbol can be a raw identifier.
+ pub fn can_be_raw(self) -> bool {
+ self != kw::Invalid && self != kw::Underscore && !self.is_path_segment_keyword()
+ }
}
impl Ident {
/// A keyword or reserved identifier that can be used as a path segment.
pub fn is_path_segment_keyword(self) -> bool {
- self.name == kw::Super ||
- self.name == kw::SelfLower ||
- self.name == kw::SelfUpper ||
- self.name == kw::Crate ||
- self.name == kw::PathRoot ||
- self.name == kw::DollarCrate
- }
-
- /// This identifier can be a raw identifier.
- pub fn can_be_raw(self) -> bool {
- self.name != kw::Invalid && self.name != kw::Underscore &&
- !self.is_path_segment_keyword()
+ self.name.is_path_segment_keyword()
}
/// We see this identifier in a normal identifier position, like variable name or a type.
/// How was it written originally? Did it use the raw form? Let's try to guess.
pub fn is_raw_guess(self) -> bool {
- self.can_be_raw() && self.is_reserved()
+ self.name.can_be_raw() && self.is_reserved()
}
}
#![deny(rust_2018_idioms)]
#![cfg_attr(windows, feature(libc))]
-// Handle rustfmt skips
-#![feature(custom_attribute)]
-#![allow(unused_attributes)]
use std::io::prelude::*;
use std::io::{self, Stdout, Stderr};
// min-llvm-version 7.0
#![crate_type = "lib"]
-#![feature(repr_align_enum)]
#[repr(align(64))]
pub enum Align64 {
+++ /dev/null
-// compile-flags: -C no-prepopulate-passes
-
-#![crate_type = "lib"]
-#![feature(core_intrinsics)]
-
-use std::intrinsics::exact_div;
-
-// CHECK-LABEL: @exact_sdiv
-#[no_mangle]
-pub unsafe fn exact_sdiv(x: i32, y: i32) -> i32 {
-// CHECK: sdiv exact
- exact_div(x, y)
-}
-
-// CHECK-LABEL: @exact_udiv
-#[no_mangle]
-pub unsafe fn exact_udiv(x: u32, y: u32) -> u32 {
-// CHECK: udiv exact
- exact_div(x, y)
-}
// ignore-tidy-linelength
#![crate_type = "lib"]
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
pub struct S {
_field: [i32; 8],
// CHECK: noalias i8* @allocator()
#[no_mangle]
-#[allocator]
+#[rustc_allocator]
pub fn allocator() -> *const i8 {
std::ptr::null()
}
--- /dev/null
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::exact_div;
+
+// CHECK-LABEL: @exact_sdiv
+#[no_mangle]
+pub unsafe fn exact_sdiv(x: i32, y: i32) -> i32 {
+ // CHECK: sdiv exact
+ exact_div(x, y)
+}
+
+// CHECK-LABEL: @exact_udiv
+#[no_mangle]
+pub unsafe fn exact_udiv(x: u32, y: u32) -> u32 {
+ // CHECK: udiv exact
+ exact_div(x, y)
+}
--- /dev/null
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{likely,unlikely};
+
+#[no_mangle]
+pub fn check_likely(x: i32, y: i32) -> Option<i32> {
+ unsafe {
+ // CHECK: call i1 @llvm.expect.i1(i1 %{{.*}}, i1 true)
+ if likely(x == y) {
+ None
+ } else {
+ Some(x + y)
+ }
+ }
+}
+
+#[no_mangle]
+pub fn check_unlikely(x: i32, y: i32) -> Option<i32> {
+ unsafe {
+ // CHECK: call i1 @llvm.expect.i1(i1 %{{.*}}, i1 false)
+ if unlikely(x == y) {
+ None
+ } else {
+ Some(x + y)
+ }
+ }
+}
--- /dev/null
+// compile-flags: -C no-prepopulate-passes
+
+#![feature(core_intrinsics)]
+#![crate_type = "lib"]
+
+// test that `move_val_init` actually avoids big allocas
+
+use std::intrinsics::move_val_init;
+
+pub struct Big {
+ pub data: [u8; 65536]
+}
+
+// CHECK-LABEL: @test_mvi
+#[no_mangle]
+pub unsafe fn test_mvi(target: *mut Big, make_big: fn() -> Big) {
+ // CHECK: call void %make_big(%Big*{{[^%]*}} %target)
+ move_val_init(target, make_big());
+}
--- /dev/null
+// compile-flags: -O
+
+#![feature(core_intrinsics)]
+#![crate_type = "lib"]
+
+#[no_mangle]
+pub fn a(a: &mut u32, b: u32) {
+ // CHECK-LABEL: define void @a
+ // CHECK: store i32 %b, i32* %a, align 4, !nontemporal
+ unsafe {
+ std::intrinsics::nontemporal_store(a, b);
+ }
+}
--- /dev/null
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::{prefetch_read_data, prefetch_write_data,
+ prefetch_read_instruction, prefetch_write_instruction};
+
+#[no_mangle]
+pub fn check_prefetch_read_data(data: &[i8]) {
+ unsafe {
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 0, i32 1)
+ prefetch_read_data(data.as_ptr(), 0);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 1, i32 1)
+ prefetch_read_data(data.as_ptr(), 1);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 2, i32 1)
+ prefetch_read_data(data.as_ptr(), 2);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 3, i32 1)
+ prefetch_read_data(data.as_ptr(), 3);
+ }
+}
+
+#[no_mangle]
+pub fn check_prefetch_write_data(data: &[i8]) {
+ unsafe {
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 0, i32 1)
+ prefetch_write_data(data.as_ptr(), 0);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 1, i32 1)
+ prefetch_write_data(data.as_ptr(), 1);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 2, i32 1)
+ prefetch_write_data(data.as_ptr(), 2);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 1)
+ prefetch_write_data(data.as_ptr(), 3);
+ }
+}
+
+#[no_mangle]
+pub fn check_prefetch_read_instruction(data: &[i8]) {
+ unsafe {
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 0, i32 0)
+ prefetch_read_instruction(data.as_ptr(), 0);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 1, i32 0)
+ prefetch_read_instruction(data.as_ptr(), 1);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 2, i32 0)
+ prefetch_read_instruction(data.as_ptr(), 2);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 3, i32 0)
+ prefetch_read_instruction(data.as_ptr(), 3);
+ }
+}
+
+#[no_mangle]
+pub fn check_prefetch_write_instruction(data: &[i8]) {
+ unsafe {
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 0, i32 0)
+ prefetch_write_instruction(data.as_ptr(), 0);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 1, i32 0)
+ prefetch_write_instruction(data.as_ptr(), 1);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 2, i32 0)
+ prefetch_write_instruction(data.as_ptr(), 2);
+ // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 0)
+ prefetch_write_instruction(data.as_ptr(), 3);
+ }
+}
--- /dev/null
+#![crate_type = "lib"]
+#![feature(core_intrinsics)]
+
+use std::intrinsics::*;
+
+// CHECK-LABEL: @unchecked_add_signed
+#[no_mangle]
+pub unsafe fn unchecked_add_signed(a: i32, b: i32) -> i32 {
+ // CHECK: add nsw
+ unchecked_add(a, b)
+}
+
+// CHECK-LABEL: @unchecked_add_unsigned
+#[no_mangle]
+pub unsafe fn unchecked_add_unsigned(a: u32, b: u32) -> u32 {
+ // CHECK: add nuw
+ unchecked_add(a, b)
+}
+
+// CHECK-LABEL: @unchecked_sub_signed
+#[no_mangle]
+pub unsafe fn unchecked_sub_signed(a: i32, b: i32) -> i32 {
+ // CHECK: sub nsw
+ unchecked_sub(a, b)
+}
+
+// CHECK-LABEL: @unchecked_sub_unsigned
+#[no_mangle]
+pub unsafe fn unchecked_sub_unsigned(a: u32, b: u32) -> u32 {
+ // CHECK: sub nuw
+ unchecked_sub(a, b)
+}
+
+// CHECK-LABEL: @unchecked_mul_signed
+#[no_mangle]
+pub unsafe fn unchecked_mul_signed(a: i32, b: i32) -> i32 {
+ // CHECK: mul nsw
+ unchecked_mul(a, b)
+}
+
+// CHECK-LABEL: @unchecked_mul_unsigned
+#[no_mangle]
+pub unsafe fn unchecked_mul_unsigned(a: u32, b: u32) -> u32 {
+ // CHECK: mul nuw
+ unchecked_mul(a, b)
+}
+++ /dev/null
-// compile-flags: -C no-prepopulate-passes
-
-#![crate_type = "lib"]
-#![feature(core_intrinsics)]
-
-use std::intrinsics::{likely,unlikely};
-
-#[no_mangle]
-pub fn check_likely(x: i32, y: i32) -> Option<i32> {
- unsafe {
- // CHECK: call i1 @llvm.expect.i1(i1 %{{.*}}, i1 true)
- if likely(x == y) {
- None
- } else {
- Some(x + y)
- }
- }
-}
-
-#[no_mangle]
-pub fn check_unlikely(x: i32, y: i32) -> Option<i32> {
- unsafe {
- // CHECK: call i1 @llvm.expect.i1(i1 %{{.*}}, i1 false)
- if unlikely(x == y) {
- None
- } else {
- Some(x + y)
- }
- }
-}
+++ /dev/null
-// compile-flags: -C no-prepopulate-passes
-
-#![feature(core_intrinsics)]
-#![crate_type = "lib"]
-
-// test that `move_val_init` actually avoids big allocas
-
-use std::intrinsics::move_val_init;
-
-pub struct Big {
- pub data: [u8; 65536]
-}
-
-// CHECK-LABEL: @test_mvi
-#[no_mangle]
-pub unsafe fn test_mvi(target: *mut Big, make_big: fn() -> Big) {
- // CHECK: call void %make_big(%Big*{{[^%]*}} %target)
- move_val_init(target, make_big());
-}
+++ /dev/null
-// compile-flags: -O
-
-#![feature(core_intrinsics)]
-#![crate_type = "lib"]
-
-#[no_mangle]
-pub fn a(a: &mut u32, b: u32) {
- // CHECK-LABEL: define void @a
- // CHECK: store i32 %b, i32* %a, align 4, !nontemporal
- unsafe {
- std::intrinsics::nontemporal_store(a, b);
- }
-}
+++ /dev/null
-// compile-flags: -C no-prepopulate-passes
-
-#![crate_type = "lib"]
-#![feature(core_intrinsics)]
-
-use std::intrinsics::{prefetch_read_data, prefetch_write_data,
- prefetch_read_instruction, prefetch_write_instruction};
-
-#[no_mangle]
-pub fn check_prefetch_read_data(data: &[i8]) {
- unsafe {
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 0, i32 1)
- prefetch_read_data(data.as_ptr(), 0);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 1, i32 1)
- prefetch_read_data(data.as_ptr(), 1);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 2, i32 1)
- prefetch_read_data(data.as_ptr(), 2);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 3, i32 1)
- prefetch_read_data(data.as_ptr(), 3);
- }
-}
-
-#[no_mangle]
-pub fn check_prefetch_write_data(data: &[i8]) {
- unsafe {
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 0, i32 1)
- prefetch_write_data(data.as_ptr(), 0);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 1, i32 1)
- prefetch_write_data(data.as_ptr(), 1);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 2, i32 1)
- prefetch_write_data(data.as_ptr(), 2);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 1)
- prefetch_write_data(data.as_ptr(), 3);
- }
-}
-
-#[no_mangle]
-pub fn check_prefetch_read_instruction(data: &[i8]) {
- unsafe {
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 0, i32 0)
- prefetch_read_instruction(data.as_ptr(), 0);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 1, i32 0)
- prefetch_read_instruction(data.as_ptr(), 1);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 2, i32 0)
- prefetch_read_instruction(data.as_ptr(), 2);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 0, i32 3, i32 0)
- prefetch_read_instruction(data.as_ptr(), 3);
- }
-}
-
-#[no_mangle]
-pub fn check_prefetch_write_instruction(data: &[i8]) {
- unsafe {
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 0, i32 0)
- prefetch_write_instruction(data.as_ptr(), 0);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 1, i32 0)
- prefetch_write_instruction(data.as_ptr(), 1);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 2, i32 0)
- prefetch_write_instruction(data.as_ptr(), 2);
- // CHECK: call void @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 0)
- prefetch_write_instruction(data.as_ptr(), 3);
- }
-}
// compile-flags: -C no-prepopulate-passes
+// ignore-tidy-linelength
// ignore-arm
// ignore-mips
// ignore-powerpc64
// See repr-transparent.rs
+#![feature(transparent_enums, transparent_unions)]
+
#![crate_type="lib"]
+#[derive(Clone, Copy)]
#[repr(C)]
-pub struct Big([u32; 16]);
+pub struct BigS([u32; 16]);
+
+#[repr(transparent)]
+pub struct TsBigS(BigS);
+
+#[repr(transparent)]
+pub union TuBigS {
+ field: BigS,
+}
#[repr(transparent)]
-pub struct BigW(Big);
+pub enum TeBigS {
+ Variant(BigS),
+}
+
+// CHECK: define void @test_BigS(%BigS* [[BIGS_RET_ATTRS:.*]], %BigS* [[BIGS_ARG_ATTRS:.*]])
+#[no_mangle]
+pub extern fn test_BigS(_: BigS) -> BigS { loop {} }
+
+// CHECK: define void @test_TsBigS(%TsBigS* [[BIGS_RET_ATTRS]], %TsBigS* [[BIGS_ARG_ATTRS]])
+#[no_mangle]
+pub extern fn test_TsBigS(_: TsBigS) -> TsBigS { loop {} }
-// CHECK: define void @test_Big(%Big* [[BIG_RET_ATTRS:.*]], %Big* [[BIG_ARG_ATTRS:.*]])
+// CHECK: define void @test_TuBigS(%TuBigS* [[BIGS_RET_ATTRS]], %TuBigS* [[BIGS_ARG_ATTRS]])
#[no_mangle]
-pub extern fn test_Big(_: Big) -> Big { loop {} }
+pub extern fn test_TuBigS(_: TuBigS) -> TuBigS { loop {} }
-// CHECK: define void @test_BigW(%BigW* [[BIG_RET_ATTRS]], %BigW* [[BIG_ARG_ATTRS]])
+// CHECK: define void @test_TeBigS(%"TeBigS::Variant"* [[BIGS_RET_ATTRS]], %"TeBigS::Variant"* [[BIGS_ARG_ATTRS]])
#[no_mangle]
-pub extern fn test_BigW(_: BigW) -> BigW { loop {} }
+pub extern fn test_TeBigS(_: TeBigS) -> TeBigS { loop {} }
+#[derive(Clone, Copy)]
#[repr(C)]
pub union BigU {
foo: [u32; 16],
}
#[repr(transparent)]
-pub struct BigUw(BigU);
+pub struct TsBigU(BigU);
+
+#[repr(transparent)]
+pub union TuBigU {
+ field: BigU,
+}
+
+#[repr(transparent)]
+pub enum TeBigU {
+ Variant(BigU),
+}
// CHECK: define void @test_BigU(%BigU* [[BIGU_RET_ATTRS:.*]], %BigU* [[BIGU_ARG_ATTRS:.*]])
#[no_mangle]
pub extern fn test_BigU(_: BigU) -> BigU { loop {} }
-// CHECK: define void @test_BigUw(%BigUw* [[BIGU_RET_ATTRS]], %BigUw* [[BIGU_ARG_ATTRS]])
+// CHECK: define void @test_TsBigU(%TsBigU* [[BIGU_RET_ATTRS:.*]], %TsBigU* [[BIGU_ARG_ATTRS]])
+#[no_mangle]
+pub extern fn test_TsBigU(_: TsBigU) -> TsBigU { loop {} }
+
+// CHECK: define void @test_TuBigU(%TuBigU* [[BIGU_RET_ATTRS]], %TuBigU* [[BIGU_ARG_ATTRS]])
+#[no_mangle]
+pub extern fn test_TuBigU(_: TuBigU) -> TuBigU { loop {} }
+
+// CHECK: define void @test_TeBigU(%"TeBigU::Variant"* [[BIGU_RET_ATTRS]], %"TeBigU::Variant"* [[BIGU_ARG_ATTRS]])
#[no_mangle]
-pub extern fn test_BigUw(_: BigUw) -> BigUw { loop {} }
+pub extern fn test_TeBigU(_: TeBigU) -> TeBigU { loop {} }
// ignore-x86_64
// See repr-transparent.rs
+#![feature(transparent_enums, transparent_unions)]
+
#![crate_type="lib"]
+#[derive(Clone, Copy)]
#[repr(C)]
-pub struct Big([u32; 16]);
+pub struct BigS([u32; 16]);
+
+#[repr(transparent)]
+pub struct TsBigS(BigS);
+
+#[repr(transparent)]
+pub union TuBigS {
+ field: BigS,
+}
#[repr(transparent)]
-pub struct BigW(Big);
+pub enum TeBigS {
+ Variant(BigS),
+}
+
+// CHECK: define void @test_BigS(%BigS* [[BIGS_RET_ATTRS:.*]], [16 x i32]
+#[no_mangle]
+pub extern fn test_BigS(_: BigS) -> BigS { loop {} }
+
+// CHECK: define void @test_TsBigS(%TsBigS* [[BIGS_RET_ATTRS]], [16 x i32]
+#[no_mangle]
+pub extern fn test_TsBigS(_: TsBigS) -> TsBigS { loop {} }
-// CHECK: define void @test_Big(%Big* [[BIG_RET_ATTRS:.*]], [16 x i32]
+// CHECK: define void @test_TuBigS(%TuBigS* [[BIGS_RET_ATTRS]], [16 x i32]
#[no_mangle]
-pub extern fn test_Big(_: Big) -> Big { loop {} }
+pub extern fn test_TuBigS(_: TuBigS) -> TuBigS { loop {} }
-// CHECK: define void @test_BigW(%BigW* [[BIG_RET_ATTRS]], [16 x i32]
+// CHECK: define void @test_TeBigS(%"TeBigS::Variant"* [[BIGS_RET_ATTRS]], [16 x i32]
#[no_mangle]
-pub extern fn test_BigW(_: BigW) -> BigW { loop {} }
+pub extern fn test_TeBigS(_: TeBigS) -> TeBigS { loop {} }
+#[derive(Clone, Copy)]
#[repr(C)]
pub union BigU {
foo: [u32; 16],
}
#[repr(transparent)]
-pub struct BigUw(BigU);
+pub struct TsBigU(BigU);
+
+#[repr(transparent)]
+pub union TuBigU {
+ field: BigU,
+}
+
+#[repr(transparent)]
+pub enum TeBigU {
+ Variant(BigU),
+}
// CHECK: define void @test_BigU(%BigU* [[BIGU_RET_ATTRS:.*]], [16 x i32]
#[no_mangle]
pub extern fn test_BigU(_: BigU) -> BigU { loop {} }
-// CHECK: define void @test_BigUw(%BigUw* [[BIGU_RET_ATTRS]], [16 x i32]
+// CHECK: define void @test_TsBigU(%TsBigU* [[BIGU_RET_ATTRS:.*]], [16 x i32]
+#[no_mangle]
+pub extern fn test_TsBigU(_: TsBigU) -> TsBigU { loop {} }
+
+// CHECK: define void @test_TuBigU(%TuBigU* [[BIGU_RET_ATTRS]], [16 x i32]
+#[no_mangle]
+pub extern fn test_TuBigU(_: TuBigU) -> TuBigU { loop {} }
+
+// CHECK: define void @test_TeBigU(%"TeBigU::Variant"* [[BIGU_RET_ATTRS]], [16 x i32]
#[no_mangle]
-pub extern fn test_BigUw(_: BigUw) -> BigUw { loop {} }
+pub extern fn test_TeBigU(_: TeBigU) -> TeBigU { loop {} }
// only-mips64
// See repr-transparent.rs
+#![feature(transparent_enums, transparent_unions)]
+
#![crate_type="lib"]
+#[derive(Clone, Copy)]
#[repr(C)]
-pub struct Big([u32; 16]);
+pub struct BigS([u32; 16]);
+
+#[repr(transparent)]
+pub struct TsBigS(BigS);
+
+#[repr(transparent)]
+pub union TuBigS {
+ field: BigS,
+}
#[repr(transparent)]
-pub struct BigW(Big);
+pub enum TeBigS {
+ Variant(BigS),
+}
+
+// CHECK: define void @test_BigS(%BigS* [[BIGS_RET_ATTRS:.*]], [8 x i64]
+#[no_mangle]
+pub extern fn test_BigS(_: BigS) -> BigS { loop {} }
+
+// CHECK: define void @test_TsBigS(%TsBigS* [[BIGS_RET_ATTRS]], [8 x i64]
+#[no_mangle]
+pub extern fn test_TsBigS(_: TsBigS) -> TsBigS { loop {} }
-// CHECK: define void @test_Big(%Big* [[BIG_RET_ATTRS:.*]], [8 x i64]
+// CHECK: define void @test_TuBigS(%TuBigS* [[BIGS_RET_ATTRS]], [8 x i64]
#[no_mangle]
-pub extern fn test_Big(_: Big) -> Big { loop {} }
+pub extern fn test_TuBigS(_: TuBigS) -> TuBigS { loop {} }
-// CHECK: define void @test_BigW(%BigW* [[BIG_RET_ATTRS]], [8 x i64]
+// CHECK: define void @test_TeBigS(%"TeBigS::Variant"* [[BIGS_RET_ATTRS]], [8 x i64]
#[no_mangle]
-pub extern fn test_BigW(_: BigW) -> BigW { loop {} }
+pub extern fn test_TeBigS(_: TeBigS) -> TeBigS { loop {} }
+#[derive(Clone, Copy)]
#[repr(C)]
pub union BigU {
foo: [u32; 16],
}
#[repr(transparent)]
-pub struct BigUw(BigU);
+pub struct TsBigU(BigU);
+
+#[repr(transparent)]
+pub union TuBigU {
+ field: BigU,
+}
+
+#[repr(transparent)]
+pub enum TeBigU {
+ Variant(BigU),
+}
// CHECK: define void @test_BigU(%BigU* [[BIGU_RET_ATTRS:.*]], [8 x i64]
#[no_mangle]
pub extern fn test_BigU(_: BigU) -> BigU { loop {} }
-// CHECK: define void @test_BigUw(%BigUw* [[BIGU_RET_ATTRS]], [8 x i64]
+// CHECK: define void @test_TsBigU(%TsBigU* [[BIGU_RET_ATTRS:.*]], [8 x i64]
+#[no_mangle]
+pub extern fn test_TsBigU(_: TsBigU) -> TsBigU { loop {} }
+
+// CHECK: define void @test_TuBigU(%TuBigU* [[BIGU_RET_ATTRS]], [8 x i64]
+#[no_mangle]
+pub extern fn test_TuBigU(_: TuBigU) -> TuBigU { loop {} }
+
+// CHECK: define void @test_TeBigU(%"TeBigU::Variant"* [[BIGU_RET_ATTRS]], [8 x i64]
#[no_mangle]
-pub extern fn test_BigUw(_: BigUw) -> BigUw { loop {} }
+pub extern fn test_TeBigU(_: TeBigU) -> TeBigU { loop {} }
// compile-flags: -C no-prepopulate-passes
#![crate_type="lib"]
-#![feature(repr_simd)]
+#![feature(repr_simd, transparent_enums, transparent_unions)]
use std::marker::PhantomData;
+#[derive(Copy, Clone)]
pub struct Zst1;
+#[derive(Copy, Clone)]
pub struct Zst2(());
+#[derive(Copy, Clone)]
#[repr(transparent)]
pub struct F32(f32);
#[no_mangle]
pub extern fn test_Projection(_: StructWithProjection) -> StructWithProjection { loop {} }
+#[repr(transparent)]
+pub enum EnumF32 {
+ Variant(F32)
+}
+
+// CHECK: define float @test_EnumF32(float %arg0)
+#[no_mangle]
+pub extern fn test_EnumF32(_: EnumF32) -> EnumF32 { loop {} }
+
+#[repr(transparent)]
+pub enum EnumF32WithZsts {
+ Variant(Zst1, F32, Zst2)
+}
+
+// CHECK: define float @test_EnumF32WithZsts(float %arg0)
+#[no_mangle]
+pub extern fn test_EnumF32WithZsts(_: EnumF32WithZsts) -> EnumF32WithZsts { loop {} }
+
+#[repr(transparent)]
+pub union UnionF32 {
+ field: F32,
+}
+
+// CHECK: define float @test_UnionF32(float %arg0)
+#[no_mangle]
+pub extern fn test_UnionF32(_: UnionF32) -> UnionF32 { loop {} }
+
+#[repr(transparent)]
+pub union UnionF32WithZsts {
+ zst1: Zst1,
+ field: F32,
+ zst2: Zst2,
+}
+
+// CHECK: define float @test_UnionF32WithZsts(float %arg0)
+#[no_mangle]
+pub extern fn test_UnionF32WithZsts(_: UnionF32WithZsts) -> UnionF32WithZsts { loop {} }
+
// All that remains to be tested are aggregates. They are tested in separate files called repr-
// transparent-*.rs with `only-*` or `ignore-*` directives, because the expected LLVM IR
+++ /dev/null
-#![crate_type = "lib"]
-#![feature(core_intrinsics)]
-
-use std::intrinsics::*;
-
-// CHECK-LABEL: @unchecked_add_signed
-#[no_mangle]
-pub unsafe fn unchecked_add_signed(a: i32, b: i32) -> i32 {
- // CHECK: add nsw
- unchecked_add(a, b)
-}
-
-// CHECK-LABEL: @unchecked_add_unsigned
-#[no_mangle]
-pub unsafe fn unchecked_add_unsigned(a: u32, b: u32) -> u32 {
- // CHECK: add nuw
- unchecked_add(a, b)
-}
-
-// CHECK-LABEL: @unchecked_sub_signed
-#[no_mangle]
-pub unsafe fn unchecked_sub_signed(a: i32, b: i32) -> i32 {
- // CHECK: sub nsw
- unchecked_sub(a, b)
-}
-
-// CHECK-LABEL: @unchecked_sub_unsigned
-#[no_mangle]
-pub unsafe fn unchecked_sub_unsigned(a: u32, b: u32) -> u32 {
- // CHECK: sub nuw
- unchecked_sub(a, b)
-}
-
-// CHECK-LABEL: @unchecked_mul_signed
-#[no_mangle]
-pub unsafe fn unchecked_mul_signed(a: i32, b: i32) -> i32 {
- // CHECK: mul nsw
- unchecked_mul(a, b)
-}
-
-// CHECK-LABEL: @unchecked_mul_unsigned
-#[no_mangle]
-pub unsafe fn unchecked_mul_unsigned(a: u32, b: u32) -> u32 {
- // CHECK: mul nuw
- unchecked_mul(a, b)
-}
--- /dev/null
+fn main() {
+ *(&4);
+}
+
+// END RUST SOURCE
+// START rustc.main.ConstProp.before.mir
+// bb0: {
+// ...
+// _2 = &(promoted[0]: i32);
+// _1 = (*_2);
+// ...
+//}
+// END rustc.main.ConstProp.before.mir
+// START rustc.main.ConstProp.after.mir
+// bb0: {
+// ...
+// _2 = const Scalar(AllocId(0).0x0) : &i32;
+// _1 = const 4i32;
+// ...
+// }
+// END rustc.main.ConstProp.after.mir
--- /dev/null
+fn main() {
+ let _ = main as usize as *const fn();
+}
+
+// END RUST SOURCE
+// START rustc.main.ConstProp.before.mir
+// bb0: {
+// ...
+// _3 = const main as fn() (Pointer(ReifyFnPointer));
+// _2 = move _3 as usize (Misc);
+// ...
+// _1 = move _2 as *const fn() (Misc);
+// ...
+// }
+// END rustc.main.ConstProp.before.mir
+// START rustc.main.ConstProp.after.mir
+// bb0: {
+// ...
+// _3 = const Scalar(AllocId(1).0x0) : fn();
+// _2 = move _3 as usize (Misc);
+// ...
+// _1 = const Scalar(AllocId(1).0x0) : *const fn();
+// ...
+// }
+// END rustc.main.ConstProp.after.mir
-fn test() -> &'static [u32] {
- &[1, 2]
-}
-
fn main() {
- let x = test()[0];
+ (&[1u32, 2, 3] as &[u32])[1];
}
// END RUST SOURCE
// START rustc.main.ConstProp.before.mir
-// bb1: {
+// bb0: {
// ...
-// _3 = const 0usize;
-// _4 = Len((*_2));
-// _5 = Lt(_3, _4);
-// assert(move _5, "index out of bounds: the len is move _4 but the index is _3") -> bb2;
+// _4 = &(promoted[0]: [u32; 3]);
+// _3 = _4;
+// _2 = move _3 as &[u32] (Pointer(Unsize));
+// ...
+// _6 = const 1usize;
+// _7 = Len((*_2));
+// _8 = Lt(_6, _7);
+// assert(move _8, "index out of bounds: the len is move _7 but the index is _6") -> bb1;
// }
-// bb2: {
-// _1 = (*_2)[_3];
+// bb1: {
+// _1 = (*_2)[_6];
// ...
// return;
// }
// START rustc.main.ConstProp.after.mir
// bb0: {
// ...
-// _3 = const 0usize;
-// _4 = Len((*_2));
-// _5 = Lt(_3, _4);
-// assert(move _5, "index out of bounds: the len is move _4 but the index is _3") -> bb2;
+// _4 = const Scalar(AllocId(0).0x0) : &[u32; 3];
+// _3 = const Scalar(AllocId(0).0x0) : &[u32; 3];
+// _2 = move _3 as &[u32] (Pointer(Unsize));
+// ...
+// _6 = const 1usize;
+// _7 = const 3usize;
+// _8 = const true;
+// assert(const true, "index out of bounds: the len is move _7 but the index is _6") -> bb1;
// }
-// bb2: {
-// _1 = (*_2)[_3];
+// bb1: {
+// _1 = (*_2)[_6];
// ...
// return;
// }
// let mut _0: Test;
//
// bb0: {
-// _0 = Test::X(move _1,);
+// ((_0 as X).0: usize) = move _1;
+// discriminant(_0) = 0;
// return;
// }
// }
-// pp-exact
// Testing that both the inner item and next outer item are
// preserved, and that the first outer item parsed in main is not
// accidentally carried over to each inner function
-#![feature(custom_attribute)]
+// pp-exact
+
+#![feature(rustc_attrs)]
fn main() {
- #![inner_attr]
- #[outer_attr]
+ #![rustc_dummy]
+ #[rustc_dummy]
fn f() { }
- #[outer_attr]
+ #[rustc_dummy]
fn g() { }
}
-// pp-exact
// Tests literals in attributes.
-#![feature(custom_attribute)]
+// pp-exact
+
+#![feature(rustc_attrs)]
fn main() {
- #![hello("hi", 1, 2, 1.012, pi = 3.14, bye, name("John"))]
- #[align = 8]
+ #![rustc_dummy("hi", 1, 2, 1.012, pi = 3.14, bye, name("John"))]
+ #[rustc_dummy = 8]
fn f() { }
- #[vector(1, 2, 3)]
+ #[rustc_dummy(1, 2, 3)]
fn g() { }
}
// pp-exact
-#![feature(custom_attribute)]
#![feature(box_syntax)]
+#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
fn main() { }
fn _0() {
- #[attr]
+ #[rustc_dummy]
foo();
}
fn _1() {
- #[attr]
+ #[rustc_dummy]
unsafe {
// code
}
fn _2() {
- #[attr]
+ #[rustc_dummy]
{ foo(); }
{
- #![attr]
+ #![rustc_dummy]
foo()
}
fn _3() {
- #[attr]
+ #[rustc_dummy]
match () { _ => { } }
}
fn _4() {
- #[attr]
+ #[rustc_dummy]
match () {
- #![attr]
+ #![rustc_dummy]
_ => (),
}
let _ =
- #[attr] match () {
- #![attr]
- () => (),
- };
+ #[rustc_dummy] match () {
+ #![rustc_dummy]
+ () => (),
+ };
}
fn _5() {
- #[attr]
+ #[rustc_dummy]
let x = 1;
- let x = #[attr] 1;
+ let x = #[rustc_dummy] 1;
let y = ();
let z = ();
- foo3(x, #[attr] y, z);
+ foo3(x, #[rustc_dummy] y, z);
- qux(3 + #[attr] 2);
+ qux(3 + #[rustc_dummy] 2);
}
fn _6() {
- #[attr]
- [#![attr] 1, 2, 3];
+ #[rustc_dummy]
+ [#![rustc_dummy] 1, 2, 3];
- let _ = #[attr] [#![attr] 1, 2, 3];
+ let _ = #[rustc_dummy] [#![rustc_dummy] 1, 2, 3];
- #[attr]
- [#![attr] 1; 4];
+ #[rustc_dummy]
+ [#![rustc_dummy] 1; 4];
- let _ = #[attr] [#![attr] 1; 4];
+ let _ = #[rustc_dummy] [#![rustc_dummy] 1; 4];
}
struct Foo {
fn _7() {
- #[attr]
- Foo{#![attr] data: (),};
+ #[rustc_dummy]
+ Foo{#![rustc_dummy] data: (),};
- let _ = #[attr] Foo{#![attr] data: (),};
+ let _ = #[rustc_dummy] Foo{#![rustc_dummy] data: (),};
}
fn _8() {
- #[attr]
- (#![attr] );
+ #[rustc_dummy]
+ (#![rustc_dummy] );
- #[attr]
- (#![attr] 0);
+ #[rustc_dummy]
+ (#![rustc_dummy] 0);
- #[attr]
- (#![attr] 0,);
+ #[rustc_dummy]
+ (#![rustc_dummy] 0,);
- #[attr]
- (#![attr] 0, 1);
+ #[rustc_dummy]
+ (#![rustc_dummy] 0, 1);
}
fn _9() {
macro_rules! stmt_mac(( ) => { let _ = ( ) ; });
- #[attr]
+ #[rustc_dummy]
stmt_mac!();
- /*
- // pre existing pp bug: delimiter styles gets lost:
-
- #[attr]
+ #[rustc_dummy]
stmt_mac!{ };
- #[attr]
+ #[rustc_dummy]
stmt_mac![];
- #[attr]
- stmt_mac!{ } // pre-existing pp bug: compiler ICEs with a None unwrap
- */
+ #[rustc_dummy]
+ stmt_mac!{ }
let _ = ();
}
macro_rules! expr_mac(( ) => { ( ) });
fn _10() {
-
- let _ = #[attr] expr_mac!();
-
- /*
- // pre existing pp bug: delimiter styles gets lost:
- let _ = #[attr] expr_mac![];
- let _ = #[attr] expr_mac!{};
- */
+ let _ = #[rustc_dummy] expr_mac!();
+ let _ = #[rustc_dummy] expr_mac![];
+ let _ = #[rustc_dummy] expr_mac!{ };
}
fn _11() {
- let _ = #[attr] box 0;
- let _: [(); 0] = #[attr] [#![attr] ];
- let _ = #[attr] [#![attr] 0, 0];
- let _ = #[attr] [#![attr] 0; 0];
- let _ = #[attr] foo();
- let _ = #[attr] 1i32.clone();
- let _ = #[attr] (#![attr] );
- let _ = #[attr] (#![attr] 0);
- let _ = #[attr] (#![attr] 0,);
- let _ = #[attr] (#![attr] 0, 0);
- let _ = #[attr] 0 + #[attr] 0;
- let _ = #[attr] !0;
- let _ = #[attr] -0i32;
- let _ = #[attr] false;
- let _ = #[attr] 'c';
- let _ = #[attr] 0;
- let _ = #[attr] 0 as usize;
+ let _ = #[rustc_dummy] box 0;
+ let _: [(); 0] = #[rustc_dummy] [#![rustc_dummy] ];
+ let _ = #[rustc_dummy] [#![rustc_dummy] 0, 0];
+ let _ = #[rustc_dummy] [#![rustc_dummy] 0; 0];
+ let _ = #[rustc_dummy] foo();
+ let _ = #[rustc_dummy] 1i32.clone();
+ let _ = #[rustc_dummy] (#![rustc_dummy] );
+ let _ = #[rustc_dummy] (#![rustc_dummy] 0);
+ let _ = #[rustc_dummy] (#![rustc_dummy] 0,);
+ let _ = #[rustc_dummy] (#![rustc_dummy] 0, 0);
+ let _ = #[rustc_dummy] 0 + #[rustc_dummy] 0;
+ let _ = #[rustc_dummy] !0;
+ let _ = #[rustc_dummy] -0i32;
+ let _ = #[rustc_dummy] false;
+ let _ = #[rustc_dummy] 'c';
+ let _ = #[rustc_dummy] 0;
+ let _ = #[rustc_dummy] 0 as usize;
let _ =
- #[attr] while false {
- #![attr]
- };
+ #[rustc_dummy] while false {
+ #![rustc_dummy]
+ };
let _ =
- #[attr] while let None = Some(()) {
- #![attr]
- };
+ #[rustc_dummy] while let None = Some(()) {
+ #![rustc_dummy]
+ };
let _ =
- #[attr] for _ in 0..0 {
- #![attr]
- };
+ #[rustc_dummy] for _ in 0..0 {
+ #![rustc_dummy]
+ };
// FIXME: pp bug, two spaces after the loop
let _ =
- #[attr] loop {
- #![attr]
- };
+ #[rustc_dummy] loop {
+ #![rustc_dummy]
+ };
let _ =
- #[attr] match false {
- #![attr]
- _ => (),
- };
- let _ = #[attr] || #[attr] ();
- let _ = #[attr] move || #[attr] ();
+ #[rustc_dummy] match false {
+ #![rustc_dummy]
+ _ => (),
+ };
+ let _ = #[rustc_dummy] || #[rustc_dummy] ();
+ let _ = #[rustc_dummy] move || #[rustc_dummy] ();
let _ =
- #[attr] ||
- {
- #![attr]
- #[attr]
- ()
- };
+ #[rustc_dummy] ||
+ {
+ #![rustc_dummy]
+ #[rustc_dummy]
+ ()
+ };
let _ =
- #[attr] move ||
- {
- #![attr]
- #[attr]
- ()
- };
+ #[rustc_dummy] move ||
+ {
+ #![rustc_dummy]
+ #[rustc_dummy]
+ ()
+ };
let _ =
- #[attr] {
- #![attr]
- };
+ #[rustc_dummy] {
+ #![rustc_dummy]
+ };
let _ =
- #[attr] {
- #![attr]
- let _ = ();
- };
+ #[rustc_dummy] {
+ #![rustc_dummy]
+ let _ = ();
+ };
let _ =
- #[attr] {
- #![attr]
- let _ = ();
- ()
- };
+ #[rustc_dummy] {
+ #![rustc_dummy]
+ let _ = ();
+ ()
+ };
let mut x = 0;
- let _ = #[attr] x = 15;
- let _ = #[attr] x += 15;
+ let _ = #[rustc_dummy] x = 15;
+ let _ = #[rustc_dummy] x += 15;
let s = Foo{data: (),};
- let _ = #[attr] s.data;
- let _ = (#[attr] s).data;
+ let _ = #[rustc_dummy] s.data;
+ let _ = (#[rustc_dummy] s).data;
let t = Bar(());
- let _ = #[attr] t.0;
- let _ = (#[attr] t).0;
+ let _ = #[rustc_dummy] t.0;
+ let _ = (#[rustc_dummy] t).0;
let v = vec!(0);
- let _ = #[attr] v[0];
- let _ = (#[attr] v)[0];
- let _ = #[attr] 0..#[attr] 0;
- let _ = #[attr] 0..;
- let _ = #[attr] (0..0);
- let _ = #[attr] (0..);
- let _ = #[attr] (..0);
- let _ = #[attr] (..);
- let _: fn(&u32) -> u32 = #[attr] std::clone::Clone::clone;
- let _ = #[attr] &0;
- let _ = #[attr] &mut 0;
- let _ = #[attr] &#[attr] 0;
- let _ = #[attr] &mut #[attr] 0;
+ let _ = #[rustc_dummy] v[0];
+ let _ = (#[rustc_dummy] v)[0];
+ let _ = #[rustc_dummy] 0..#[rustc_dummy] 0;
+ let _ = #[rustc_dummy] 0..;
+ let _ = #[rustc_dummy] (0..0);
+ let _ = #[rustc_dummy] (0..);
+ let _ = #[rustc_dummy] (..0);
+ let _ = #[rustc_dummy] (..);
+ let _: fn(&u32) -> u32 = #[rustc_dummy] std::clone::Clone::clone;
+ let _ = #[rustc_dummy] &0;
+ let _ = #[rustc_dummy] &mut 0;
+ let _ = #[rustc_dummy] &#[rustc_dummy] 0;
+ let _ = #[rustc_dummy] &mut #[rustc_dummy] 0;
// FIXME: pp bug, extra space after keyword?
- while false { let _ = #[attr] continue ; }
- while true { let _ = #[attr] break ; }
- || #[attr] return;
- let _ = #[attr] expr_mac!();
- /* FIXME: pp bug, losing delimiter styles
- let _ = #[attr] expr_mac![];
- let _ = #[attr] expr_mac!{};
- */
- let _ = #[attr] Foo{#![attr] data: (),};
- let _ = #[attr] Foo{#![attr] ..s};
- let _ = #[attr] Foo{#![attr] data: (), ..s};
- let _ = #[attr] (#![attr] 0);
+ while false { let _ = #[rustc_dummy] continue ; }
+ while true { let _ = #[rustc_dummy] break ; }
+ || #[rustc_dummy] return;
+ let _ = #[rustc_dummy] expr_mac!();
+ let _ = #[rustc_dummy] expr_mac![];
+ let _ = #[rustc_dummy] expr_mac!{ };
+ let _ = #[rustc_dummy] Foo{#![rustc_dummy] data: (),};
+ let _ = #[rustc_dummy] Foo{#![rustc_dummy] ..s};
+ let _ = #[rustc_dummy] Foo{#![rustc_dummy] data: (), ..s};
+ let _ = #[rustc_dummy] (#![rustc_dummy] 0);
}
fn _12() {
- #[attr]
+ #[rustc_dummy]
let _ = 0;
- #[attr]
+ #[rustc_dummy]
0;
- #[attr]
+ #[rustc_dummy]
expr_mac!();
- #[attr]
+ #[rustc_dummy]
{
- #![attr]
+ #![rustc_dummy]
}
}
+++ /dev/null
-// force-host
-
-#![feature(plugin_registrar, rustc_private)]
-
-extern crate syntax;
-extern crate syntax_ext;
-extern crate rustc_plugin;
-
-use syntax_ext::deriving;
-use deriving::generic::*;
-use deriving::generic::ty::*;
-
-use rustc_plugin::Registry;
-use syntax::ast::*;
-use syntax::source_map::Span;
-use syntax::ext::base::*;
-use syntax::ext::build::AstBuilder;
-use syntax::symbol::Symbol;
-use syntax::ptr::P;
-
-#[plugin_registrar]
-pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_syntax_extension(Symbol::intern("derive_CustomPartialEq"),
- MultiDecorator(Box::new(expand_deriving_partial_eq)));
-}
-
-fn expand_deriving_partial_eq(cx: &mut ExtCtxt, span: Span, mitem: &MetaItem, item: &Annotatable,
- push: &mut FnMut(Annotatable)) {
- // structures are equal if all fields are equal, and non equal, if
- // any fields are not equal or if the enum variants are different
- fn cs_eq(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> P<Expr> {
- cs_fold(true,
- |cx, span, subexpr, self_f, other_fs| {
- let other_f = (other_fs.len(), other_fs.get(0)).1.unwrap();
- let eq = cx.expr_binary(span, BinOpKind::Eq, self_f, other_f.clone());
- cx.expr_binary(span, BinOpKind::And, subexpr, eq)
- },
- cx.expr_bool(span, true),
- Box::new(|cx, span, _, _| cx.expr_bool(span, false)),
- cx,
- span,
- substr)
- }
-
- let inline = cx.meta_word(span, Symbol::intern("inline"));
- let attrs = vec![cx.attribute(span, inline)];
- let methods = vec![MethodDef {
- name: "eq",
- generics: LifetimeBounds::empty(),
- explicit_self: borrowed_explicit_self(),
- args: vec![(borrowed_self(), "other")],
- ret_ty: Literal(deriving::generic::ty::Path::new_local("bool")),
- attributes: attrs,
- is_unsafe: false,
- unify_fieldless_variants: true,
- combine_substructure: combine_substructure(Box::new(cs_eq)),
- }];
-
- let trait_def = TraitDef {
- span: span,
- attributes: Vec::new(),
- path: deriving::generic::ty::Path::new(vec!["cmp", "PartialEq"]),
- additional_bounds: Vec::new(),
- generics: LifetimeBounds::empty(),
- is_unsafe: false,
- supports_unions: false,
- methods: methods,
- associated_types: Vec::new(),
- };
- trait_def.expand(cx, mitem, item, push)
-}
+++ /dev/null
-// force-host
-
-#![feature(plugin_registrar)]
-#![feature(box_syntax)]
-#![feature(rustc_private)]
-
-extern crate syntax;
-extern crate syntax_ext;
-extern crate syntax_pos;
-extern crate rustc;
-extern crate rustc_plugin;
-
-use syntax::ast;
-use syntax::attr;
-use syntax::ext::base::{MultiDecorator, ExtCtxt, Annotatable};
-use syntax::ext::build::AstBuilder;
-use syntax::symbol::{Symbol, sym};
-use syntax::ptr::P;
-use syntax_ext::deriving::generic::{TraitDef, MethodDef, combine_substructure};
-use syntax_ext::deriving::generic::{Substructure, Struct, EnumMatching};
-use syntax_ext::deriving::generic::ty::{Literal, LifetimeBounds, Path, borrowed_explicit_self};
-use syntax_pos::Span;
-use rustc_plugin::Registry;
-
-#[plugin_registrar]
-pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_syntax_extension(
- Symbol::intern("rustc_derive_TotalSum"),
- MultiDecorator(box expand));
-}
-
-fn expand(cx: &mut ExtCtxt,
- span: Span,
- mitem: &ast::MetaItem,
- item: &Annotatable,
- push: &mut FnMut(Annotatable)) {
- let trait_def = TraitDef {
- span: span,
- attributes: vec![],
- path: Path::new_local("TotalSum"),
- additional_bounds: vec![],
- generics: LifetimeBounds::empty(),
- associated_types: vec![],
- is_unsafe: false,
- supports_unions: false,
- methods: vec![
- MethodDef {
- name: "total_sum",
- generics: LifetimeBounds::empty(),
- explicit_self: borrowed_explicit_self(),
- args: vec![],
- ret_ty: Literal(Path::new_local("isize")),
- attributes: vec![],
- is_unsafe: false,
- unify_fieldless_variants: true,
- combine_substructure: combine_substructure(Box::new(totalsum_substructure)),
- },
- ],
- };
-
- trait_def.expand(cx, mitem, item, push)
-}
-
-// Mostly copied from syntax::ext::deriving::hash
-/// Defines how the implementation for `trace()` is to be generated
-fn totalsum_substructure(cx: &mut ExtCtxt, trait_span: Span,
- substr: &Substructure) -> P<ast::Expr> {
- let fields = match *substr.fields {
- Struct(_, ref fs) | EnumMatching(.., ref fs) => fs,
- _ => cx.span_bug(trait_span, "impossible substructure")
- };
-
- fields.iter().fold(cx.expr_isize(trait_span, 0), |acc, ref item| {
- if attr::contains_name(&item.attrs, sym::ignore) {
- acc
- } else {
- cx.expr_binary(item.span, ast::BinOpKind::Add, acc,
- cx.expr_method_call(item.span,
- item.self_.clone(),
- substr.method_ident,
- Vec::new()))
- }
- })
-}
+++ /dev/null
-// force-host
-
-#![feature(plugin_registrar)]
-#![feature(box_syntax)]
-#![feature(rustc_private)]
-
-extern crate syntax;
-extern crate syntax_ext;
-extern crate syntax_pos;
-extern crate rustc;
-extern crate rustc_plugin;
-
-use syntax::ast;
-use syntax::ext::base::{MultiDecorator, ExtCtxt, Annotatable};
-use syntax::ext::build::AstBuilder;
-use syntax::symbol::Symbol;
-use syntax_ext::deriving::generic::{cs_fold, TraitDef, MethodDef, combine_substructure};
-use syntax_ext::deriving::generic::ty::{Literal, LifetimeBounds, Path, borrowed_explicit_self};
-use syntax_pos::Span;
-use rustc_plugin::Registry;
-
-#[plugin_registrar]
-pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_syntax_extension(
- Symbol::intern("derive_TotalSum"),
- MultiDecorator(box expand));
-
- reg.register_syntax_extension(
- Symbol::intern("derive_Nothing"),
- MultiDecorator(box noop));
-}
-
-fn noop(_: &mut ExtCtxt, _: Span, _: &ast::MetaItem, _: &Annotatable, _: &mut FnMut(Annotatable)) {}
-
-fn expand(cx: &mut ExtCtxt,
- span: Span,
- mitem: &ast::MetaItem,
- item: &Annotatable,
- push: &mut FnMut(Annotatable)) {
- let trait_def = TraitDef {
- span: span,
- attributes: vec![],
- path: Path::new_local("TotalSum"),
- additional_bounds: vec![],
- generics: LifetimeBounds::empty(),
- associated_types: vec![],
- is_unsafe: false,
- supports_unions: false,
- methods: vec![
- MethodDef {
- name: "total_sum",
- generics: LifetimeBounds::empty(),
- explicit_self: borrowed_explicit_self(),
- args: vec![],
- ret_ty: Literal(Path::new_local("isize")),
- attributes: vec![],
- is_unsafe: false,
- unify_fieldless_variants: true,
- combine_substructure: combine_substructure(box |cx, span, substr| {
- let zero = cx.expr_isize(span, 0);
- cs_fold(false,
- |cx, span, subexpr, field, _| {
- cx.expr_binary(span, ast::BinOpKind::Add, subexpr,
- cx.expr_method_call(span, field,
- ast::Ident::from_str("total_sum"), vec![]))
- },
- zero,
- box |cx, span, _, _| { cx.span_bug(span, "wtf??"); },
- cx, span, substr)
- }),
- },
- ],
- };
-
- trait_def.expand(cx, mitem, item, push)
-}
use std::borrow::ToOwned;
use syntax::ast;
-use syntax::ext::hygiene;
use syntax::ext::build::AstBuilder;
-use syntax::ext::base::{TTMacroExpander, ExtCtxt, MacResult, MacEager, NormalTT};
+use syntax::ext::base::{SyntaxExtension, TTMacroExpander, ExtCtxt, MacResult, MacEager};
+use syntax::ext::hygiene::Transparency;
use syntax::print::pprust;
-use syntax::ptr::P;
use syntax::symbol::Symbol;
use syntax_pos::Span;
use syntax::tokenstream::TokenStream;
ecx: &'cx mut ExtCtxt,
sp: Span,
_: TokenStream,
- _: Option<Span>) -> Box<MacResult+'cx> {
+ _: Option<Span>) -> Box<dyn MacResult+'cx> {
let args = self.args.iter().map(|i| pprust::meta_list_item_to_string(i))
.collect::<Vec<_>>().join(", ");
MacEager::expr(ecx.expr_str(sp, Symbol::intern(&args)))
pub fn plugin_registrar(reg: &mut Registry) {
let args = reg.args().to_owned();
reg.register_syntax_extension(Symbol::intern("plugin_args"),
- NormalTT {
+ SyntaxExtension::LegacyBang {
expander: Box::new(Expander { args: args, }),
def_info: None,
+ transparency: Transparency::SemiTransparent,
allow_internal_unstable: None,
allow_internal_unsafe: false,
local_inner_macros: false,
+// WARNING WARNING WARNING WARNING WARNING
+// =======================================
+//
+// This code also appears in src/doc/unstable-book/src/language-features/plugin.md.
+// Please keep the two copies in sync! FIXME: have rustdoc read this file
+
// force-host
#![crate_type="dylib"]
extern crate rustc;
extern crate rustc_plugin;
-use syntax::parse::token;
+use syntax::parse::token::{self, Token};
use syntax::tokenstream::TokenTree;
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
-use syntax::ext::build::AstBuilder; // trait for expr_usize
+use syntax::ext::build::AstBuilder; // A trait for expr_usize.
use syntax_pos::Span;
use rustc_plugin::Registry;
-// WARNING WARNING WARNING WARNING WARNING
-// =======================================
-//
-// This code also appears in src/doc/unstable-book/src/language-features/plugin.md.
-// Please keep the two copies in sync! FIXME: have rustdoc read this file
-
fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
- -> Box<MacResult + 'static> {
+ -> Box<dyn MacResult + 'static> {
static NUMERALS: &'static [(&'static str, usize)] = &[
("M", 1000), ("CM", 900), ("D", 500), ("CD", 400),
}
let text = match args[0] {
- TokenTree::Token(_, token::Ident(s, _)) => s.to_string(),
+ TokenTree::Token(Token { kind: token::Ident(s, _), .. }) => s.to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
+++ /dev/null
-// aux-build:custom-derive-partial-eq.rs
-// ignore-stage1
-#![feature(plugin)]
-#![plugin(custom_derive_partial_eq)]
-#![allow(unused)]
-
-#[derive_CustomPartialEq] // Check that this is not a stability error.
-enum E { V1, V2 }
-
-fn main() {}
+++ /dev/null
-// aux-build:custom-derive-plugin-attr.rs
-// ignore-stage1
-
-#![feature(plugin, rustc_attrs)]
-#![plugin(custom_derive_plugin_attr)]
-
-trait TotalSum {
- fn total_sum(&self) -> isize;
-}
-
-impl TotalSum for isize {
- fn total_sum(&self) -> isize {
- *self
- }
-}
-
-struct Seven;
-
-impl TotalSum for Seven {
- fn total_sum(&self) -> isize {
- 7
- }
-}
-
-#[rustc_derive_TotalSum]
-struct Foo {
- seven: Seven,
- bar: Bar,
- baz: isize,
- #[ignore]
- nan: NaN,
-}
-
-#[rustc_derive_TotalSum]
-struct Bar {
- quux: isize,
- bleh: isize,
- #[ignore]
- nan: NaN2
-}
-
-struct NaN;
-
-impl TotalSum for NaN {
- fn total_sum(&self) -> isize {
- panic!();
- }
-}
-
-struct NaN2;
-
-pub fn main() {
- let v = Foo {
- seven: Seven,
- bar: Bar {
- quux: 9,
- bleh: 3,
- nan: NaN2
- },
- baz: 80,
- nan: NaN
- };
- assert_eq!(v.total_sum(), 99);
-}
+++ /dev/null
-// aux-build:custom-derive-plugin.rs
-// ignore-stage1
-
-#![feature(plugin)]
-#![plugin(custom_derive_plugin)]
-
-trait TotalSum {
- fn total_sum(&self) -> isize;
-}
-
-impl TotalSum for isize {
- fn total_sum(&self) -> isize {
- *self
- }
-}
-
-struct Seven;
-
-impl TotalSum for Seven {
- fn total_sum(&self) -> isize {
- 7
- }
-}
-
-#[derive_TotalSum]
-struct Foo {
- seven: Seven,
- bar: Bar,
- baz: isize,
-}
-
-#[derive_TotalSum]
-struct Bar {
- quux: isize,
- bleh: isize,
-}
-
-
-pub fn main() {
- let v = Foo {
- seven: Seven,
- bar: Bar {
- quux: 9,
- bleh: 3,
- },
- baz: 80,
- };
- assert_eq!(v.total_sum(), 99);
-}
+++ /dev/null
-#![allow(dead_code)]
-// aux-build:custom-derive-plugin.rs
-// ignore-stage1
-
-#![feature(plugin)]
-#![plugin(custom_derive_plugin)]
-
-#[derive_Nothing]
-#[derive_Nothing]
-#[derive_Nothing]
-struct S;
-
-fn main() {}
--- /dev/null
+// edition:2018
+// aux-build:arc_wake.rs
+
+#![feature(async_await, await_macro)]
+
+extern crate arc_wake;
+
+use std::pin::Pin;
+use std::future::Future;
+use std::sync::{
+ Arc,
+ atomic::{self, AtomicUsize},
+};
+use std::task::{Context, Poll};
+use arc_wake::ArcWake;
+
+struct Counter {
+ wakes: AtomicUsize,
+}
+
+impl ArcWake for Counter {
+ fn wake(self: Arc<Self>) {
+ Self::wake_by_ref(&self)
+ }
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.wakes.fetch_add(1, atomic::Ordering::SeqCst);
+ }
+}
+
+struct WakeOnceThenComplete(bool, u8);
+
+impl Future for WakeOnceThenComplete {
+ type Output = u8;
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u8> {
+ if self.0 {
+ Poll::Ready(self.1)
+ } else {
+ cx.waker().wake_by_ref();
+ self.0 = true;
+ Poll::Pending
+ }
+ }
+}
+
+fn wait(fut: impl Future<Output = u8>) -> u8 {
+ let mut fut = Box::pin(fut);
+ let counter = Arc::new(Counter { wakes: AtomicUsize::new(0) });
+ let waker = ArcWake::into_waker(counter.clone());
+ let mut cx = Context::from_waker(&waker);
+ loop {
+ match fut.as_mut().poll(&mut cx) {
+ Poll::Ready(out) => return out,
+ Poll::Pending => (),
+ }
+ }
+}
+
+fn base() -> WakeOnceThenComplete { WakeOnceThenComplete(false, 1) }
+
+async fn await1_level1() -> u8 {
+ await!(base())
+}
+
+async fn await2_level1() -> u8 {
+ await!(base()) + await!(base())
+}
+
+async fn await3_level1() -> u8 {
+ await!(base()) + await!(base()) + await!(base())
+}
+
+async fn await3_level2() -> u8 {
+ await!(await3_level1()) + await!(await3_level1()) + await!(await3_level1())
+}
+
+async fn await3_level3() -> u8 {
+ await!(await3_level2()) + await!(await3_level2()) + await!(await3_level2())
+}
+
+async fn await3_level4() -> u8 {
+ await!(await3_level3()) + await!(await3_level3()) + await!(await3_level3())
+}
+
+async fn await3_level5() -> u8 {
+ await!(await3_level4()) + await!(await3_level4()) + await!(await3_level4())
+}
+
+fn main() {
+ assert_eq!(2, std::mem::size_of_val(&base()));
+ assert_eq!(8, std::mem::size_of_val(&await1_level1()));
+ assert_eq!(12, std::mem::size_of_val(&await2_level1()));
+ assert_eq!(12, std::mem::size_of_val(&await3_level1()));
+ assert_eq!(20, std::mem::size_of_val(&await3_level2()));
+ assert_eq!(28, std::mem::size_of_val(&await3_level3()));
+ assert_eq!(36, std::mem::size_of_val(&await3_level4()));
+ assert_eq!(44, std::mem::size_of_val(&await3_level5()));
+
+ assert_eq!(1, wait(base()));
+ assert_eq!(1, wait(await1_level1()));
+ assert_eq!(2, wait(await2_level1()));
+ assert_eq!(3, wait(await3_level1()));
+ assert_eq!(9, wait(await3_level2()));
+ assert_eq!(27, wait(await3_level3()));
+ assert_eq!(81, wait(await3_level4()));
+ assert_eq!(243, wait(await3_level5()));
+}
+++ /dev/null
-#![allow(unused_attributes)]
-
-// pretty-expanded FIXME #23616
-
-#![feature(custom_attribute, test)]
-
-#[foo = "bar"]
-extern crate test;
-
-pub fn main() {
-}
+++ /dev/null
-#![allow(unused_attributes)]
-
-// pretty-expanded FIXME #23616
-
-#![feature(custom_attribute, test)]
-
-mod m {
- #[foo = "bar"]
- extern crate test;
-}
-
-pub fn main() {
-}
+++ /dev/null
-#![allow(unused_attributes)]
-#![allow(unknown_lints)]
-
-// pretty-expanded FIXME #23616
-
-#![allow(unused_attribute)]
-#![feature(custom_attribute)]
-
-#[foo(bar)]
-mod foo {
- #![feature(globs)]
-}
-
-pub fn main() {}
+++ /dev/null
-// no-prefer-dynamic
-
-#![feature(allocator, core_intrinsics, panic_unwind)]
-#![allocator]
-#![crate_type = "rlib"]
-#![no_std]
-
-extern crate unwind;
-
-pub static mut HITS: usize = 0;
-
-type size_t = usize;
-
-extern {
- fn malloc(size: usize) -> *mut u8;
- fn free(ptr: *mut u8);
- fn calloc(size: usize, amt: usize) -> *mut u8;
- fn realloc(ptr: *mut u8, size: usize) -> *mut u8;
-}
-
-#[no_mangle]
-pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
- unsafe {
- HITS += 1;
- malloc(size as size_t) as *mut u8
- }
-}
-
-#[no_mangle]
-pub extern fn __rust_allocate_zeroed(size: usize, _align: usize) -> *mut u8 {
- unsafe { calloc(size as size_t, 1) as *mut u8 }
-}
-
-#[no_mangle]
-pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
- unsafe {
- HITS += 1;
- free(ptr as *mut _)
- }
-}
-
-#[no_mangle]
-pub extern fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize,
- align: usize) -> *mut u8 {
- unsafe {
- realloc(ptr as *mut _, size as size_t) as *mut u8
- }
-}
-
-#[no_mangle]
-pub extern fn __rust_reallocate_inplace(ptr: *mut u8, old_size: usize,
- size: usize, align: usize) -> usize {
- unsafe { core::intrinsics::abort() }
-}
-
-#[no_mangle]
-pub extern fn __rust_usable_size(size: usize, align: usize) -> usize {
- unsafe { core::intrinsics::abort() }
-}
// pretty-expanded FIXME #23616
-#![feature(custom_attribute, rustc_private)]
+#![feature(rustc_private)]
extern crate check_static_recursion_foreign_helper;
extern crate libc;
const TO_LE_BYTES: [u8; 4] = 0x12_34_56_78_i32.to_le_bytes();
const TO_NE_BYTES: [u8; 4] = i32::min_value().to_be().to_ne_bytes();
-fn ident<T>(ident: T) -> T {
- ident
-}
-
fn main() {
- assert_eq!(REVERSE, ident(0x1e6a2c48));
- assert_eq!(FROM_BE_BYTES, ident(0x12_34_56_78));
- assert_eq!(FROM_LE_BYTES, ident(0x78_56_34_12));
- assert_eq!(FROM_NE_BYTES, ident(i32::min_value()));
- assert_eq!(TO_BE_BYTES, ident([0x12, 0x34, 0x56, 0x78]));
- assert_eq!(TO_LE_BYTES, ident([0x78, 0x56, 0x34, 0x12]));
- assert_eq!(TO_NE_BYTES, ident([0x80, 0, 0, 0]));
+ assert_eq!(REVERSE, 0x1e6a2c48);
+ assert_eq!(FROM_BE_BYTES, 0x12_34_56_78);
+ assert_eq!(FROM_LE_BYTES, 0x78_56_34_12);
+ assert_eq!(FROM_NE_BYTES, i32::min_value());
+ assert_eq!(TO_BE_BYTES, [0x12, 0x34, 0x56, 0x78]);
+ assert_eq!(TO_LE_BYTES, [0x78, 0x56, 0x34, 0x12]);
+ assert_eq!(TO_NE_BYTES, [0x80, 0, 0, 0]);
}
const NEG_A: (u32, bool) = 0u32.overflowing_neg();
const NEG_B: (u32, bool) = core::u32::MAX.overflowing_neg();
-fn ident<T>(ident: T) -> T {
- ident
-}
-
fn main() {
- assert_eq!(ADD_A, ident((7, false)));
- assert_eq!(ADD_B, ident((0, true)));
+ assert_eq!(ADD_A, (7, false));
+ assert_eq!(ADD_B, (0, true));
- assert_eq!(SUB_A, ident((3, false)));
- assert_eq!(SUB_B, ident((u32::max_value(), true)));
+ assert_eq!(SUB_A, (3, false));
+ assert_eq!(SUB_B, (u32::max_value(), true));
- assert_eq!(MUL_A, ident((10, false)));
- assert_eq!(MUL_B, ident((1410065408, true)));
+ assert_eq!(MUL_A, (10, false));
+ assert_eq!(MUL_B, (1410065408, true));
- assert_eq!(SHL_A, ident((0x10, false)));
- assert_eq!(SHL_B, ident((0x10, true)));
+ assert_eq!(SHL_A, (0x10, false));
+ assert_eq!(SHL_B, (0x10, true));
- assert_eq!(SHR_A, ident((0x1, false)));
- assert_eq!(SHR_B, ident((0x1, true)));
+ assert_eq!(SHR_A, (0x1, false));
+ assert_eq!(SHR_B, (0x1, true));
- assert_eq!(NEG_A, ident((0, false)));
- assert_eq!(NEG_B, ident((1, true)));
+ assert_eq!(NEG_A, (0, false));
+ assert_eq!(NEG_B, (1, true));
}
const MULTIPLE_ROTATE_LEFT: i32 = 0b0010_0001i32.rotate_left(128);
const MULTIPLE_ROTATE_RIGHT: i32 = 0b0010_0001i32.rotate_right(128);
-fn ident<T>(ident: T) -> T {
- ident
-}
-
fn main() {
- assert_eq!(LEFT, ident(0xb301));
- assert_eq!(RIGHT, ident(0x0100_00b3));
+ assert_eq!(LEFT, 0xb301);
+ assert_eq!(RIGHT, 0x0100_00b3);
- assert_eq!(LEFT_OVERFLOW, ident(0));
- assert_eq!(RIGHT_OVERFLOW, ident(0));
- assert_eq!(ONE_LEFT_OVERFLOW, ident(0b0001_0000_0000_0000));
- assert_eq!(ONE_RIGHT_OVERFLOW, ident(0b0001_0000));
+ assert_eq!(LEFT_OVERFLOW, 0);
+ assert_eq!(RIGHT_OVERFLOW, 0);
+ assert_eq!(ONE_LEFT_OVERFLOW, 0b0001_0000_0000_0000);
+ assert_eq!(ONE_RIGHT_OVERFLOW, 0b0001_0000);
- assert_eq!(NON_ZERO_LEFT_OVERFLOW, ident(0b0010_0000_0000_0000));
- assert_eq!(NON_ZERO_RIGHT_OVERFLOW, ident(0b0000_0000_0010_0000));
+ assert_eq!(NON_ZERO_LEFT_OVERFLOW, 0b0010_0000_0000_0000);
+ assert_eq!(NON_ZERO_RIGHT_OVERFLOW, 0b0000_0000_0010_0000);
- assert_eq!(ZERO_ROTATE_LEFT, ident(0b0010_0001));
- assert_eq!(ZERO_ROTATE_RIGHT, ident(0b0111_1001));
+ assert_eq!(ZERO_ROTATE_LEFT, 0b0010_0001);
+ assert_eq!(ZERO_ROTATE_RIGHT, 0b0111_1001);
- assert_eq!(MULTIPLE_ROTATE_LEFT, ident(0b0010_0001));
- assert_eq!(MULTIPLE_ROTATE_RIGHT, ident(0b0010_0001));
+ assert_eq!(MULTIPLE_ROTATE_LEFT, 0b0010_0001);
+ assert_eq!(MULTIPLE_ROTATE_RIGHT, 0b0010_0001);
}
+#![feature(const_int_sign)]
+
const NEGATIVE_A: bool = (-10i32).is_negative();
const NEGATIVE_B: bool = 10i32.is_negative();
-const POSITIVE_A: bool= (-10i32).is_positive();
-const POSITIVE_B: bool= 10i32.is_positive();
+const POSITIVE_A: bool = (-10i32).is_positive();
+const POSITIVE_B: bool = 10i32.is_positive();
+
+const SIGNUM_POS: i32 = 10i32.signum();
+const SIGNUM_NIL: i32 = 0i32.signum();
+const SIGNUM_NEG: i32 = (-42i32).signum();
fn main() {
assert!(NEGATIVE_A);
assert!(!NEGATIVE_B);
assert!(!POSITIVE_A);
assert!(POSITIVE_B);
+
+ assert_eq!(SIGNUM_POS, 1);
+ assert_eq!(SIGNUM_NIL, 0);
+ assert_eq!(SIGNUM_NEG, -1);
}
const NEG_A: u32 = 5u32.wrapping_neg();
const NEG_B: u32 = 1234567890u32.wrapping_neg();
-fn ident<T>(ident: T) -> T {
- ident
-}
-
fn main() {
- assert_eq!(ADD_A, ident(255));
- assert_eq!(ADD_B, ident(199));
+ assert_eq!(ADD_A, 255);
+ assert_eq!(ADD_B, 199);
- assert_eq!(SUB_A, ident(0));
- assert_eq!(SUB_B, ident(101));
+ assert_eq!(SUB_A, 0);
+ assert_eq!(SUB_B, 101);
- assert_eq!(MUL_A, ident(120));
- assert_eq!(MUL_B, ident(44));
+ assert_eq!(MUL_A, 120);
+ assert_eq!(MUL_B, 44);
- assert_eq!(SHL_A, ident(128));
- assert_eq!(SHL_B, ident(1));
+ assert_eq!(SHL_A, 128);
+ assert_eq!(SHL_B, 1);
- assert_eq!(SHR_A, ident(1));
- assert_eq!(SHR_B, ident(128));
+ assert_eq!(SHR_A, 1);
+ assert_eq!(SHR_B, 128);
- assert_eq!(NEG_A, ident(4294967291));
- assert_eq!(NEG_B, ident(3060399406));
+ assert_eq!(NEG_A, 4294967291);
+ assert_eq!(NEG_B, 3060399406);
}
#![feature(test)]
extern crate test;
-use test::black_box as b;
+use test::black_box as b; // prevent promotion of the argument and const-propagation of the result
const BE_U32: u32 = 55u32.to_be();
const LE_U32: u32 = 55u32.to_le();
// run-pass
+#![feature(ptr_internals, test)]
+
+extern crate test;
+use test::black_box as b; // prevent promotion of the argument and const-propagation of the result
+
use std::ptr::NonNull;
const DANGLING: NonNull<u32> = NonNull::dangling();
const CASTED: NonNull<u32> = NonNull::cast(NonNull::<i32>::dangling());
-fn ident<T>(ident: T) -> T {
- ident
-}
-
pub fn main() {
- assert_eq!(DANGLING, ident(NonNull::dangling()));
- assert_eq!(CASTED, ident(NonNull::dangling()));
+ // Be super-extra paranoid and cast the fn items to fn pointers before blackboxing them.
+ assert_eq!(DANGLING, b::<fn() -> _>(NonNull::dangling)());
+ assert_eq!(CASTED, b::<fn() -> _>(NonNull::dangling)());
}
// run-pass
-#![feature(ptr_internals)]
+#![feature(ptr_internals, test)]
+
+extern crate test;
+use test::black_box as b; // prevent promotion of the argument and const-propagation of the result
use std::ptr::Unique;
-const PTR: *mut u32 = Unique::empty().as_ptr();
-fn ident<T>(ident: T) -> T {
- ident
-}
+const PTR: *mut u32 = Unique::empty().as_ptr();
pub fn main() {
- assert_eq!(PTR, ident(Unique::<u32>::empty().as_ptr()));
+ // Be super-extra paranoid and cast the fn items to fn pointers before blackboxing them.
+ assert_eq!(PTR, b::<fn() -> _>(Unique::<u32>::empty)().as_ptr());
}
--- /dev/null
+#![feature(generators)]
+
+fn main() {
+ let a = || {
+ {
+ let w: i32 = 4;
+ yield;
+ println!("{:?}", w);
+ }
+ {
+ let x: i32 = 5;
+ yield;
+ println!("{:?}", x);
+ }
+ {
+ let y: i32 = 6;
+ yield;
+ println!("{:?}", y);
+ }
+ {
+ let z: i32 = 7;
+ yield;
+ println!("{:?}", z);
+ }
+ };
+ assert_eq!(8, std::mem::size_of_val(&a));
+}
+++ /dev/null
-#![allow(non_camel_case_types)]
-#![allow(non_upper_case_globals)]
-#![allow(unused_attributes)]
-#![allow(dead_code)]
-#![allow(unknown_lints)]
-// These are attributes of the implicit crate. Really this just needs to parse
-// for completeness since .rs files linked from .rc files support this
-// notation to specify their module's attributes
-
-#![feature(custom_attribute)]
-#![allow(unused_attribute)]
-#![attr1 = "val"]
-#![attr2 = "val"]
-#![attr3]
-#![attr4(attr5)]
-
-#![crate_id="foobar#0.1"]
-
-// These are attributes of the following mod
-#[attr1 = "val"]
-#[attr2 = "val"]
-mod test_first_item_in_file_mod {}
-
-mod test_single_attr_outer {
- #[attr = "val"]
- pub static x: isize = 10;
-
- #[attr = "val"]
- pub fn f() { }
-
- #[attr = "val"]
- pub mod mod1 {}
-
- pub mod rustrt {
- #[attr = "val"]
- extern {}
- }
-}
-
-mod test_multi_attr_outer {
- #[attr1 = "val"]
- #[attr2 = "val"]
- pub static x: isize = 10;
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- pub fn f() { }
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- pub mod mod1 {}
-
- pub mod rustrt {
- #[attr1 = "val"]
- #[attr2 = "val"]
- extern {}
- }
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- struct t {x: isize}
-}
-
-mod test_stmt_single_attr_outer {
- pub fn f() {
- #[attr = "val"]
- static x: isize = 10;
-
- #[attr = "val"]
- fn f() { }
-
- #[attr = "val"]
- mod mod1 {
- }
-
- mod rustrt {
- #[attr = "val"]
- extern {
- }
- }
- }
-}
-
-mod test_stmt_multi_attr_outer {
- pub fn f() {
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- static x: isize = 10;
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- fn f() { }
-
- #[attr1 = "val"]
- #[attr2 = "val"]
- mod mod1 {
- }
-
- mod rustrt {
- #[attr1 = "val"]
- #[attr2 = "val"]
- extern {
- }
- }
- }
-}
-
-mod test_attr_inner {
- pub mod m {
- // This is an attribute of mod m
- #![attr = "val"]
- }
-}
-
-mod test_attr_inner_then_outer {
- pub mod m {
- // This is an attribute of mod m
- #![attr = "val"]
- // This is an attribute of fn f
- #[attr = "val"]
- fn f() { }
- }
-}
-
-mod test_attr_inner_then_outer_multi {
- pub mod m {
- // This is an attribute of mod m
- #![attr1 = "val"]
- #![attr2 = "val"]
- // This is an attribute of fn f
- #[attr1 = "val"]
- #[attr2 = "val"]
- fn f() { }
- }
-}
-
-mod test_distinguish_syntax_ext {
- pub fn f() {
- format!("test{}", "s");
- #[attr = "val"]
- fn g() { }
- }
-}
-
-mod test_other_forms {
- #[attr]
- #[attr(word)]
- #[attr(attr(word))]
- #[attr(key1 = "val", key2 = "val", attr)]
- pub fn f() { }
-}
-
-mod test_foreign_items {
- pub mod rustrt {
- extern {
- #![attr]
-
- #[attr]
- fn rust_get_test_int() -> u32;
- }
- }
-}
-
-
-// FIXME(#623): - these aren't supported yet
-/*mod test_literals {
- #![str = "s"]
- #![char = 'c']
- #![isize = 100]
- #![usize = 100_usize]
- #![mach_int = 100u32]
- #![float = 1.0]
- #![mach_float = 1.0f32]
- #![nil = ()]
- #![bool = true]
- mod m {}
-}*/
-
-fn test_fn_inner() {
- #![inner_fn_attr]
-}
-
-pub fn main() { }
let s = r"string\r
literal";\r
assert_eq!(s, "string\nliteral");\r
+ let s = br"byte string\r
+literal";\r
+ assert_eq!(s, "byte string\nliteral".as_bytes());\r
\r
// validate that our source file has CRLF endings\r
let source = include_str!("lexer-crlf-line-endings-string-literal-doc-comment.rs");\r
--- /dev/null
+// run-pass
+
+#![allow(unused_mut)]
+
+// Check that when `?` is followed by what looks like a Kleene operator (?, +, and *)
+// then that `?` is not interpreted as a separator. In other words, `$(pat)?+` matches `pat +`
+// or `+` but does not match `pat` or `pat ? pat`.
+
+// edition:2015
+
+macro_rules! foo {
+ // Check for `?`.
+ ($($a:ident)? ? $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `+`.
+ ($($a:ident)? + $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `*`.
+ ($($a:ident)? * $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `;`, not a kleene operator.
+ ($($a:ident)? ; $num:expr) => {
+ let mut x = 0;
+
+ $(
+ x += $a;
+ )?
+
+ assert_eq!(x, $num);
+ };
+}
+
+pub fn main() {
+ let a = 1;
+
+ // Accept 0 repetitions.
+ foo!( ; 0);
+ foo!( + 0);
+ foo!( * 0);
+ foo!( ? 0);
+
+ // Accept 1 repetition.
+ foo!(a ; 1);
+ foo!(a + 1);
+ foo!(a * 1);
+ foo!(a ? 1);
+}
--- /dev/null
+// run-pass
+
+#![allow(unused_mut)]
+
+// Check that when `?` is followed by what looks like a Kleene operator (?, +, and *)
+// then that `?` is not interpreted as a separator. In other words, `$(pat)?+` matches `pat +`
+// or `+` but does not match `pat` or `pat ? pat`.
+
+// edition:2018
+
+macro_rules! foo {
+ // Check for `?`.
+ ($($a:ident)? ? $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `+`.
+ ($($a:ident)? + $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `*`.
+ ($($a:ident)? * $num:expr) => {
+ foo!($($a)? ; $num);
+ };
+ // Check for `;`, not a kleene operator.
+ ($($a:ident)? ; $num:expr) => {
+ let mut x = 0;
+
+ $(
+ x += $a;
+ )?
+
+ assert_eq!(x, $num);
+ };
+}
+
+pub fn main() {
+ let a = 1;
+
+ // Accept 0 repetitions.
+ foo!( ; 0);
+ foo!( + 0);
+ foo!( * 0);
+ foo!( ? 0);
+
+ // Accept 1 repetition.
+ foo!(a ; 1);
+ foo!(a + 1);
+ foo!(a * 1);
+ foo!(a ? 1);
+}
+++ /dev/null
-// run-pass
-#![allow(unused_mut)]
-// The logic for parsing Kleene operators in macros has a special case to disambiguate `?`.
-// Specifically, `$(pat)?` is the ZeroOrOne operator whereas `$(pat)?+` or `$(pat)?*` are the
-// ZeroOrMore and OneOrMore operators using `?` as a separator. These tests are intended to
-// exercise that logic in the macro parser.
-//
-// Moreover, we also throw in some tests for using a separator with `?`, which is meaningless but
-// included for consistency with `+` and `*`.
-//
-// This test focuses on non-error cases and making sure the correct number of repetitions happen.
-
-// edition:2018
-
-macro_rules! foo {
- ($($a:ident)? ; $num:expr) => { {
- let mut x = 0;
-
- $(
- x += $a;
- )?
-
- assert_eq!(x, $num);
- } }
-}
-
-pub fn main() {
- let a = 1;
-
- // accept 0 or 1 repetitions
- foo!( ; 0);
- foo!(a ; 1);
-}
// run-pass
-#![feature(custom_attribute)]
macro_rules! compiles_fine {
(#[$at:meta]) => {
+++ /dev/null
-// run-pass
-#![allow(unused_attributes)]
-#![allow(non_camel_case_types)]
-
-// pp-exact - Make sure we print all the attributes
-// pretty-expanded FIXME #23616
-
-#![feature(custom_attribute)]
-
-#[frobable]
-trait frobable {
- #[frob_attr]
- fn frob(&self);
- #[defrob_attr]
- fn defrob(&self);
-}
-
-#[int_frobable]
-impl frobable for isize {
- #[frob_attr1]
- fn frob(&self) {
- #![frob_attr2]
- }
-
- #[defrob_attr1]
- fn defrob(&self) {
- #![defrob_attr2]
- }
-}
-
-pub fn main() { }
// run-pass
-#![feature(as_cell)]
use std::cell::Cell;
// run-pass
#![allow(dead_code)]
-#![feature(repr_align_enum)]
use std::mem;
+++ /dev/null
-// run-pass
-#![allow(unused_attributes)]
-#![allow(non_camel_case_types)]
-
-// pp-exact - Make sure we actually print the attributes
-#![feature(custom_attribute)]
-
-struct cat {
- name: String,
-}
-
-impl Drop for cat {
- #[cat_dropper]
- fn drop(&mut self) { println!("{} landed on hir feet" , self . name); }
-}
-
-
-#[cat_maker]
-fn cat(name: String) -> cat { cat{name: name,} }
-
-pub fn main() { let _kitty = cat("Spotty".to_string()); }
+++ /dev/null
-// run-pass
-#![allow(unused_attributes)]
-#![allow(non_camel_case_types)]
-
-#![feature(custom_attribute)]
-
-struct cat {
- name: String,
-}
-
-impl Drop for cat {
- #[cat_dropper]
- /**
- Actually, cats don't always land on their feet when you drop them.
- */
- fn drop(&mut self) {
- println!("{} landed on hir feet", self.name);
- }
-}
-
-#[cat_maker]
-/**
-Maybe it should technically be a kitten_maker.
-*/
-fn cat(name: String) -> cat {
- cat {
- name: name
- }
-}
-
-pub fn main() {
- let _kitty = cat("Spotty".to_string());
-}
// run-pass
+#![feature(transparent_unions)]
+
use std::mem::size_of;
use std::num::NonZeroUsize;
use std::ptr::NonNull;
impl<T> Mirror for T { type Image = T; }
struct ParamTypeStruct<T>(T);
struct AssocTypeStruct<T>(<T as Mirror>::Image);
+#[repr(transparent)]
+union MaybeUninitUnion<T: Copy> {
+ _value: T,
+ _uninit: (),
+}
fn main() {
// Functions
// Pointers - Box<T>
assert_eq!(size_of::<Box<isize>>(), size_of::<Option<Box<isize>>>());
- // The optimization can't apply to raw pointers
+ // The optimization can't apply to raw pointers or unions with a ZST field.
assert!(size_of::<Option<*const isize>>() != size_of::<*const isize>());
assert!(Some(0 as *const isize).is_some()); // Can't collapse None to null
+ assert_ne!(size_of::<fn(isize)>(), size_of::<Option<MaybeUninitUnion<fn(isize)>>>());
+ assert_ne!(size_of::<&str>(), size_of::<Option<MaybeUninitUnion<&str>>>());
+ assert_ne!(size_of::<NonNull<isize>>(), size_of::<Option<MaybeUninitUnion<NonNull<isize>>>>());
struct Foo {
_a: Box<isize>
+++ /dev/null
-#![allow(unused_attributes)]
-#![allow(non_camel_case_types)]
-#![allow(dead_code)]
-// pp-exact - Make sure we actually print the attributes
-// pretty-expanded FIXME #23616
-
-#![feature(custom_attribute)]
-
-enum crew_of_enterprise_d {
-
- #[captain]
- jean_luc_picard,
-
- #[oldcommander]
- william_t_riker,
-
- #[chief_medical_officer]
- beverly_crusher,
-
- #[ships_councellor]
- deanna_troi,
-
- #[lieutenant_oldcommander]
- data,
-
- #[chief_of_security]
- worf,
-
- #[chief_engineer]
- geordi_la_forge,
-}
-
-fn boldly_go(_crew_member: crew_of_enterprise_d, _where: String) { }
-
-pub fn main() {
- boldly_go(crew_of_enterprise_d::worf,
- "where no one has gone before".to_string());
-}
+#![feature(generators)]
+
#![allow(non_camel_case_types)]
#![allow(dead_code)]
#![allow(unreachable_code)]
assert_eq!(val, ());
}
+fn i_yield() {
+ static || {
+ yield yield yield yield yield yield yield yield yield;
+ };
+}
+
pub fn main() {
strange();
funny();
special_characters();
punch_card();
r#match();
+ i_yield();
}
--- /dev/null
+#![crate_name = "foo"]
+#![feature(const_generics)]
+
+pub trait Array {
+ type Item;
+}
+
+// @has foo/trait.Array.html
+// @has - '//h3[@class="impl"]' 'impl<T, const N: usize> Array for [T; N]'
+impl <T, const N: usize> Array for [T; N] {
+ type Item = T;
+}
| | |
| | data moved here
| | move occurs because `v` has type `std::vec::Vec<isize>`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `s`
+ | help: consider borrowing here: `&*s`
error: aborting due to previous error
--- /dev/null
+// edition:2018
+#![feature(async_await)]
+
+pub async fn f(x: Option<usize>) {
+ x.take();
+ //~^ ERROR cannot borrow `x` as mutable, as it is not declared as mutable [E0596]
+}
+
+pub async fn g(x: usize) {
+ x += 1;
+ //~^ ERROR cannot assign twice to immutable variable `x` [E0384]
+}
+
+fn main() {}
--- /dev/null
+error[E0596]: cannot borrow `x` as mutable, as it is not declared as mutable
+ --> $DIR/issue-61452.rs:5:5
+ |
+LL | pub async fn f(x: Option<usize>) {
+ | - help: consider changing this to be mutable: `mut x`
+LL | x.take();
+ | ^ cannot borrow as mutable
+
+error[E0384]: cannot assign twice to immutable variable `x`
+ --> $DIR/issue-61452.rs:10:5
+ |
+LL | pub async fn g(x: usize) {
+ | -
+ | |
+ | first assignment to `x`
+ | help: make this binding mutable: `mut x`
+LL | x += 1;
+ | ^^^^^^ cannot assign twice to immutable variable
+
+error: aborting due to 2 previous errors
+
+Some errors have detailed explanations: E0384, E0596.
+For more information about an error, try `rustc --explain E0384`.
-#![feature(custom_attribute)]
-
#[my_attr = !] //~ ERROR unexpected token: `!`
fn main() {}
error: unexpected token: `!`
- --> $DIR/attr-eq-token-tree.rs:3:13
+ --> $DIR/attr-eq-token-tree.rs:1:13
|
LL | #[my_attr = !]
| ^
#![feature(repr_simd)]
-#![feature(repr_align_enum)]
-#[repr(C)] //~ ERROR: attribute should be applied to struct, enum or union
+#[repr(C)] //~ ERROR: attribute should be applied to struct, enum, or union
fn f() {}
#[repr(C)]
-error[E0517]: attribute should be applied to struct, enum or union
- --> $DIR/attr-usage-repr.rs:4:8
+error[E0517]: attribute should be applied to struct, enum, or union
+ --> $DIR/attr-usage-repr.rs:3:8
|
LL | #[repr(C)]
| ^
LL | fn f() {}
- | --------- not a struct, enum or union
+ | --------- not a struct, enum, or union
error[E0517]: attribute should be applied to enum
- --> $DIR/attr-usage-repr.rs:16:8
+ --> $DIR/attr-usage-repr.rs:15:8
|
LL | #[repr(i8)]
| ^^
| ---------------------- not an enum
error[E0517]: attribute should be applied to struct or union
- --> $DIR/attr-usage-repr.rs:25:8
+ --> $DIR/attr-usage-repr.rs:24:8
|
LL | #[repr(packed)]
| ^^^^^^
| --------------------- not a struct or union
error[E0517]: attribute should be applied to struct
- --> $DIR/attr-usage-repr.rs:28:8
+ --> $DIR/attr-usage-repr.rs:27:8
|
LL | #[repr(simd)]
| ^^^^
--- /dev/null
+// compile-pass
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_attrs)]
+#![feature(test)]
+
+#[rustc_dummy = "bar"]
+extern crate test;
+
+fn main() {}
--- /dev/null
+// compile-pass
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_attrs)]
+#![feature(test)]
+
+mod m {
+ #[rustc_dummy = "bar"]
+ extern crate test;
+}
+
+fn main() {}
--- /dev/null
+// compile-pass
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_attrs)]
+
+#[rustc_dummy(bar)]
+mod foo {
+ #![feature(globs)]
+}
+
+fn main() {}
--- /dev/null
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+#![feature(rustc_attrs)]
+
+struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
+
+impl<#[rustc_1] 'a, 'b, #[oops]> RefIntPair<'a, 'b> {
+ //~^ ERROR trailing attribute after generic parameter
+}
+
+fn main() {
+
+}
--- /dev/null
+error: trailing attribute after generic parameter
+ --> $DIR/attrs-with-no-formal-in-generics-1.rs:9:25
+ |
+LL | impl<#[rustc_1] 'a, 'b, #[oops]> RefIntPair<'a, 'b> {
+ | ^^^^^^^ attributes must go before parameters
+
+error: aborting due to previous error
+
--- /dev/null
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+#![feature(rustc_attrs)]
+
+struct RefAny<'a, T>(&'a T);
+
+impl<#[rustc_1] 'a, #[rustc_2] T, #[oops]> RefAny<'a, T> {}
+//~^ ERROR trailing attribute after generic parameter
+
+fn main() {}
--- /dev/null
+error: trailing attribute after generic parameter
+ --> $DIR/attrs-with-no-formal-in-generics-2.rs:9:35
+ |
+LL | impl<#[rustc_1] 'a, #[rustc_2] T, #[oops]> RefAny<'a, T> {}
+ | ^^^^^^^ attributes must go before parameters
+
+error: aborting due to previous error
+
--- /dev/null
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
+
+fn hof_lt<Q>(_: Q)
+ where Q: for <#[allow(unused)] 'a, 'b, #[oops]> Fn(RefIntPair<'a,'b>) -> &'b u32
+ //~^ ERROR trailing attribute after generic parameter
+{}
+
+fn main() {}
--- /dev/null
+error: trailing attribute after generic parameter
+ --> $DIR/attrs-with-no-formal-in-generics-3.rs:8:44
+ |
+LL | where Q: for <#[allow(unused)] 'a, 'b, #[oops]> Fn(RefIntPair<'a,'b>) -> &'b u32
+ | ^^^^^^^ attributes must go before parameters
+
+error: aborting due to previous error
+
--- /dev/null
+// compile-pass
+// pp-exact - Make sure we actually print the attributes
+
+#![feature(rustc_attrs)]
+
+struct Cat {
+ name: String,
+}
+
+impl Drop for Cat {
+ #[rustc_dummy]
+ fn drop(&mut self) { println!("{} landed on hir feet" , self . name); }
+}
+
+
+#[rustc_dummy]
+fn cat(name: String) -> Cat { Cat{name: name,} }
+
+fn main() { let _kitty = cat("Spotty".to_string()); }
--- /dev/null
+// compile-pass
+
+#![feature(rustc_attrs)]
+
+struct Cat {
+ name: String,
+}
+
+impl Drop for Cat {
+ #[rustc_dummy]
+ /**
+ Actually, cats don't always land on their feet when you drop them.
+ */
+ fn drop(&mut self) {
+ println!("{} landed on hir feet", self.name);
+ }
+}
+
+#[rustc_dummy]
+/**
+Maybe it should technically be a kitten_maker.
+*/
+fn cat(name: String) -> Cat {
+ Cat {
+ name: name
+ }
+}
+
+fn main() {
+ let _kitty = cat("Spotty".to_string());
+}
--- /dev/null
+// These are attributes of the implicit crate. Really this just needs to parse
+// for completeness since .rs files linked from .rc files support this
+// notation to specify their module's attributes
+
+// compile-pass
+
+#![feature(rustc_attrs)]
+
+#![rustc_dummy = "val"]
+#![rustc_dummy = "val"]
+#![rustc_dummy]
+#![rustc_dummy(attr5)]
+
+#![crate_id="foobar#0.1"]
+
+// These are attributes of the following mod
+#[rustc_dummy = "val"]
+#[rustc_dummy = "val"]
+mod test_first_item_in_file_mod {}
+
+mod test_single_attr_outer {
+ #[rustc_dummy = "val"]
+ pub static X: isize = 10;
+
+ #[rustc_dummy = "val"]
+ pub fn f() { }
+
+ #[rustc_dummy = "val"]
+ pub mod mod1 {}
+
+ pub mod rustrt {
+ #[rustc_dummy = "val"]
+ extern {}
+ }
+}
+
+mod test_multi_attr_outer {
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ pub static X: isize = 10;
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ pub fn f() { }
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ pub mod mod1 {}
+
+ pub mod rustrt {
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ extern {}
+ }
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ struct T {x: isize}
+}
+
+mod test_stmt_single_attr_outer {
+ pub fn f() {
+ #[rustc_dummy = "val"]
+ static X: isize = 10;
+
+ #[rustc_dummy = "val"]
+ fn f() { }
+
+ #[rustc_dummy = "val"]
+ mod mod1 {
+ }
+
+ mod rustrt {
+ #[rustc_dummy = "val"]
+ extern {
+ }
+ }
+ }
+}
+
+mod test_stmt_multi_attr_outer {
+ pub fn f() {
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ static X: isize = 10;
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ fn f() { }
+
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ mod mod1 {
+ }
+
+ mod rustrt {
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ extern {
+ }
+ }
+ }
+}
+
+mod test_attr_inner {
+ pub mod m {
+ // This is an attribute of mod m
+ #![rustc_dummy = "val"]
+ }
+}
+
+mod test_attr_inner_then_outer {
+ pub mod m {
+ // This is an attribute of mod m
+ #![rustc_dummy = "val"]
+ // This is an attribute of fn f
+ #[rustc_dummy = "val"]
+ fn f() { }
+ }
+}
+
+mod test_attr_inner_then_outer_multi {
+ pub mod m {
+ // This is an attribute of mod m
+ #![rustc_dummy = "val"]
+ #![rustc_dummy = "val"]
+ // This is an attribute of fn f
+ #[rustc_dummy = "val"]
+ #[rustc_dummy = "val"]
+ fn f() { }
+ }
+}
+
+mod test_distinguish_syntax_ext {
+ pub fn f() {
+ format!("test{}", "s");
+ #[rustc_dummy = "val"]
+ fn g() { }
+ }
+}
+
+mod test_other_forms {
+ #[rustc_dummy]
+ #[rustc_dummy(word)]
+ #[rustc_dummy(attr(word))]
+ #[rustc_dummy(key1 = "val", key2 = "val", attr)]
+ pub fn f() { }
+}
+
+mod test_foreign_items {
+ pub mod rustrt {
+ extern {
+ #![rustc_dummy]
+
+ #[rustc_dummy]
+ fn rust_get_test_int() -> u32;
+ }
+ }
+}
+
+
+// FIXME(#623): - these aren't supported yet
+/*mod test_literals {
+ #![str = "s"]
+ #![char = 'c']
+ #![isize = 100]
+ #![usize = 100_usize]
+ #![mach_int = 100u32]
+ #![float = 1.0]
+ #![mach_float = 1.0f32]
+ #![nil = ()]
+ #![bool = true]
+ mod m {}
+}*/
+
+fn test_fn_inner() {
+ #![rustc_dummy]
+}
+
+fn main() {}
--- /dev/null
+// compile-pass
+// pp-exact - Make sure we print all the attributes
+// pretty-expanded FIXME #23616
+
+#![feature(rustc_attrs)]
+
+#[rustc_dummy]
+trait Frobable {
+ #[rustc_dummy]
+ fn frob(&self);
+ #[rustc_dummy]
+ fn defrob(&self);
+}
+
+#[rustc_dummy]
+impl Frobable for isize {
+ #[rustc_dummy]
+ fn frob(&self) {
+ #![rustc_dummy]
+ }
+
+ #[rustc_dummy]
+ fn defrob(&self) {
+ #![rustc_dummy]
+ }
+}
+
+fn main() {}
--- /dev/null
+// Obsolete attributes fall back to feature gated custom attributes.
+
+#[ab_isize="stdcall"] extern {} //~ ERROR attribute `ab_isize` is currently unknown
+
+#[fixed_stack_segment] fn f() {} //~ ERROR attribute `fixed_stack_segment` is currently unknown
+
+fn main() {}
--- /dev/null
+error[E0658]: The attribute `fixed_stack_segment` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/obsolete-attr.rs:5:3
+ |
+LL | #[fixed_stack_segment] fn f() {}
+ | ^^^^^^^^^^^^^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error[E0658]: The attribute `ab_isize` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/obsolete-attr.rs:3:3
+ |
+LL | #[ab_isize="stdcall"] extern {}
+ | ^^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// Unknown attributes fall back to feature gated custom attributes.
+
+#![feature(custom_inner_attributes)]
+
+#![mutable_doc] //~ ERROR attribute `mutable_doc` is currently unknown
+
+#[dance] mod a {} //~ ERROR attribute `dance` is currently unknown
+
+#[dance] fn main() {} //~ ERROR attribute `dance` is currently unknown
--- /dev/null
+error[E0658]: The attribute `mutable_doc` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/unknown-attr.rs:5:4
+ |
+LL | #![mutable_doc]
+ | ^^^^^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error[E0658]: The attribute `dance` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/unknown-attr.rs:7:3
+ |
+LL | #[dance] mod a {}
+ | ^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error[E0658]: The attribute `dance` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/unknown-attr.rs:9:3
+ |
+LL | #[dance] fn main() {}
+ | ^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// compile-pass
+// pp-exact - Make sure we actually print the attributes
+// pretty-expanded FIXME #23616
+
+#![allow(non_camel_case_types)]
+#![feature(rustc_attrs)]
+
+enum crew_of_enterprise_d {
+
+ #[rustc_dummy]
+ jean_luc_picard,
+
+ #[rustc_dummy]
+ william_t_riker,
+
+ #[rustc_dummy]
+ beverly_crusher,
+
+ #[rustc_dummy]
+ deanna_troi,
+
+ #[rustc_dummy]
+ data,
+
+ #[rustc_dummy]
+ worf,
+
+ #[rustc_dummy]
+ geordi_la_forge,
+}
+
+fn boldly_go(_crew_member: crew_of_enterprise_d, _where: String) { }
+
+fn main() {
+ boldly_go(crew_of_enterprise_d::worf,
+ "where no one has gone before".to_string());
+}
+++ /dev/null
-// This test checks variations on `<#[attr] 'a, #[oops]>`, where
-// `#[oops]` is left dangling (that is, it is unattached, with no
-// formal binding following it).
-
-#![feature(rustc_attrs)]
-
-struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
-
-impl<#[rustc_1] 'a, 'b, #[oops]> RefIntPair<'a, 'b> {
- //~^ ERROR trailing attribute after generic parameter
-}
-
-fn main() {
-
-}
+++ /dev/null
-error: trailing attribute after generic parameter
- --> $DIR/attrs-with-no-formal-in-generics-1.rs:9:25
- |
-LL | impl<#[rustc_1] 'a, 'b, #[oops]> RefIntPair<'a, 'b> {
- | ^^^^^^^ attributes must go before parameters
-
-error: aborting due to previous error
-
+++ /dev/null
-// This test checks variations on `<#[attr] 'a, #[oops]>`, where
-// `#[oops]` is left dangling (that is, it is unattached, with no
-// formal binding following it).
-
-#![feature(rustc_attrs)]
-
-struct RefAny<'a, T>(&'a T);
-
-impl<#[rustc_1] 'a, #[rustc_2] T, #[oops]> RefAny<'a, T> {}
-//~^ ERROR trailing attribute after generic parameter
-
-fn main() {}
+++ /dev/null
-error: trailing attribute after generic parameter
- --> $DIR/attrs-with-no-formal-in-generics-2.rs:9:35
- |
-LL | impl<#[rustc_1] 'a, #[rustc_2] T, #[oops]> RefAny<'a, T> {}
- | ^^^^^^^ attributes must go before parameters
-
-error: aborting due to previous error
-
+++ /dev/null
-// This test checks variations on `<#[attr] 'a, #[oops]>`, where
-// `#[oops]` is left dangling (that is, it is unattached, with no
-// formal binding following it).
-
-struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
-
-fn hof_lt<Q>(_: Q)
- where Q: for <#[allow(unused)] 'a, 'b, #[oops]> Fn(RefIntPair<'a,'b>) -> &'b u32
- //~^ ERROR trailing attribute after generic parameter
-{}
-
-fn main() {}
+++ /dev/null
-error: trailing attribute after generic parameter
- --> $DIR/attrs-with-no-formal-in-generics-3.rs:8:44
- |
-LL | where Q: for <#[allow(unused)] 'a, 'b, #[oops]> Fn(RefIntPair<'a,'b>) -> &'b u32
- | ^^^^^^^ attributes must go before parameters
-
-error: aborting due to previous error
-
| ^^
| |
| move occurs because `*y` has type `std::boxed::Box<i32>`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `y`
+ | help: consider borrowing here: `&*y`
error: aborting due to previous error
--> $DIR/borrowck-move-error-with-note.rs:11:11
|
LL | match *f {
- | ^^ help: consider removing the `*`: `f`
+ | ^^ help: consider borrowing here: `&*f`
LL | Foo::Foo1(num1,
| ---- data moved here
LL | num2) => (),
| ^^
| |
| move occurs because `*x` has type `std::boxed::Box<isize>`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `x`
+ | help: consider borrowing here: `&*x`
error: aborting due to previous error
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
| move occurs because value has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `Rc::new("hi".to_string())`
+ | help: consider borrowing here: `&*Rc::new("hi".to_string())`
error: aborting due to previous error
| ^^^^^^
| |
| move occurs because `*array` has type `std::vec::Vec<Value>`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `array`
+ | help: consider borrowing here: `&*array`
error: aborting due to previous error
--- /dev/null
+// run-pass
+
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+struct ArrayStruct<T, const N: usize> {
+ data: [T; N],
+}
+
+struct ArrayTuple<T, const N: usize>([T; N]);
+
+fn main() {
+ let _ = ArrayStruct { data: [0u32; 8] };
+ let _ = ArrayTuple([0u32; 8]);
+}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/array-wrapper-struct-ctor.rs:3:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+
--- /dev/null
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+#[derive(Debug)]
+struct X<const N: usize> {
+ a: [u32; N], //~ ERROR `[u32; _]` doesn't implement `std::fmt::Debug`
+}
+
+fn main() {}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/derive-debug-array-wrapper.rs:1:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+
+error[E0277]: `[u32; _]` doesn't implement `std::fmt::Debug`
+ --> $DIR/derive-debug-array-wrapper.rs:6:5
+ |
+LL | a: [u32; N],
+ | ^^^^^^^^^^^ `[u32; _]` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug`
+ |
+ = help: the trait `std::fmt::Debug` is not implemented for `[u32; _]`
+ = note: required because of the requirements on the impl of `std::fmt::Debug` for `&[u32; _]`
+ = note: required for the cast to the object type `dyn std::fmt::Debug`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0277`.
--- /dev/null
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+fn f<T: Copy, const N: usize>(x: T) -> [T; N] {
+ [x; N]
+ //~^ ERROR array lengths can't depend on generic parameters
+}
+
+fn main() {
+ let x: [u32; 5] = f::<u32, 5>(3);
+ assert_eq!(x, [3u32; 5]);
+}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/issue-61336-1.rs:1:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+
+error: array lengths can't depend on generic parameters
+ --> $DIR/issue-61336-1.rs:5:9
+ |
+LL | [x; N]
+ | ^
+
+error: aborting due to previous error
+
--- /dev/null
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+fn f<T: Copy, const N: usize>(x: T) -> [T; N] {
+ [x; N]
+}
+
+fn g<T, const N: usize>(x: T) -> [T; N] {
+ [x; N]
+ //~^ ERROR the trait bound `T: std::marker::Copy` is not satisfied [E0277]
+}
+
+fn main() {
+ let x: [u32; 5] = f::<u32, 5>(3);
+ assert_eq!(x, [3u32; 5]);
+}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/issue-61336.rs:1:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+
+error[E0277]: the trait bound `T: std::marker::Copy` is not satisfied
+ --> $DIR/issue-61336.rs:9:5
+ |
+LL | [x; N]
+ | ^^^^^^ the trait `std::marker::Copy` is not implemented for `T`
+ |
+ = help: consider adding a `where T: std::marker::Copy` bound
+ = note: the `Copy` trait is required because the repeated element will be copied
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0277`.
LL | println!("{}", 1/(1-1));
| ^^^^^^^
+warning: this expression will panic at runtime
+ --> $DIR/promoted_errors.rs:9:20
+ |
+LL | println!("{}", 1/(1-1));
+ | ^^^^^^^ attempt to divide by zero
+
warning: attempt to divide by zero
--> $DIR/promoted_errors.rs:11:14
|
LL | println!("{}", 1/(false as u32));
| ^^^^^^^^^^^^^^^^
+warning: this expression will panic at runtime
+ --> $DIR/promoted_errors.rs:14:20
+ |
+LL | println!("{}", 1/(false as u32));
+ | ^^^^^^^^^^^^^^^^ attempt to divide by zero
+
warning: attempt to divide by zero
--> $DIR/promoted_errors.rs:16:14
|
--- /dev/null
+// Test that constructors are considered to be const fns with the required feature.
+
+// run-pass
+
+// revisions: min_const_fn const_fn
+
+#![cfg_attr(const_fn, feature(const_fn))]
+
+#![feature(const_constructor)]
+
+// Ctor(..) is transformed to Ctor { 0: ... } in HAIR lowering, so directly
+// calling constructors doesn't require them to be const.
+
+type ExternalType = std::panic::AssertUnwindSafe<(Option<i32>, Result<i32, bool>)>;
+
+const fn call_external_constructors_in_local_vars() -> ExternalType {
+ let f = Some;
+ let g = Err;
+ let h = std::panic::AssertUnwindSafe;
+ let x = f(5);
+ let y = g(false);
+ let z = h((x, y));
+ z
+}
+
+const CALL_EXTERNAL_CONSTRUCTORS_IN_LOCAL_VARS: ExternalType = {
+ let f = Some;
+ let g = Err;
+ let h = std::panic::AssertUnwindSafe;
+ let x = f(5);
+ let y = g(false);
+ let z = h((x, y));
+ z
+};
+
+const fn call_external_constructors_in_temps() -> ExternalType {
+ let x = { Some }(5);
+ let y = (*&Err)(false);
+ let z = [std::panic::AssertUnwindSafe][0]((x, y));
+ z
+}
+
+const CALL_EXTERNAL_CONSTRUCTORS_IN_TEMPS: ExternalType = {
+ let x = { Some }(5);
+ let y = (*&Err)(false);
+ let z = [std::panic::AssertUnwindSafe][0]((x, y));
+ z
+};
+
+#[derive(Debug, PartialEq)]
+enum LocalOption<T> {
+ Some(T),
+ _None,
+}
+
+#[derive(Debug, PartialEq)]
+enum LocalResult<T, E> {
+ _Ok(T),
+ Err(E),
+}
+
+#[derive(Debug, PartialEq)]
+struct LocalAssertUnwindSafe<T>(T);
+
+type LocalType = LocalAssertUnwindSafe<(LocalOption<i32>, LocalResult<i32, bool>)>;
+
+const fn call_local_constructors_in_local_vars() -> LocalType {
+ let f = LocalOption::Some;
+ let g = LocalResult::Err;
+ let h = LocalAssertUnwindSafe;
+ let x = f(5);
+ let y = g(false);
+ let z = h((x, y));
+ z
+}
+
+const CALL_LOCAL_CONSTRUCTORS_IN_LOCAL_VARS: LocalType = {
+ let f = LocalOption::Some;
+ let g = LocalResult::Err;
+ let h = LocalAssertUnwindSafe;
+ let x = f(5);
+ let y = g(false);
+ let z = h((x, y));
+ z
+};
+
+const fn call_local_constructors_in_temps() -> LocalType {
+ let x = { LocalOption::Some }(5);
+ let y = (*&LocalResult::Err)(false);
+ let z = [LocalAssertUnwindSafe][0]((x, y));
+ z
+}
+
+const CALL_LOCAL_CONSTRUCTORS_IN_TEMPS: LocalType = {
+ let x = { LocalOption::Some }(5);
+ let y = (*&LocalResult::Err)(false);
+ let z = [LocalAssertUnwindSafe][0]((x, y));
+ z
+};
+
+fn main() {
+ assert_eq!(
+ (
+ call_external_constructors_in_local_vars().0,
+ call_external_constructors_in_temps().0,
+ call_local_constructors_in_local_vars(),
+ call_local_constructors_in_temps(),
+ ),
+ (
+ CALL_EXTERNAL_CONSTRUCTORS_IN_LOCAL_VARS.0,
+ CALL_EXTERNAL_CONSTRUCTORS_IN_TEMPS.0,
+ CALL_LOCAL_CONSTRUCTORS_IN_LOCAL_VARS,
+ CALL_LOCAL_CONSTRUCTORS_IN_TEMPS,
+ )
+ );
+}
--- /dev/null
+error: `std::prelude::v1::Some` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:9:37
+ |
+LL | const EXTERNAL_CONST: Option<i32> = {Some}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `E::V` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:12:24
+ |
+LL | const LOCAL_CONST: E = {E::V}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `std::prelude::v1::Some` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:17:13
+ |
+LL | let _ = {Some}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `E::V` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:23:13
+ |
+LL | let _ = {E::V}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: aborting due to 4 previous errors
+
--- /dev/null
+error: `std::prelude::v1::Some` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:9:37
+ |
+LL | const EXTERNAL_CONST: Option<i32> = {Some}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `E::V` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:12:24
+ |
+LL | const LOCAL_CONST: E = {E::V}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `std::prelude::v1::Some` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:17:13
+ |
+LL | let _ = {Some}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: `E::V` is not yet stable as a const fn
+ --> $DIR/feature-gate-const_constructor.rs:23:13
+ |
+LL | let _ = {E::V}(1);
+ | ^^^^^^^^^
+ |
+ = help: add `#![feature(const_constructor)]` to the crate attributes to enable
+
+error: aborting due to 4 previous errors
+
--- /dev/null
+// revisions: min_const_fn const_fn
+
+#![cfg_attr(const_fn, feature(const_fn))]
+
+enum E {
+ V(i32),
+}
+
+const EXTERNAL_CONST: Option<i32> = {Some}(1);
+//[min_const_fn]~^ ERROR is not yet stable as a const fn
+//[const_fn]~^^ ERROR is not yet stable as a const fn
+const LOCAL_CONST: E = {E::V}(1);
+//[min_const_fn]~^ ERROR is not yet stable as a const fn
+//[const_fn]~^^ ERROR is not yet stable as a const fn
+
+const fn external_fn() {
+ let _ = {Some}(1);
+ //[min_const_fn]~^ ERROR is not yet stable as a const fn
+ //[const_fn]~^^ ERROR is not yet stable as a const fn
+}
+
+const fn local_fn() {
+ let _ = {E::V}(1);
+ //[min_const_fn]~^ ERROR is not yet stable as a const fn
+ //[const_fn]~^^ ERROR is not yet stable as a const fn
+}
+
+fn main() {}
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:100:38
|
LL | const fn foo30_4(b: bool) -> usize { if b { 1 } else { 42 } }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:102:29
|
LL | const fn foo30_5(b: bool) { while b { } }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:104:44
|
LL | const fn foo36(a: bool, b: bool) -> bool { a && b }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:106:44
|
LL | const fn foo37(a: bool, b: bool) -> bool { a || b }
const fn foo30_2_with_unsafe(x: *mut u32) -> usize { unsafe { x as usize } }
//~^ ERROR casting pointers to ints is unstable
const fn foo30_4(b: bool) -> usize { if b { 1 } else { 42 } }
-//~^ ERROR `if`, `match`, `&&` and `||` are not stable in const fn
+//~^ ERROR loops and conditional expressions are not stable in const fn
const fn foo30_5(b: bool) { while b { } } //~ ERROR not stable in const fn
const fn foo30_6() -> bool { let x = true; x }
const fn foo36(a: bool, b: bool) -> bool { a && b }
-//~^ ERROR `if`, `match`, `&&` and `||` are not stable in const fn
+//~^ ERROR loops and conditional expressions are not stable in const fn
const fn foo37(a: bool, b: bool) -> bool { a || b }
-//~^ ERROR `if`, `match`, `&&` and `||` are not stable in const fn
+//~^ ERROR loops and conditional expressions are not stable in const fn
const fn inc(x: &mut i32) { *x += 1 }
//~^ ERROR mutable references in const fn are unstable
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:100:38
|
LL | const fn foo30_4(b: bool) -> usize { if b { 1 } else { 42 } }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:102:29
|
LL | const fn foo30_5(b: bool) { while b { } }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:104:44
|
LL | const fn foo36(a: bool, b: bool) -> bool { a && b }
= note: for more information, see issue https://github.com/rust-lang/rust/issues/57563
= help: add #![feature(const_fn)] to the crate attributes to enable
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/min_const_fn.rs:106:44
|
LL | const fn foo37(a: bool, b: bool) -> bool { a || b }
use self::Foo::*;
match *self {
- Prob => 0x1, //~ ERROR `if`, `match`, `&&` and `||` are not stable in const fn
+ Prob => 0x1, //~ ERROR loops and conditional expressions are not stable in const fn
}
}
}
LL | x => 42,
| ^
-error[E0723]: `if`, `match`, `&&` and `||` are not stable in const fn
+error[E0723]: loops and conditional expressions are not stable in const fn
--> $DIR/single_variant_match_ice.rs:18:13
|
LL | Prob => 0x1,
// Unresolved multi-segment attributes are not treated as custom.
-#![feature(custom_attribute)]
-
mod existent {}
#[existent::nonexistent] //~ ERROR failed to resolve: could not find `nonexistent` in `existent`
error[E0433]: failed to resolve: could not find `nonexistent` in `existent`
- --> $DIR/custom-attribute-multisegment.rs:7:13
+ --> $DIR/custom-attribute-multisegment.rs:5:13
|
LL | #[existent::nonexistent]
| ^^^^^^^^^^^ could not find `nonexistent` in `existent`
-error[E0517]: attribute should be applied to struct, enum or union
+error[E0517]: attribute should be applied to struct, enum, or union
--> $DIR/E0517.rs:1:8
|
LL | #[repr(C)]
| ^
LL | type Foo = u8;
- | -------------- not a struct, enum or union
+ | -------------- not a struct, enum, or union
error[E0517]: attribute should be applied to struct or union
--> $DIR/E0517.rs:4:8
LL | struct Foo3 {bar: bool, baz: bool}
| ---------------------------------- not an enum
-error[E0517]: attribute should be applied to struct, enum or union
+error[E0517]: attribute should be applied to struct, enum, or union
--> $DIR/E0517.rs:10:8
|
LL | #[repr(C)]
| ^
LL | / impl Foo3 {
LL | | }
- | |_- not a struct, enum or union
+ | |_- not a struct, enum, or union
error: aborting due to 4 previous errors
// skip-codegen
// compile-pass
-#![feature(custom_attribute)]
+
macro_rules! mac {
{} => {
#[cfg(attr)]
//~^ ERROR environment variable `NON_EXISTENT` not defined
}
+mod erroneous_literal {
+ include!(concat!("NON_EXISTENT"suffix, "/data.rs"));
+ //~^ ERROR suffixes on a string literal are invalid
+}
+
fn main() {}
LL | include!(concat!(env!("NON_EXISTENT"), "/data.rs"));
| ^^^^^^^^^^^^^^^^^^^^
+error: suffixes on a string literal are invalid
+ --> $DIR/issue-55897.rs:16:22
+ |
+LL | include!(concat!("NON_EXISTENT"suffix, "/data.rs"));
+ | ^^^^^^^^^^^^^^^^^^^^ invalid suffix `suffix`
+
error[E0432]: unresolved import `prelude`
--> $DIR/issue-55897.rs:1:5
|
|
= note: import resolution is stuck, try simplifying macro imports
-error: aborting due to 3 previous errors
+error: aborting due to 4 previous errors
For more information about this error, try `rustc --explain E0432`.
+++ /dev/null
-#[repr(align(16))]
-struct Foo(u64);
-
-#[repr(align(8))] //~ ERROR `#[repr(align(x))]` on enums is experimental
-enum Bar {
- Foo { foo: Foo },
- Baz,
-}
-
-fn main() { }
+++ /dev/null
-error[E0658]: `#[repr(align(x))]` on enums is experimental
- --> $DIR/feature-gate-repr_align_enum.rs:4:1
- |
-LL | #[repr(align(8))]
- | ^^^^^^^^^^^^^^^^^
- |
- = note: for more information, see https://github.com/rust-lang/rust/issues/57996
- = help: add #![feature(repr_align_enum)] to the crate attributes to enable
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#[repr(transparent)]
+enum OkButUnstableEnum { //~ ERROR transparent enums are unstable
+ Foo((), String, ()),
+}
+
+fn main() {}
--- /dev/null
+error[E0658]: transparent enums are unstable
+ --> $DIR/feature-gate-transparent_enums.rs:2:1
+ |
+LL | / enum OkButUnstableEnum {
+LL | | Foo((), String, ()),
+LL | | }
+ | |_^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/60405
+ = help: add #![feature(transparent_enums)] to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#[repr(transparent)]
+union OkButUnstableUnion { //~ ERROR transparent unions are unstable
+ field: u8,
+ zst: (),
+}
+
+fn main() {}
--- /dev/null
+error[E0658]: transparent unions are unstable
+ --> $DIR/feature-gate-transparent_unions.rs:2:1
+ |
+LL | / union OkButUnstableUnion {
+LL | | field: u8,
+LL | | zst: (),
+LL | | }
+ | |_^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/60405
+ = help: add #![feature(transparent_unions)] to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
|
LL | #![deny(improper_ctypes)]
| ^^^^^^^^^^^^^^^
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-14309.rs:4:1
|
LL | fn bar(x: B);
| ^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-14309.rs:4:1
|
LL | fn qux(x: A2);
| ^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-14309.rs:4:1
|
LL | fn quux(x: B2);
| ^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-14309.rs:4:1
|
LL | fn fred(x: D);
| ^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-14309.rs:4:1
|
LL | #![deny(warnings)]
| ^^^^^^^^
= note: #[deny(improper_ctypes)] implied by #[deny(warnings)]
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/issue-16250.rs:3:1
|
| ^^^^^^^^^^
| |
| move occurs because value has type `T`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `mut_ref()`
+ | help: consider borrowing here: `&*mut_ref()`
error[E0507]: cannot move out of a shared reference
--> $DIR/issue-20801.rs:29:22
| ^^^^^^^^^^
| |
| move occurs because value has type `T`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `imm_ref()`
+ | help: consider borrowing here: `&*imm_ref()`
error[E0507]: cannot move out of a raw pointer
--> $DIR/issue-20801.rs:32:22
| ^^^^^^^^^^
| |
| move occurs because value has type `T`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `mut_ptr()`
+ | help: consider borrowing here: `&*mut_ptr()`
error[E0507]: cannot move out of a raw pointer
--> $DIR/issue-20801.rs:35:22
| ^^^^^^^^^^^^
| |
| move occurs because value has type `T`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `const_ptr()`
+ | help: consider borrowing here: `&*const_ptr()`
error: aborting due to 4 previous errors
// compile-pass
-#![allow(unused_attributes)]
// compile-flags:--cfg set1
-#![cfg_attr(set1, feature(custom_attribute))]
+#![cfg_attr(set1, feature(rustc_attrs))]
+#![rustc_dummy]
-#![foobar]
fn main() {}
fn main() {
#[inline] struct Foo; //~ ERROR attribute should be applied to function or closure
- #[repr(C)] fn foo() {} //~ ERROR attribute should be applied to struct, enum or union
+ #[repr(C)] fn foo() {} //~ ERROR attribute should be applied to struct, enum, or union
}
LL | #[inline] struct Foo;
| ^^^^^^^^^ ----------- not a function or closure
-error[E0517]: attribute should be applied to struct, enum or union
+error[E0517]: attribute should be applied to struct, enum, or union
--> $DIR/issue-31769.rs:3:12
|
LL | #[repr(C)] fn foo() {}
- | ^ ----------- not a struct, enum or union
+ | ^ ----------- not a struct, enum, or union
error: aborting due to 2 previous errors
#![allow(unused_macros)]
macro_rules! assign {
- (($($a:tt)*) = ($($b:tt))*) => { //~ ERROR expected `*` or `+`
+ (($($a:tt)*) = ($($b:tt))*) => { //~ ERROR expected one of: `*`, `+`, or `?`
$($a)* = $($b)*
}
}
-error: expected `*` or `+`
+error: expected one of: `*`, `+`, or `?`
--> $DIR/issue-39388.rs:4:22
|
LL | (($($a:tt)*) = ($($b:tt))*) => {
LL | #[repr(nothing)]
| ^^^^^^^^^^^^^^^^
LL | let _x = 0;
- | ----------- not a struct, enum or union
+ | ----------- not a struct, enum, or union
error[E0517]: attribute should not be applied to an expression
--> $DIR/issue-43988.rs:18:5
LL | / loop {
LL | | ()
LL | | };
- | |_____- not defining a struct, enum or union
+ | |_____- not defining a struct, enum, or union
error[E0517]: attribute should not be applied to a statement
--> $DIR/issue-43988.rs:24:5
LL | #[repr]
| ^^^^^^^
LL | let _y = "123";
- | --------------- not a struct, enum or union
+ | --------------- not a struct, enum, or union
error[E0518]: attribute should be applied to function or closure
--> $DIR/issue-43988.rs:31:5
--> $DIR/issue-43988.rs:35:14
|
LL | let _z = #[repr] 1;
- | ^^^^^^^ - not defining a struct, enum or union
+ | ^^^^^^^ - not defining a struct, enum, or union
error: aborting due to 9 previous errors
+#![feature(transparent_enums, transparent_unions)]
#![deny(improper_ctypes)]
#![allow(dead_code)]
enum Isize { A, B, C }
#[repr(transparent)]
-struct Transparent<T>(T, std::marker::PhantomData<Z>);
+struct TransparentStruct<T>(T, std::marker::PhantomData<Z>);
+
+#[repr(transparent)]
+enum TransparentEnum<T> {
+ Variant(T, std::marker::PhantomData<Z>),
+}
+
+#[repr(transparent)]
+union TransparentUnion<T: Copy> {
+ field: T,
+}
struct Rust<T>(T);
fn nonzero_i128(x: Option<num::NonZeroI128>);
//~^ ERROR 128-bit integers don't currently have a known stable ABI
fn nonzero_isize(x: Option<num::NonZeroIsize>);
- fn repr_transparent(x: Option<Transparent<num::NonZeroU8>>);
+ fn transparent_struct(x: Option<TransparentStruct<num::NonZeroU8>>);
+ fn transparent_enum(x: Option<TransparentEnum<num::NonZeroU8>>);
+ fn transparent_union(x: Option<TransparentUnion<num::NonZeroU8>>);
+ //~^ ERROR enum has no representation hint
fn repr_rust(x: Option<Rust<num::NonZeroU8>>); //~ ERROR enum has no representation hint
fn no_result(x: Result<(), num::NonZeroI32>); //~ ERROR enum has no representation hint
}
error: `extern` block uses type `U` which is not FFI-safe: enum has no representation hint
- --> $DIR/lint-ctypes-enum.rs:27:13
+ --> $DIR/lint-ctypes-enum.rs:38:13
|
LL | fn uf(x: U);
| ^
|
note: lint level defined here
- --> $DIR/lint-ctypes-enum.rs:1:9
+ --> $DIR/lint-ctypes-enum.rs:2:9
|
LL | #![deny(improper_ctypes)]
| ^^^^^^^^^^^^^^^
- = help: consider adding a #[repr(...)] attribute to this enum
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
note: type defined here
- --> $DIR/lint-ctypes-enum.rs:7:1
+ --> $DIR/lint-ctypes-enum.rs:8:1
|
LL | enum U { A }
| ^^^^^^^^^^^^
error: `extern` block uses type `B` which is not FFI-safe: enum has no representation hint
- --> $DIR/lint-ctypes-enum.rs:28:13
+ --> $DIR/lint-ctypes-enum.rs:39:13
|
LL | fn bf(x: B);
| ^
|
- = help: consider adding a #[repr(...)] attribute to this enum
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
note: type defined here
- --> $DIR/lint-ctypes-enum.rs:8:1
+ --> $DIR/lint-ctypes-enum.rs:9:1
|
LL | enum B { C, D }
| ^^^^^^^^^^^^^^^
error: `extern` block uses type `T` which is not FFI-safe: enum has no representation hint
- --> $DIR/lint-ctypes-enum.rs:29:13
+ --> $DIR/lint-ctypes-enum.rs:40:13
|
LL | fn tf(x: T);
| ^
|
- = help: consider adding a #[repr(...)] attribute to this enum
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
note: type defined here
- --> $DIR/lint-ctypes-enum.rs:9:1
+ --> $DIR/lint-ctypes-enum.rs:10:1
|
LL | enum T { E, F, G }
| ^^^^^^^^^^^^^^^^^^
error: `extern` block uses type `u128` which is not FFI-safe: 128-bit integers don't currently have a known stable ABI
- --> $DIR/lint-ctypes-enum.rs:40:23
+ --> $DIR/lint-ctypes-enum.rs:51:23
|
LL | fn nonzero_u128(x: Option<num::NonZeroU128>);
| ^^^^^^^^^^^^^^^^^^^^^^^^
error: `extern` block uses type `i128` which is not FFI-safe: 128-bit integers don't currently have a known stable ABI
- --> $DIR/lint-ctypes-enum.rs:47:23
+ --> $DIR/lint-ctypes-enum.rs:58:23
|
LL | fn nonzero_i128(x: Option<num::NonZeroI128>);
| ^^^^^^^^^^^^^^^^^^^^^^^^
+error: `extern` block uses type `std::option::Option<TransparentUnion<std::num::NonZeroU8>>` which is not FFI-safe: enum has no representation hint
+ --> $DIR/lint-ctypes-enum.rs:63:28
+ |
+LL | fn transparent_union(x: Option<TransparentUnion<num::NonZeroU8>>);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
+
error: `extern` block uses type `std::option::Option<Rust<std::num::NonZeroU8>>` which is not FFI-safe: enum has no representation hint
- --> $DIR/lint-ctypes-enum.rs:51:20
+ --> $DIR/lint-ctypes-enum.rs:65:20
|
LL | fn repr_rust(x: Option<Rust<num::NonZeroU8>>);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
- = help: consider adding a #[repr(...)] attribute to this enum
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
error: `extern` block uses type `std::result::Result<(), std::num::NonZeroI32>` which is not FFI-safe: enum has no representation hint
- --> $DIR/lint-ctypes-enum.rs:52:20
+ --> $DIR/lint-ctypes-enum.rs:66:20
|
LL | fn no_result(x: Result<(), num::NonZeroI32>);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
- = help: consider adding a #[repr(...)] attribute to this enum
+ = help: consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
-error: aborting due to 7 previous errors
+error: aborting due to 8 previous errors
|
LL | #![deny(improper_ctypes)]
| ^^^^^^^^^^^^^^^
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/lint-ctypes.rs:24:1
|
LL | pub fn ptr_type2(size: *const Foo);
| ^^^^^^^^^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
note: type defined here
--> $DIR/lint-ctypes.rs:24:1
|
LL | pub fn box_type(p: Box<u32>);
| ^^^^^^^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
error: `extern` block uses type `char` which is not FFI-safe: the `char` type has no C equivalent
--> $DIR/lint-ctypes.rs:51:25
LL | pub fn fn_contained(p: RustBadRet);
| ^^^^^^^^^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
error: `extern` block uses type `i128` which is not FFI-safe: 128-bit integers don't currently have a known stable ABI
--> $DIR/lint-ctypes.rs:64:32
LL | pub fn transparent_fn(p: TransparentBadFn);
| ^^^^^^^^^^^^^^^^
|
- = help: consider adding a #[repr(C)] or #[repr(transparent)] attribute to this struct
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
error: aborting due to 20 previous errors
+++ /dev/null
-// When denying at the crate level, be sure to not get random warnings from the
-// injected intrinsics by the compiler.
-
-#![deny(unused_attributes)]
-#![allow(dead_code)]
-#![feature(custom_attribute)]
-
-#[ab_isize="stdcall"] extern {} //~ ERROR unused attribute
-
-#[fixed_stack_segment] fn f() {} //~ ERROR unused attribute
-
-fn main() {}
+++ /dev/null
-error: unused attribute
- --> $DIR/lint-obsolete-attr.rs:8:1
- |
-LL | #[ab_isize="stdcall"] extern {}
- | ^^^^^^^^^^^^^^^^^^^^^
- |
-note: lint level defined here
- --> $DIR/lint-obsolete-attr.rs:4:9
- |
-LL | #![deny(unused_attributes)]
- | ^^^^^^^^^^^^^^^^^
-
-error: unused attribute
- --> $DIR/lint-obsolete-attr.rs:10:1
- |
-LL | #[fixed_stack_segment] fn f() {}
- | ^^^^^^^^^^^^^^^^^^^^^^
-
-error: aborting due to 2 previous errors
-
+++ /dev/null
-// When denying at the crate level, be sure to not get random warnings from the
-// injected intrinsics by the compiler.
-
-#![feature(custom_attribute)]
-#![deny(unused_attributes)]
-
-#![mutable_doc] //~ ERROR unused attribute
-
-#[dance] mod a {} //~ ERROR unused attribute
-
-#[dance] fn main() {} //~ ERROR unused attribute
+++ /dev/null
-error: unused attribute
- --> $DIR/lint-unknown-attr.rs:9:1
- |
-LL | #[dance] mod a {}
- | ^^^^^^^^
- |
-note: lint level defined here
- --> $DIR/lint-unknown-attr.rs:5:9
- |
-LL | #![deny(unused_attributes)]
- | ^^^^^^^^^^^^^^^^^
-
-error: unused attribute
- --> $DIR/lint-unknown-attr.rs:11:1
- |
-LL | #[dance] fn main() {}
- | ^^^^^^^^
-
-error: unused attribute
- --> $DIR/lint-unknown-attr.rs:7:1
- |
-LL | #![mutable_doc]
- | ^^^^^^^^^^^^^^^
-
-error: aborting due to 3 previous errors
-
+++ /dev/null
-// Test behavior of `?` macro _kleene op_ under the 2015 edition. Namely, it doesn't exist.
-
-// edition:2015
-
-macro_rules! bar {
- ($(a)?) => {} //~ERROR expected `*` or `+`
-}
-
-macro_rules! baz {
- ($(a),?) => {} //~ERROR expected `*` or `+`
-}
-
-fn main() {}
+++ /dev/null
-error: expected `*` or `+`
- --> $DIR/macro-at-most-once-rep-2015-ques-rep.rs:6:10
- |
-LL | ($(a)?) => {}
- | ^
- |
- = note: `?` is not a macro repetition operator in the 2015 edition, but is accepted in the 2018 edition
-
-error: expected `*` or `+`
- --> $DIR/macro-at-most-once-rep-2015-ques-rep.rs:10:11
- |
-LL | ($(a),?) => {}
- | ^
- |
- = note: `?` is not a macro repetition operator in the 2015 edition, but is accepted in the 2018 edition
-
-error: aborting due to 2 previous errors
-
+++ /dev/null
-// Test behavior of `?` macro _separator_ under the 2015 edition. Namely, `?` can be used as a
-// separator, but you get a migration warning for the edition.
-
-// edition:2015
-// compile-pass
-
-#![warn(rust_2018_compatibility)]
-
-macro_rules! bar {
- ($(a)?*) => {} //~WARN using `?` as a separator
- //~^WARN this was previously accepted
-}
-
-macro_rules! baz {
- ($(a)?+) => {} //~WARN using `?` as a separator
- //~^WARN this was previously accepted
-}
-
-fn main() {
- bar!();
- bar!(a);
- bar!(a?a);
- bar!(a?a?a?a?a);
-
- baz!(a);
- baz!(a?a);
- baz!(a?a?a?a?a);
-}
+++ /dev/null
-warning: using `?` as a separator is deprecated and will be a hard error in an upcoming edition
- --> $DIR/macro-at-most-once-rep-2015-ques-sep.rs:10:10
- |
-LL | ($(a)?*) => {}
- | ^
- |
-note: lint level defined here
- --> $DIR/macro-at-most-once-rep-2015-ques-sep.rs:7:9
- |
-LL | #![warn(rust_2018_compatibility)]
- | ^^^^^^^^^^^^^^^^^^^^^^^
- = note: #[warn(question_mark_macro_sep)] implied by #[warn(rust_2018_compatibility)]
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in the 2018 edition!
- = note: for more information, see issue #48075 <https://github.com/rust-lang/rust/issues/48075>
-
-warning: using `?` as a separator is deprecated and will be a hard error in an upcoming edition
- --> $DIR/macro-at-most-once-rep-2015-ques-sep.rs:15:10
- |
-LL | ($(a)?+) => {}
- | ^
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in the 2018 edition!
- = note: for more information, see issue #48075 <https://github.com/rust-lang/rust/issues/48075>
-
--- /dev/null
+// Tests that `?` is a Kleene op and not a macro separator in the 2015 edition.
+
+// edition:2015
+
+macro_rules! foo {
+ ($(a)?) => {};
+}
+
+// The Kleene op `?` does not admit a separator before it.
+macro_rules! baz {
+ ($(a),?) => {}; //~ERROR the `?` macro repetition operator
+}
+
+macro_rules! barplus {
+ ($(a)?+) => {}; // ok. matches "a+" and "+"
+}
+
+macro_rules! barstar {
+ ($(a)?*) => {}; // ok. matches "a*" and "*"
+}
+
+pub fn main() {
+ foo!();
+ foo!(a);
+ foo!(a?); //~ ERROR no rules expected the token `?`
+ foo!(a?a); //~ ERROR no rules expected the token `?`
+ foo!(a?a?a); //~ ERROR no rules expected the token `?`
+
+ barplus!(); //~ERROR unexpected end of macro invocation
+ barplus!(a); //~ERROR unexpected end of macro invocation
+ barplus!(a?); //~ ERROR no rules expected the token `?`
+ barplus!(a?a); //~ ERROR no rules expected the token `?`
+ barplus!(a+);
+ barplus!(+);
+
+ barstar!(); //~ERROR unexpected end of macro invocation
+ barstar!(a); //~ERROR unexpected end of macro invocation
+ barstar!(a?); //~ ERROR no rules expected the token `?`
+ barstar!(a?a); //~ ERROR no rules expected the token `?`
+ barstar!(a*);
+ barstar!(*);
+}
--- /dev/null
+error: the `?` macro repetition operator does not take a separator
+ --> $DIR/macro-at-most-once-rep-2015.rs:11:10
+ |
+LL | ($(a),?) => {};
+ | ^
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:25:11
+ |
+LL | macro_rules! foo {
+ | ---------------- when calling this macro
+...
+LL | foo!(a?);
+ | ^ no rules expected this token in macro call
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:26:11
+ |
+LL | macro_rules! foo {
+ | ---------------- when calling this macro
+...
+LL | foo!(a?a);
+ | ^ no rules expected this token in macro call
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:27:11
+ |
+LL | macro_rules! foo {
+ | ---------------- when calling this macro
+...
+LL | foo!(a?a?a);
+ | ^ no rules expected this token in macro call
+
+error: unexpected end of macro invocation
+ --> $DIR/macro-at-most-once-rep-2015.rs:29:5
+ |
+LL | macro_rules! barplus {
+ | -------------------- when calling this macro
+...
+LL | barplus!();
+ | ^^^^^^^^^^^ missing tokens in macro arguments
+
+error: unexpected end of macro invocation
+ --> $DIR/macro-at-most-once-rep-2015.rs:30:15
+ |
+LL | macro_rules! barplus {
+ | -------------------- when calling this macro
+...
+LL | barplus!(a);
+ | ^ missing tokens in macro arguments
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:31:15
+ |
+LL | macro_rules! barplus {
+ | -------------------- when calling this macro
+...
+LL | barplus!(a?);
+ | ^ no rules expected this token in macro call
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:32:15
+ |
+LL | macro_rules! barplus {
+ | -------------------- when calling this macro
+...
+LL | barplus!(a?a);
+ | ^ no rules expected this token in macro call
+
+error: unexpected end of macro invocation
+ --> $DIR/macro-at-most-once-rep-2015.rs:36:5
+ |
+LL | macro_rules! barstar {
+ | -------------------- when calling this macro
+...
+LL | barstar!();
+ | ^^^^^^^^^^^ missing tokens in macro arguments
+
+error: unexpected end of macro invocation
+ --> $DIR/macro-at-most-once-rep-2015.rs:37:15
+ |
+LL | macro_rules! barstar {
+ | -------------------- when calling this macro
+...
+LL | barstar!(a);
+ | ^ missing tokens in macro arguments
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:38:15
+ |
+LL | macro_rules! barstar {
+ | -------------------- when calling this macro
+...
+LL | barstar!(a?);
+ | ^ no rules expected this token in macro call
+
+error: no rules expected the token `?`
+ --> $DIR/macro-at-most-once-rep-2015.rs:39:15
+ |
+LL | macro_rules! barstar {
+ | -------------------- when calling this macro
+...
+LL | barstar!(a?a);
+ | ^ no rules expected this token in macro call
+
+error: aborting due to 12 previous errors
+
($(a)?) => {};
}
+// The Kleene op `?` does not admit a separator before it.
macro_rules! baz {
($(a),?) => {}; //~ERROR the `?` macro repetition operator
}
error: the `?` macro repetition operator does not take a separator
- --> $DIR/macro-at-most-once-rep-2018.rs:10:10
+ --> $DIR/macro-at-most-once-rep-2018.rs:11:10
|
LL | ($(a),?) => {};
| ^
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:24:11
+ --> $DIR/macro-at-most-once-rep-2018.rs:25:11
|
LL | macro_rules! foo {
| ---------------- when calling this macro
| ^ no rules expected this token in macro call
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:25:11
+ --> $DIR/macro-at-most-once-rep-2018.rs:26:11
|
LL | macro_rules! foo {
| ---------------- when calling this macro
| ^ no rules expected this token in macro call
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:26:11
+ --> $DIR/macro-at-most-once-rep-2018.rs:27:11
|
LL | macro_rules! foo {
| ---------------- when calling this macro
| ^ no rules expected this token in macro call
error: unexpected end of macro invocation
- --> $DIR/macro-at-most-once-rep-2018.rs:28:5
+ --> $DIR/macro-at-most-once-rep-2018.rs:29:5
|
LL | macro_rules! barplus {
| -------------------- when calling this macro
| ^^^^^^^^^^^ missing tokens in macro arguments
error: unexpected end of macro invocation
- --> $DIR/macro-at-most-once-rep-2018.rs:29:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:30:15
|
LL | macro_rules! barplus {
| -------------------- when calling this macro
| ^ missing tokens in macro arguments
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:30:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:31:15
|
LL | macro_rules! barplus {
| -------------------- when calling this macro
| ^ no rules expected this token in macro call
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:31:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:32:15
|
LL | macro_rules! barplus {
| -------------------- when calling this macro
| ^ no rules expected this token in macro call
error: unexpected end of macro invocation
- --> $DIR/macro-at-most-once-rep-2018.rs:35:5
+ --> $DIR/macro-at-most-once-rep-2018.rs:36:5
|
LL | macro_rules! barstar {
| -------------------- when calling this macro
| ^^^^^^^^^^^ missing tokens in macro arguments
error: unexpected end of macro invocation
- --> $DIR/macro-at-most-once-rep-2018.rs:36:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:37:15
|
LL | macro_rules! barstar {
| -------------------- when calling this macro
| ^ missing tokens in macro arguments
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:37:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:38:15
|
LL | macro_rules! barstar {
| -------------------- when calling this macro
| ^ no rules expected this token in macro call
error: no rules expected the token `?`
- --> $DIR/macro-at-most-once-rep-2018.rs:38:15
+ --> $DIR/macro-at-most-once-rep-2018.rs:39:15
|
LL | macro_rules! barstar {
| -------------------- when calling this macro
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
macro_rules! test { ($nm:ident,
#[$a:meta],
#[cfg(not(qux))],
pub fn bar() { });
-#[qux]
+#[rustc_dummy]
fn main() {
a::bar();
//~^ ERROR failed to resolve: use of undeclared type or module `a`
= note: allowed there are: `{`, `[`, `=>`, `,`, `>`, `=`, `:`, `;`, `|`, `as` or `where`
error: `$ty:ty` is followed by `-`, which is not allowed for `ty` fragments
- --> $DIR/macro-input-future-proofing.rs:18:7
+ --> $DIR/macro-input-future-proofing.rs:18:15
|
LL | ($($ty:ty)-+) => ();
- | ^^^^^^^^ not allowed after `ty` fragments
+ | ^ not allowed after `ty` fragments
|
= note: allowed there are: `{`, `[`, `=>`, `,`, `>`, `=`, `:`, `;`, `|`, `as` or `where`
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
macro_rules! test { ($nm:ident,
#[$a:meta],
pub fn bar() { });
// test1!(#[bar])
-#[qux]
+#[rustc_dummy]
fn main() {
a::bar(); //~ ERROR cannot find function `bar` in module `a`
b::bar();
error: no rules expected the token `enum E { }`
--> $DIR/nonterminal-matching.rs:19:10
|
+LL | macro n(a $nt_item b) {
+ | --------------------- when calling this macro
+...
LL | n!(a $nt_item b);
| ^^^^^^^^ no rules expected this token in macro call
...
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
macro_rules! check {
($expr: expr) => (
- #[my_attr = $expr] //~ ERROR unexpected token: `-0`
- //~| ERROR unexpected token: `0 + 0`
+ #[rustc_dummy = $expr] //~ ERROR unexpected token: `-0`
+ //~| ERROR unexpected token: `0 + 0`
use main as _;
);
}
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: unexpected token: `-0`
- --> $DIR/malformed-interpolated.rs:5:21
+ --> $DIR/malformed-interpolated.rs:5:25
|
-LL | #[my_attr = $expr]
- | ^^^^^
+LL | #[rustc_dummy = $expr]
+ | ^^^^^
...
LL | check!(-0); // ERROR, see above
| ----------- in this macro invocation
error: unexpected token: `0 + 0`
- --> $DIR/malformed-interpolated.rs:5:21
+ --> $DIR/malformed-interpolated.rs:5:25
|
-LL | #[my_attr = $expr]
- | ^^^^^
+LL | #[rustc_dummy = $expr]
+ | ^^^^^
...
LL | check!(0 + 0); // ERROR, see above
| -------------- in this macro invocation
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0507]: cannot move out of `*r` which is behind a shared reference
--> $DIR/cannot-move-block-spans.rs:6:22
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0507]: cannot move out of `*r` which is behind a shared reference
--> $DIR/cannot-move-block-spans.rs:7:26
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0508]: cannot move out of type `[std::string::String; 2]`, a non-copy array
--> $DIR/cannot-move-block-spans.rs:11:15
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0507]: cannot move out of `*r` which is behind a shared reference
--> $DIR/cannot-move-block-spans.rs:18:45
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0507]: cannot move out of `*r` which is behind a shared reference
--> $DIR/cannot-move-block-spans.rs:19:49
| ^^
| |
| move occurs because `*r` has type `std::string::String`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error: aborting due to 9 previous errors
| ^^
| |
| move occurs because `*a` has type `A`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `a`
+ | help: consider borrowing here: `&*a`
error[E0508]: cannot move out of type `[A; 1]`, a non-copy array
--> $DIR/move-errors.rs:12:13
| ^^^
| |
| move occurs because `**r` has type `A`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `*r`
+ | help: consider borrowing here: `&**r`
error[E0507]: cannot move out of an `Rc`
--> $DIR/move-errors.rs:27:13
| ^^
| |
| move occurs because value has type `A`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `r`
+ | help: consider borrowing here: `&*r`
error[E0508]: cannot move out of type `[A; 1]`, a non-copy array
--> $DIR/move-errors.rs:32:13
--> $DIR/move-errors.rs:38:16
|
LL | let A(s) = *a;
- | - ^^ help: consider removing the `*`: `a`
+ | - ^^ help: consider borrowing here: `&*a`
| |
| data moved here
| move occurs because `s` has type `std::string::String`, which does not implement the `Copy` trait
--> $DIR/move-errors.rs:110:11
|
LL | match *x {
- | ^^ help: consider removing the `*`: `x`
+ | ^^ help: consider borrowing here: `&*x`
LL |
LL | Ok(s) | Err(s) => (),
| -
--- /dev/null
+// Unit test for the "user substitutions" that are annotated on each
+// node.
+
+struct SomeStruct<T>(T);
+
+fn no_annot() {
+ let c = 66;
+ let f = SomeStruct;
+ f(&c);
+}
+
+fn annot_underscore() {
+ let c = 66;
+ let f = SomeStruct::<_>;
+ f(&c);
+}
+
+fn annot_reference_any_lifetime() {
+ let c = 66;
+ let f = SomeStruct::<&u32>;
+ f(&c);
+}
+
+fn annot_reference_static_lifetime() {
+ let c = 66;
+ let f = SomeStruct::<&'static u32>;
+ f(&c); //~ ERROR
+}
+
+fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
+ let c = 66;
+ let f = SomeStruct::<&'a u32>;
+ f(&c); //~ ERROR
+}
+
+fn annot_reference_named_lifetime_ok<'a>(c: &'a u32) {
+ let f = SomeStruct::<&'a u32>;
+ f(c);
+}
+
+fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
+ let _closure = || {
+ let c = 66;
+ let f = SomeStruct::<&'a u32>;
+ f(&c); //~ ERROR
+ };
+}
+
+fn annot_reference_named_lifetime_across_closure<'a>(_: &'a u32) {
+ let f = SomeStruct::<&'a u32>;
+ let _closure = || {
+ let c = 66;
+ f(&c); //~ ERROR
+ };
+}
+
+fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32) {
+ let _closure = || {
+ let f = SomeStruct::<&'a u32>;
+ f(c);
+ };
+}
+
+fn annot_reference_named_lifetime_across_closure_ok<'a>(c: &'a u32) {
+ let f = SomeStruct::<&'a u32>;
+ let _closure = || {
+ f(c);
+ };
+}
+
+fn main() { }
--- /dev/null
+error[E0597]: `c` does not live long enough
+ --> $DIR/adt-tuple-struct-calls.rs:27:7
+ |
+LL | f(&c);
+ | --^^-
+ | | |
+ | | borrowed value does not live long enough
+ | argument requires that `c` is borrowed for `'static`
+LL | }
+ | - `c` dropped here while still borrowed
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/adt-tuple-struct-calls.rs:33:7
+ |
+LL | fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
+ | -- lifetime `'a` defined here
+...
+LL | f(&c);
+ | --^^-
+ | | |
+ | | borrowed value does not live long enough
+ | argument requires that `c` is borrowed for `'a`
+LL | }
+ | - `c` dropped here while still borrowed
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/adt-tuple-struct-calls.rs:45:11
+ |
+LL | fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
+ | -- lifetime `'a` defined here
+...
+LL | f(&c);
+ | --^^-
+ | | |
+ | | borrowed value does not live long enough
+ | argument requires that `c` is borrowed for `'a`
+LL | };
+ | - `c` dropped here while still borrowed
+
+error[E0597]: `c` does not live long enough
+ --> $DIR/adt-tuple-struct-calls.rs:53:11
+ |
+LL | let f = SomeStruct::<&'a u32>;
+ | - lifetime `'1` appears in the type of `f`
+...
+LL | f(&c);
+ | --^^-
+ | | |
+ | | borrowed value does not live long enough
+ | argument requires that `c` is borrowed for `'1`
+LL | };
+ | - `c` dropped here while still borrowed
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0597`.
LL | let _s = "foo\rbar";
| ^
-error: bare CR not allowed in raw string, use \r instead
- --> $DIR/lex-bare-cr-string-literal-doc-comment.rs:24:14
+error: bare CR not allowed in raw string
+ --> $DIR/lex-bare-cr-string-literal-doc-comment.rs:24:19
|
LL | let _s = r"bar\rfoo";
- | ^^^^^
+ | ^
error: unknown character escape: \r
--> $DIR/lex-bare-cr-string-literal-doc-comment.rs:27:19
macro_rules! foo {
{ $+ } => { //~ ERROR expected identifier, found `+`
//~^ ERROR missing fragment specifier
- $(x)(y) //~ ERROR expected `*` or `+`
+ $(x)(y) //~ ERROR expected one of: `*`, `+`, or `?`
}
}
LL | { $+ } => {
| ^
-error: expected `*` or `+`
+error: expected one of: `*`, `+`, or `?`
--> $DIR/issue-33569.rs:4:13
|
LL | $(x)(y)
+// ignore-tidy-cr
+// compile-flags: -Z continue-parse-after-error
pub fn main() {
+ br"a\r"; //~ ERROR bare CR not allowed in raw string
br"é"; //~ ERROR raw byte string must be ASCII
br##~"a"~##; //~ ERROR only `#` is allowed in raw string delimitation
}
-error: raw byte string must be ASCII: \u{e9}
- --> $DIR/raw-byte-string-literals.rs:2:8
+error: bare CR not allowed in raw string
+ --> $DIR/raw-byte-string-literals.rs:4:9
+ |
+LL | br"a\r";
+ | ^
+
+error: raw byte string must be ASCII
+ --> $DIR/raw-byte-string-literals.rs:5:8
|
LL | br"é";
| ^
error: found invalid character; only `#` is allowed in raw string delimitation: ~
- --> $DIR/raw-byte-string-literals.rs:3:6
+ --> $DIR/raw-byte-string-literals.rs:6:6
|
LL | br##~"a"~##;
| ^^^
-error: aborting due to 2 previous errors
+error: aborting due to 3 previous errors
// aux-build:derive-clona.rs
// aux-build:test-macros.rs
-#![feature(custom_attribute)]
-
#[macro_use]
extern crate derive_foo;
#[macro_use]
//~^ ERROR cannot find
struct Foo;
-#[attr_proc_macra] // OK, interpreted as a custom attribute
+// Interpreted as a feature gated custom attribute
+#[attr_proc_macra] //~ ERROR attribute `attr_proc_macra` is currently unknown
struct Bar;
-#[FooWithLongNan] // OK, interpreted as a custom attribute
+// Interpreted as a feature gated custom attribute
+#[FooWithLongNan] //~ ERROR attribute `FooWithLongNan` is currently unknown
struct Asdf;
#[derive(Dlone)]
+error[E0658]: The attribute `attr_proc_macra` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/resolve-error.rs:27:3
+ |
+LL | #[attr_proc_macra]
+ | ^^^^^^^^^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
+error[E0658]: The attribute `FooWithLongNan` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/resolve-error.rs:31:3
+ |
+LL | #[FooWithLongNan]
+ | ^^^^^^^^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
error: cannot find derive macro `FooWithLongNan` in this scope
- --> $DIR/resolve-error.rs:24:10
+ --> $DIR/resolve-error.rs:22:10
|
LL | #[derive(FooWithLongNan)]
| ^^^^^^^^^^^^^^ help: try: `FooWithLongName`
LL | bang_proc_macrp!();
| ^^^^^^^^^^^^^^^ help: you could try the macro: `bang_proc_macro`
-error: aborting due to 8 previous errors
+error: aborting due to 10 previous errors
+For more information about this error, try `rustc --explain E0658`.
-#![feature(repr_align_enum)]
#![allow(dead_code)]
#[repr(align(16.0))] //~ ERROR: invalid `repr(align)` attribute: not an unsuffixed integer
-struct A(i32);
+struct S0(i32);
#[repr(align(15))] //~ ERROR: invalid `repr(align)` attribute: not a power of two
-struct B(i32);
+struct S1(i32);
#[repr(align(4294967296))] //~ ERROR: invalid `repr(align)` attribute: larger than 2^29
-struct C(i32);
+struct S2(i32);
#[repr(align(536870912))] // ok: this is the largest accepted alignment
-struct D(i32);
+struct S3(i32);
+
+#[repr(align(16.0))] //~ ERROR: invalid `repr(align)` attribute: not an unsuffixed integer
+enum E0 { A, B }
#[repr(align(15))] //~ ERROR: invalid `repr(align)` attribute: not a power of two
-enum E { Left, Right }
+enum E1 { A, B }
+
+#[repr(align(4294967296))] //~ ERROR: invalid `repr(align)` attribute: larger than 2^29
+enum E2 { A, B }
+
+#[repr(align(536870912))] // ok: this is the largest accepted alignment
+enum E3 { A, B }
fn main() {}
error[E0589]: invalid `repr(align)` attribute: not an unsuffixed integer
- --> $DIR/repr-align.rs:4:8
+ --> $DIR/repr-align.rs:3:8
|
LL | #[repr(align(16.0))]
| ^^^^^^^^^^^
error[E0589]: invalid `repr(align)` attribute: not a power of two
- --> $DIR/repr-align.rs:7:8
+ --> $DIR/repr-align.rs:6:8
|
LL | #[repr(align(15))]
| ^^^^^^^^^
error[E0589]: invalid `repr(align)` attribute: larger than 2^29
- --> $DIR/repr-align.rs:10:8
+ --> $DIR/repr-align.rs:9:8
|
LL | #[repr(align(4294967296))]
| ^^^^^^^^^^^^^^^^^
+error[E0589]: invalid `repr(align)` attribute: not an unsuffixed integer
+ --> $DIR/repr-align.rs:15:8
+ |
+LL | #[repr(align(16.0))]
+ | ^^^^^^^^^^^
+
error[E0589]: invalid `repr(align)` attribute: not a power of two
- --> $DIR/repr-align.rs:16:8
+ --> $DIR/repr-align.rs:18:8
|
LL | #[repr(align(15))]
| ^^^^^^^^^
-error: aborting due to 4 previous errors
+error[E0589]: invalid `repr(align)` attribute: larger than 2^29
+ --> $DIR/repr-align.rs:21:8
+ |
+LL | #[repr(align(4294967296))]
+ | ^^^^^^^^^^^^^^^^^
+
+error: aborting due to 6 previous errors
For more information about this error, try `rustc --explain E0589`.
// See also repr-transparent.rs
-#[repr(transparent)] //~ ERROR unsupported representation for zero-variant enum
-enum Void {} //~| ERROR should be applied to struct
-
-#[repr(transparent)] //~ ERROR should be applied to struct
-enum FieldlessEnum {
- Foo,
- Bar,
-}
-
-#[repr(transparent)] //~ ERROR should be applied to struct
-enum Enum {
- Foo(String),
- Bar(u32),
-}
-
-#[repr(transparent)] //~ ERROR should be applied to struct
-union Foo {
- u: u32,
- s: i32
-}
-
#[repr(transparent)] //~ ERROR should be applied to struct
fn cant_repr_this() {}
-error[E0517]: attribute should be applied to struct
+error[E0517]: attribute should be applied to struct, enum, or union
--> $DIR/repr-transparent-other-items.rs:3:8
|
LL | #[repr(transparent)]
| ^^^^^^^^^^^
-LL | enum Void {}
- | ------------ not a struct
-
-error[E0517]: attribute should be applied to struct
- --> $DIR/repr-transparent-other-items.rs:6:8
- |
-LL | #[repr(transparent)]
- | ^^^^^^^^^^^
-LL | / enum FieldlessEnum {
-LL | | Foo,
-LL | | Bar,
-LL | | }
- | |_- not a struct
-
-error[E0517]: attribute should be applied to struct
- --> $DIR/repr-transparent-other-items.rs:12:8
- |
-LL | #[repr(transparent)]
- | ^^^^^^^^^^^
-LL | / enum Enum {
-LL | | Foo(String),
-LL | | Bar(u32),
-LL | | }
- | |_- not a struct
-
-error[E0517]: attribute should be applied to struct
- --> $DIR/repr-transparent-other-items.rs:18:8
- |
-LL | #[repr(transparent)]
- | ^^^^^^^^^^^
-LL | / union Foo {
-LL | | u: u32,
-LL | | s: i32
-LL | | }
- | |_- not a struct
-
-error[E0517]: attribute should be applied to struct
- --> $DIR/repr-transparent-other-items.rs:24:8
- |
-LL | #[repr(transparent)]
- | ^^^^^^^^^^^
LL | fn cant_repr_this() {}
- | ---------------------- not a struct
+ | ---------------------- not a struct, enum, or union
-error[E0517]: attribute should be applied to struct
- --> $DIR/repr-transparent-other-items.rs:27:8
+error[E0517]: attribute should be applied to struct, enum, or union
+ --> $DIR/repr-transparent-other-items.rs:6:8
|
LL | #[repr(transparent)]
| ^^^^^^^^^^^
LL | static CANT_REPR_THIS: u32 = 0;
- | ------------------------------- not a struct
-
-error[E0084]: unsupported representation for zero-variant enum
- --> $DIR/repr-transparent-other-items.rs:3:1
- |
-LL | #[repr(transparent)]
- | ^^^^^^^^^^^^^^^^^^^^
-LL | enum Void {}
- | ------------ zero-variant enum
+ | ------------------------------- not a struct, enum, or union
-error: aborting due to 7 previous errors
+error: aborting due to 2 previous errors
-Some errors have detailed explanations: E0084, E0517.
-For more information about an error, try `rustc --explain E0084`.
+For more information about this error, try `rustc --explain E0517`.
// - repr-transparent-other-reprs.rs
// - repr-transparent-other-items.rs
-#![feature(repr_align)]
+#![feature(repr_align, transparent_enums, transparent_unions)]
use std::marker::PhantomData;
#[repr(transparent)]
struct GenericAlign<T>(ZstAlign32<T>, u32); //~ ERROR alignment larger than 1
+#[repr(transparent)] //~ ERROR unsupported representation for zero-variant enum
+enum Void {}
+//~^ ERROR transparent enum needs exactly one variant, but has 0
+
+#[repr(transparent)]
+enum FieldlessEnum { //~ ERROR transparent enum needs exactly one non-zero-sized field, but has 0
+ Foo,
+}
+
+#[repr(transparent)]
+enum TooManyFieldsEnum {
+ Foo(u32, String),
+}
+//~^^^ ERROR transparent enum needs exactly one non-zero-sized field, but has 2
+
+#[repr(transparent)]
+enum TooManyVariants { //~ ERROR transparent enum needs exactly one variant, but has 2
+ Foo(String),
+ Bar,
+}
+
+#[repr(transparent)]
+union UnitUnion { //~ ERROR transparent union needs exactly one non-zero-sized field, but has 0
+ u: (),
+}
+
+#[repr(transparent)]
+union TooManyFields { //~ ERROR transparent union needs exactly one non-zero-sized field, but has 2
+ u: u32,
+ s: i32
+}
+
fn main() {}
|
LL | struct NoFields;
| ^^^^^^^^^^^^^^^^
- |
- = note: non-zero-sized field
error[E0690]: transparent struct needs exactly one non-zero-sized field, but has 0
--> $DIR/repr-transparent.rs:14:1
|
LL | struct ContainsOnlyZst(());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = note: non-zero-sized field
error[E0690]: transparent struct needs exactly one non-zero-sized field, but has 0
--> $DIR/repr-transparent.rs:17:1
|
LL | struct ContainsOnlyZstArray([bool; 0]);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = note: non-zero-sized field
error[E0690]: transparent struct needs exactly one non-zero-sized field, but has 0
--> $DIR/repr-transparent.rs:20:1
|
LL | struct ContainsMultipleZst(PhantomData<*const i32>, NoFields);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = note: non-zero-sized field
error[E0690]: transparent struct needs exactly one non-zero-sized field, but has 2
--> $DIR/repr-transparent.rs:24:1
LL | struct MultipleNonZst(u8, u8);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
-note: non-zero-sized field
+note: the following non-zero-sized fields exist on `MultipleNonZst`:
--> $DIR/repr-transparent.rs:24:23
|
LL | struct MultipleNonZst(u8, u8);
LL | pub struct StructWithProjection(f32, <f32 as Mirror>::It);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
-note: non-zero-sized field
+note: the following non-zero-sized fields exist on `StructWithProjection`:
--> $DIR/repr-transparent.rs:30:33
|
LL | pub struct StructWithProjection(f32, <f32 as Mirror>::It);
LL | struct GenericAlign<T>(ZstAlign32<T>, u32);
| ^^^^^^^^^^^^^
-error: aborting due to 8 previous errors
+error[E0084]: unsupported representation for zero-variant enum
+ --> $DIR/repr-transparent.rs:42:1
+ |
+LL | #[repr(transparent)]
+ | ^^^^^^^^^^^^^^^^^^^^
+LL | enum Void {}
+ | ------------ zero-variant enum
+
+error[E0731]: transparent enum needs exactly one variant, but has 0
+ --> $DIR/repr-transparent.rs:43:1
+ |
+LL | enum Void {}
+ | ^^^^^^^^^^^^
+
+error[E0690]: the variant of a transparent enum needs exactly one non-zero-sized field, but has 0
+ --> $DIR/repr-transparent.rs:47:1
+ |
+LL | / enum FieldlessEnum {
+LL | | Foo,
+LL | | }
+ | |_^
+
+error[E0690]: the variant of a transparent enum needs exactly one non-zero-sized field, but has 2
+ --> $DIR/repr-transparent.rs:52:1
+ |
+LL | / enum TooManyFieldsEnum {
+LL | | Foo(u32, String),
+LL | | }
+ | |_^
+ |
+note: the following non-zero-sized fields exist on `TooManyFieldsEnum`:
+ --> $DIR/repr-transparent.rs:53:9
+ |
+LL | Foo(u32, String),
+ | ^^^ ^^^^^^
+
+error[E0731]: transparent enum needs exactly one variant, but has 2
+ --> $DIR/repr-transparent.rs:58:1
+ |
+LL | / enum TooManyVariants {
+LL | | Foo(String),
+LL | | Bar,
+LL | | }
+ | |_^
+ |
+note: the following variants exist on `TooManyVariants`
+ --> $DIR/repr-transparent.rs:59:5
+ |
+LL | Foo(String),
+ | ^^^^^^^^^^^
+LL | Bar,
+ | ^^^
+
+error[E0690]: transparent union needs exactly one non-zero-sized field, but has 0
+ --> $DIR/repr-transparent.rs:64:1
+ |
+LL | / union UnitUnion {
+LL | | u: (),
+LL | | }
+ | |_^
+
+error[E0690]: transparent union needs exactly one non-zero-sized field, but has 2
+ --> $DIR/repr-transparent.rs:69:1
+ |
+LL | / union TooManyFields {
+LL | | u: u32,
+LL | | s: i32
+LL | | }
+ | |_^
+ |
+note: the following non-zero-sized fields exist on `TooManyFields`:
+ --> $DIR/repr-transparent.rs:70:5
+ |
+LL | u: u32,
+ | ^^^^^^
+LL | s: i32
+ | ^^^^^^
+
+error: aborting due to 15 previous errors
-Some errors have detailed explanations: E0690, E0691.
-For more information about an error, try `rustc --explain E0690`.
+Some errors have detailed explanations: E0084, E0690, E0691, E0731.
+For more information about an error, try `rustc --explain E0084`.
--- /dev/null
+{"artifact":"$TEST_BUILD_DIR/save-analysis/emit-notifications.nll/save-analysis/libemit_notifications.json","emit":"save-analysis"}
+{"artifact":"$TEST_BUILD_DIR/save-analysis/emit-notifications.nll/libemit_notifications.rlib","emit":"link"}
--- /dev/null
+// compile-pass
+// compile-flags: -Zsave-analysis -Zemit-artifact-notifications
+// compile-flags: --crate-type rlib --error-format=json
+pub fn foo() {}
--- /dev/null
+{"artifact":"$TEST_BUILD_DIR/save-analysis/emit-notifications/save-analysis/libemit_notifications.json","emit":"save-analysis"}
+{"artifact":"$TEST_BUILD_DIR/save-analysis/emit-notifications/libemit_notifications.rlib","emit":"link"}
| ^^^
| |
| move occurs because value has type `std::sync::atomic::AtomicBool`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `&x`
+ | help: consider borrowing here: `&*&x`
error[E0507]: cannot move out of a shared reference
--> $DIR/std-uncopyable-atomics.rs:11:13
| ^^^
| |
| move occurs because value has type `std::sync::atomic::AtomicIsize`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `&x`
+ | help: consider borrowing here: `&*&x`
error[E0507]: cannot move out of a shared reference
--> $DIR/std-uncopyable-atomics.rs:13:13
| ^^^
| |
| move occurs because value has type `std::sync::atomic::AtomicUsize`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `&x`
+ | help: consider borrowing here: `&*&x`
error[E0507]: cannot move out of a shared reference
--> $DIR/std-uncopyable-atomics.rs:15:13
| ^^^
| |
| move occurs because value has type `std::sync::atomic::AtomicPtr<usize>`, which does not implement the `Copy` trait
- | help: consider removing the `*`: `&x`
+ | help: consider borrowing here: `&*&x`
error: aborting due to 4 previous errors
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
macro_rules! stmt_mac {
() => {
}
fn main() {
- #[attr]
+ #[rustc_dummy]
fn a() {}
- #[attr] //~ ERROR attributes on expressions are experimental
+ // Bug: built-in attrs like `rustc_dummy` are not gated on blocks, but other attrs are.
+ #[rustfmt::skip] //~ ERROR attributes on expressions are experimental
{
}
- #[attr]
+ #[rustc_dummy]
5;
- #[attr]
+ #[rustc_dummy]
stmt_mac!();
}
#[cfg(unset)]
fn c() {
- #[attr]
+ #[rustc_dummy]
5;
}
#[cfg(not(unset))]
fn j() {
- #[attr]
+ #[rustc_dummy]
5;
}
#[cfg_attr(not(unset), cfg(unset))]
fn d() {
- #[attr]
+ #[rustc_dummy]
8;
}
#[cfg_attr(not(unset), cfg(not(unset)))]
fn i() {
- #[attr]
+ #[rustc_dummy]
8;
}
macro_rules! item_mac {
($e:ident) => {
fn $e() {
- #[attr]
+ #[rustc_dummy]
42;
#[cfg(unset)]
fn f() {
- #[attr]
+ #[rustc_dummy]
5;
}
#[cfg(not(unset))]
fn k() {
- #[attr]
+ #[rustc_dummy]
5;
}
#[cfg_attr(not(unset), cfg(unset))]
fn g() {
- #[attr]
+ #[rustc_dummy]
8;
}
#[cfg_attr(not(unset), cfg(not(unset)))]
fn h() {
- #[attr]
+ #[rustc_dummy]
8;
}
extern {
#[cfg(unset)]
- fn x(a: [u8; #[attr] 5]);
- fn y(a: [u8; #[attr] 5]); //~ ERROR attributes on expressions are experimental
+ fn x(a: [u8; #[rustc_dummy] 5]);
+ fn y(a: [u8; #[rustc_dummy] 5]); //~ ERROR attributes on expressions are experimental
}
struct Foo;
impl Foo {
#[cfg(unset)]
- const X: u8 = #[attr] 5;
- const Y: u8 = #[attr] 5; //~ ERROR attributes on expressions are experimental
+ const X: u8 = #[rustc_dummy] 5;
+ const Y: u8 = #[rustc_dummy] 5; //~ ERROR attributes on expressions are experimental
}
trait Bar {
#[cfg(unset)]
- const X: [u8; #[attr] 5];
- const Y: [u8; #[attr] 5]; //~ ERROR attributes on expressions are experimental
+ const X: [u8; #[rustc_dummy] 5];
+ const Y: [u8; #[rustc_dummy] 5]; //~ ERROR attributes on expressions are experimental
}
struct Joyce {
#[cfg(unset)]
- field: [u8; #[attr] 5],
- field2: [u8; #[attr] 5] //~ ERROR attributes on expressions are experimental
+ field: [u8; #[rustc_dummy] 5],
+ field2: [u8; #[rustc_dummy] 5] //~ ERROR attributes on expressions are experimental
}
struct Walky(
- #[cfg(unset)] [u8; #[attr] 5],
- [u8; #[attr] 5] //~ ERROR attributes on expressions are experimental
+ #[cfg(unset)] [u8; #[rustc_dummy] 5],
+ [u8; #[rustc_dummy] 5] //~ ERROR attributes on expressions are experimental
);
enum Mike {
Happy(
- #[cfg(unset)] [u8; #[attr] 5],
- [u8; #[attr] 5] //~ ERROR attributes on expressions are experimental
+ #[cfg(unset)] [u8; #[rustc_dummy] 5],
+ [u8; #[rustc_dummy] 5] //~ ERROR attributes on expressions are experimental
),
Angry {
#[cfg(unset)]
- field: [u8; #[attr] 5],
- field2: [u8; #[attr] 5] //~ ERROR attributes on expressions are experimental
+ field: [u8; #[rustc_dummy] 5],
+ field2: [u8; #[rustc_dummy] 5] //~ ERROR attributes on expressions are experimental
}
}
fn pat() {
match 5 {
#[cfg(unset)]
- 5 => #[attr] (),
- 6 => #[attr] (), //~ ERROR attributes on expressions are experimental
+ 5 => #[rustc_dummy] (),
+ 6 => #[rustc_dummy] (), //~ ERROR attributes on expressions are experimental
_ => (),
}
}
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:13:5
+ --> $DIR/stmt_expr_attrs_no_feature.rs:14:5
|
-LL | #[attr]
- | ^^^^^^^
+LL | #[rustfmt::skip]
+ | ^^^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:94:18
+ --> $DIR/stmt_expr_attrs_no_feature.rs:95:18
|
-LL | fn y(a: [u8; #[attr] 5]);
- | ^^^^^^^
+LL | fn y(a: [u8; #[rustc_dummy] 5]);
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:101:19
+ --> $DIR/stmt_expr_attrs_no_feature.rs:102:19
|
-LL | const Y: u8 = #[attr] 5;
- | ^^^^^^^
+LL | const Y: u8 = #[rustc_dummy] 5;
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:107:19
+ --> $DIR/stmt_expr_attrs_no_feature.rs:108:19
|
-LL | const Y: [u8; #[attr] 5];
- | ^^^^^^^
+LL | const Y: [u8; #[rustc_dummy] 5];
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:113:18
+ --> $DIR/stmt_expr_attrs_no_feature.rs:114:18
|
-LL | field2: [u8; #[attr] 5]
- | ^^^^^^^
+LL | field2: [u8; #[rustc_dummy] 5]
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:118:10
+ --> $DIR/stmt_expr_attrs_no_feature.rs:119:10
|
-LL | [u8; #[attr] 5]
- | ^^^^^^^
+LL | [u8; #[rustc_dummy] 5]
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:124:14
+ --> $DIR/stmt_expr_attrs_no_feature.rs:125:14
|
-LL | [u8; #[attr] 5]
- | ^^^^^^^
+LL | [u8; #[rustc_dummy] 5]
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:129:22
+ --> $DIR/stmt_expr_attrs_no_feature.rs:130:22
|
-LL | field2: [u8; #[attr] 5]
- | ^^^^^^^
+LL | field2: [u8; #[rustc_dummy] 5]
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
error[E0658]: attributes on expressions are experimental
- --> $DIR/stmt_expr_attrs_no_feature.rs:137:14
+ --> $DIR/stmt_expr_attrs_no_feature.rs:138:14
|
-LL | 6 => #[attr] (),
- | ^^^^^^^
+LL | 6 => #[rustc_dummy] (),
+ | ^^^^^^^^^^^^^^
|
= note: for more information, see https://github.com/rust-lang/rust/issues/15701
= help: add #![feature(stmt_expr_attributes)] to the crate attributes to enable
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
-#[my_attr = 1usize] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1u8] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1u16] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1u32] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1u64] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1isize] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1i8] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1i16] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1i32] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1i64] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1.0f32] //~ ERROR: suffixed literals are not allowed in attributes
-#[my_attr = 1.0f64] //~ ERROR: suffixed literals are not allowed in attributes
-fn main() { }
+#[rustc_dummy = 1usize] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1u8] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1u16] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1u32] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1u64] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1isize] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1i8] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1i16] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1i32] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1i64] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1.0f32] //~ ERROR: suffixed literals are not allowed in attributes
+#[rustc_dummy = 1.0f64] //~ ERROR: suffixed literals are not allowed in attributes
+fn main() {}
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:3:13
+ --> $DIR/suffixed-literal-meta.rs:3:17
|
-LL | #[my_attr = 1usize]
- | ^^^^^^
+LL | #[rustc_dummy = 1usize]
+ | ^^^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:4:13
+ --> $DIR/suffixed-literal-meta.rs:4:17
|
-LL | #[my_attr = 1u8]
- | ^^^
+LL | #[rustc_dummy = 1u8]
+ | ^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:5:13
+ --> $DIR/suffixed-literal-meta.rs:5:17
|
-LL | #[my_attr = 1u16]
- | ^^^^
+LL | #[rustc_dummy = 1u16]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:6:13
+ --> $DIR/suffixed-literal-meta.rs:6:17
|
-LL | #[my_attr = 1u32]
- | ^^^^
+LL | #[rustc_dummy = 1u32]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:7:13
+ --> $DIR/suffixed-literal-meta.rs:7:17
|
-LL | #[my_attr = 1u64]
- | ^^^^
+LL | #[rustc_dummy = 1u64]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:8:13
+ --> $DIR/suffixed-literal-meta.rs:8:17
|
-LL | #[my_attr = 1isize]
- | ^^^^^^
+LL | #[rustc_dummy = 1isize]
+ | ^^^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:9:13
+ --> $DIR/suffixed-literal-meta.rs:9:17
|
-LL | #[my_attr = 1i8]
- | ^^^
+LL | #[rustc_dummy = 1i8]
+ | ^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:10:13
+ --> $DIR/suffixed-literal-meta.rs:10:17
|
-LL | #[my_attr = 1i16]
- | ^^^^
+LL | #[rustc_dummy = 1i16]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:11:13
+ --> $DIR/suffixed-literal-meta.rs:11:17
|
-LL | #[my_attr = 1i32]
- | ^^^^
+LL | #[rustc_dummy = 1i32]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:12:13
+ --> $DIR/suffixed-literal-meta.rs:12:17
|
-LL | #[my_attr = 1i64]
- | ^^^^
+LL | #[rustc_dummy = 1i64]
+ | ^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:13:13
+ --> $DIR/suffixed-literal-meta.rs:13:17
|
-LL | #[my_attr = 1.0f32]
- | ^^^^^^
+LL | #[rustc_dummy = 1.0f32]
+ | ^^^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
error: suffixed literals are not allowed in attributes
- --> $DIR/suffixed-literal-meta.rs:14:13
+ --> $DIR/suffixed-literal-meta.rs:14:17
|
-LL | #[my_attr = 1.0f64]
- | ^^^^^^
+LL | #[rustc_dummy = 1.0f64]
+ | ^^^^^^
|
= help: instead of using a suffixed literal (1u8, 1.0f32, etc.), use an unsuffixed version (1, 1.0, etc.).
let X(_t) = *s;
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION s
if let Either::One(_t) = *r { }
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION r
while let Either::One(_t) = *r { }
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION r
match *r {
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION r
Either::One(_t)
| Either::Two(_t) => (),
}
match *r {
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION r
Either::One(_t) => (),
Either::Two(ref _t) => (),
let X(_t) = *sm;
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION sm
if let Either::One(_t) = *rm { }
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION rm
while let Either::One(_t) = *rm { }
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION rm
match *rm {
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION rm
Either::One(_t)
| Either::Two(_t) => (),
}
match *rm {
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION rm
Either::One(_t) => (),
Either::Two(ref _t) => (),
}
match *rm {
//~^ ERROR cannot move
- //~| HELP consider removing the `*`
+ //~| HELP consider borrowing here
//~| SUGGESTION rm
Either::One(_t) => (),
Either::Two(ref mut _t) => (),
--> $DIR/simple.rs:38:17
|
LL | let X(_t) = *s;
- | -- ^^ help: consider removing the `*`: `s`
+ | -- ^^ help: consider borrowing here: `&*s`
| |
| data moved here
| move occurs because `_t` has type `Y`, which does not implement the `Copy` trait
--> $DIR/simple.rs:42:30
|
LL | if let Either::One(_t) = *r { }
- | -- ^^ help: consider removing the `*`: `r`
+ | -- ^^ help: consider borrowing here: `&*r`
| |
| data moved here
| move occurs because `_t` has type `X`, which does not implement the `Copy` trait
--> $DIR/simple.rs:46:33
|
LL | while let Either::One(_t) = *r { }
- | -- ^^ help: consider removing the `*`: `r`
+ | -- ^^ help: consider borrowing here: `&*r`
| |
| data moved here
| move occurs because `_t` has type `X`, which does not implement the `Copy` trait
--> $DIR/simple.rs:50:11
|
LL | match *r {
- | ^^ help: consider removing the `*`: `r`
+ | ^^ help: consider borrowing here: `&*r`
...
LL | Either::One(_t)
| --
--> $DIR/simple.rs:57:11
|
LL | match *r {
- | ^^ help: consider removing the `*`: `r`
+ | ^^ help: consider borrowing here: `&*r`
...
LL | Either::One(_t) => (),
| --
--> $DIR/simple.rs:66:17
|
LL | let X(_t) = *sm;
- | -- ^^^ help: consider removing the `*`: `sm`
+ | -- ^^^ help: consider borrowing here: `&*sm`
| |
| data moved here
| move occurs because `_t` has type `Y`, which does not implement the `Copy` trait
--> $DIR/simple.rs:70:30
|
LL | if let Either::One(_t) = *rm { }
- | -- ^^^ help: consider removing the `*`: `rm`
+ | -- ^^^ help: consider borrowing here: `&*rm`
| |
| data moved here
| move occurs because `_t` has type `X`, which does not implement the `Copy` trait
--> $DIR/simple.rs:74:33
|
LL | while let Either::One(_t) = *rm { }
- | -- ^^^ help: consider removing the `*`: `rm`
+ | -- ^^^ help: consider borrowing here: `&*rm`
| |
| data moved here
| move occurs because `_t` has type `X`, which does not implement the `Copy` trait
--> $DIR/simple.rs:78:11
|
LL | match *rm {
- | ^^^ help: consider removing the `*`: `rm`
+ | ^^^ help: consider borrowing here: `&*rm`
...
LL | Either::One(_t)
| --
--> $DIR/simple.rs:85:11
|
LL | match *rm {
- | ^^^ help: consider removing the `*`: `rm`
+ | ^^^ help: consider borrowing here: `&*rm`
...
LL | Either::One(_t) => (),
| --
--> $DIR/simple.rs:93:11
|
LL | match *rm {
- | ^^^ help: consider removing the `*`: `rm`
+ | ^^^ help: consider borrowing here: `&*rm`
...
LL | Either::One(_t) => (),
| --
-#![feature(custom_attribute)]
-
type A = rustfmt; //~ ERROR expected type, found tool module `rustfmt`
type B = rustfmt::skip; //~ ERROR expected type, found tool attribute `rustfmt::skip`
#[derive(rustfmt)] //~ ERROR cannot find derive macro `rustfmt` in this scope
struct S;
-#[rustfmt] // OK, interpreted as a custom attribute
+// Interpreted as a feature gated custom attribute
+#[rustfmt] //~ ERROR attribute `rustfmt` is currently unknown
fn check() {}
#[rustfmt::skip] // OK
+error[E0658]: The attribute `rustfmt` is currently unknown to the compiler and may have meaning added to it in the future
+ --> $DIR/tool-attributes-misplaced-1.rs:8:3
+ |
+LL | #[rustfmt]
+ | ^^^^^^^
+ |
+ = note: for more information, see https://github.com/rust-lang/rust/issues/29642
+ = help: add #![feature(custom_attribute)] to the crate attributes to enable
+
error: cannot find derive macro `rustfmt` in this scope
- --> $DIR/tool-attributes-misplaced-1.rs:6:10
+ --> $DIR/tool-attributes-misplaced-1.rs:4:10
|
LL | #[derive(rustfmt)]
| ^^^^^^^
error: cannot find macro `rustfmt!` in this scope
- --> $DIR/tool-attributes-misplaced-1.rs:15:5
+ --> $DIR/tool-attributes-misplaced-1.rs:14:5
|
LL | rustfmt!();
| ^^^^^^^
error[E0573]: expected type, found tool module `rustfmt`
- --> $DIR/tool-attributes-misplaced-1.rs:3:10
+ --> $DIR/tool-attributes-misplaced-1.rs:1:10
|
LL | type A = rustfmt;
| ^^^^^^^ not a type
error[E0573]: expected type, found tool attribute `rustfmt::skip`
- --> $DIR/tool-attributes-misplaced-1.rs:4:10
+ --> $DIR/tool-attributes-misplaced-1.rs:2:10
|
LL | type B = rustfmt::skip;
| ^^^^^^^^^^^^^ not a type
error[E0423]: expected value, found tool module `rustfmt`
- --> $DIR/tool-attributes-misplaced-1.rs:14:5
+ --> $DIR/tool-attributes-misplaced-1.rs:13:5
|
LL | rustfmt;
| ^^^^^^^ not a value
error[E0423]: expected value, found tool attribute `rustfmt::skip`
- --> $DIR/tool-attributes-misplaced-1.rs:17:5
+ --> $DIR/tool-attributes-misplaced-1.rs:16:5
|
LL | rustfmt::skip;
| ^^^^^^^^^^^^^ not a value
-error: aborting due to 6 previous errors
+error: aborting due to 7 previous errors
-For more information about this error, try `rustc --explain E0423`.
+Some errors have detailed explanations: E0423, E0658.
+For more information about an error, try `rustc --explain E0423`.
--- /dev/null
+// compile-pass
+// #55266
+
+struct VTable<DST: ?Sized> {
+ _to_dst_ptr: fn(*mut ()) -> *mut DST,
+}
+
+trait HasVTableFor<DST: ?Sized + 'static> {
+ const VTABLE: &'static VTable<DST>;
+}
+
+impl<T, DST: ?Sized + 'static> HasVTableFor<DST> for T {
+ const VTABLE: &'static VTable<DST> = &VTable {
+ _to_dst_ptr: |_: *mut ()| unsafe { std::mem::zeroed() },
+ };
+}
+
+pub fn push<DST: ?Sized + 'static, T>() {
+ <T as HasVTableFor<DST>>::VTABLE;
+}
+
+fn main() {}
|
LL | #![deny(improper_ctypes)]
| ^^^^^^^^^^^^^^^
- = help: consider adding a #[repr(C)] attribute to this union
+ = help: consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this union
note: type defined here
--> $DIR/union-repr-c.rs:9:1
|
// compile-pass
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
-#[my_attr(a b c d)]
-#[my_attr[a b c d]]
-#[my_attr{a b c d}]
+#[rustc_dummy(a b c d)]
+#[rustc_dummy[a b c d]]
+#[rustc_dummy{a b c d}]
fn main() {}
#![deny(unused_attributes)]
-#![allow(dead_code, unused_imports, unused_extern_crates)]
-#![feature(custom_attribute)]
+#![feature(rustc_attrs)]
-#![foo] //~ ERROR unused attribute
+#![rustc_dummy] //~ ERROR unused attribute
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
extern crate core;
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
use std::collections;
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
extern "C" {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
fn foo();
}
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
mod foo {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
pub enum Foo {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
Bar,
}
}
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
fn bar(f: foo::Foo) {
match f {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
foo::Foo::Bar => {}
}
}
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
struct Foo {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
a: isize
}
-#[foo] //~ ERROR unused attribute
+#[rustc_dummy] //~ ERROR unused attribute
trait Baz {
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
fn blah(&self);
- #[foo] //~ ERROR unused attribute
+ #[rustc_dummy] //~ ERROR unused attribute
fn blah2(&self) {}
}
error: unused attribute
- --> $DIR/unused-attr.rs:7:1
+ --> $DIR/unused-attr.rs:6:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
|
note: lint level defined here
--> $DIR/unused-attr.rs:1:9
| ^^^^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:10:1
+ --> $DIR/unused-attr.rs:9:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:15:5
+ --> $DIR/unused-attr.rs:14:5
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:13:1
+ --> $DIR/unused-attr.rs:12:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:23:9
+ --> $DIR/unused-attr.rs:22:9
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:21:5
+ --> $DIR/unused-attr.rs:20:5
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:19:1
+ --> $DIR/unused-attr.rs:18:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:31:9
+ --> $DIR/unused-attr.rs:30:9
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:28:1
+ --> $DIR/unused-attr.rs:27:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:38:5
+ --> $DIR/unused-attr.rs:37:5
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:36:1
+ --> $DIR/unused-attr.rs:35:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:44:5
+ --> $DIR/unused-attr.rs:43:5
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:46:5
+ --> $DIR/unused-attr.rs:45:5
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:42:1
+ --> $DIR/unused-attr.rs:41:1
|
-LL | #[foo]
- | ^^^^^^
+LL | #[rustc_dummy]
+ | ^^^^^^^^^^^^^^
error: unused attribute
- --> $DIR/unused-attr.rs:5:1
+ --> $DIR/unused-attr.rs:4:1
|
-LL | #![foo]
- | ^^^^^^^
+LL | #![rustc_dummy]
+ | ^^^^^^^^^^^^^^^
error: aborting due to 15 previous errors
-Subproject commit 71be6f62fa920c0bd10cdf3a29aeb8c6719a8075
+Subproject commit c0dbd34ba99a949ece25c297a4a377685eb89c7c
-Subproject commit 9692ca8fd82a8f96a4113dc4b88c1fb1d79c1c60
+Subproject commit 483dcbc73f9923e98c71ec9df11ee3d0d5cfb467