In many cases, you don't need a full `make doc`. You can use `rustdoc` directly
to check small fixes. For example, `rustdoc src/doc/reference.md` will render
the reference to `doc/reference.html`. The CSS might be messed up, but you can
-verify that HTML is right.
+verify that the HTML is right.
## Issue Triage
if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi
if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi
-if [ -n "$CFG_DISABLE_ORBIT" ]; then putvar CFG_DISABLE_ORBIT; fi
-
step_msg "looking for build programs"
probe_need CFG_CURL curl
CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=ON"
fi
- CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;PowerPC'"
+ CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;PowerPC;SystemZ'"
CMAKE_ARGS="$CMAKE_ARGS -G '$CFG_CMAKE_GENERATOR'"
CMAKE_ARGS="$CMAKE_ARGS $CFG_LLVM_SRC_DIR"
--- /dev/null
+# rustbuild-only target
D := $(S)src/doc
-DOC_TARGETS := book nomicon style error-index
+DOC_TARGETS := book nomicon error-index
COMPILER_DOC_TARGETS :=
DOC_L10N_TARGETS :=
$(Q)rm -rf doc/nomicon
$(Q)$(RUSTBOOK) build $(S)src/doc/nomicon doc/nomicon
-style: doc/style/index.html
-
-doc/style/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/style/*.md) | doc/
- @$(call E, rustbook: $@)
- $(Q)rm -rf doc/style
- $(Q)$(RUSTBOOK) build $(S)src/doc/style doc/style
-
error-index: doc/error-index.html
# Metadata used to generate the index is created as a side effect of
CFG_RUSTC_FLAGS += -g
endif
-ifdef CFG_DISABLE_ORBIT
- $(info cfg: HOLD HOLD HOLD (CFG_DISABLE_ORBIT))
- RUSTFLAGS_STAGE1 += -Z orbit=off
- RUSTFLAGS_STAGE2 += -Z orbit=off
-endif
-
ifdef SAVE_TEMPS
CFG_RUSTC_FLAGS += -C save-temps
endif
# LLVM macros
######################################################################
-LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl
+LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl systemz
LLVM_REQUIRED_COMPONENTS=ipo bitreader bitwriter linker asmparser mcjit \
interpreter instrumentation
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[metadata]
+"checksum aho-corasick 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2b3fb52b09c1710b961acb35390d514be82e4ac96a9969a8e38565a29b878dc9"
+"checksum cmake 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "dfcf5bcece56ef953b8ea042509e9dcbdfe97820b7e20d86beb53df30ed94978"
+"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"
+"checksum gcc 0.3.31 (git+https://github.com/alexcrichton/gcc-rs)" = "<none>"
+"checksum gcc 0.3.31 (registry+https://github.com/rust-lang/crates.io-index)" = "cfe877476e53690ebb0ce7325d0bf43e198d9500291b54b3c65e518de5039b07"
+"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685"
+"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
+"checksum libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "55f3730be7e803cf350d32061958171731c2395831fbd67a61083782808183e0"
+"checksum md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a5539a8dee9b4ae308c9c406a379838b435a8f2c84cf9fedc6d5a576be9888db"
+"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
+"checksum num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "51fedae97a05f7353612fe017ab705a37e6db8f4d67c5c6fe739a9e70d6eed09"
+"checksum regex 0.1.73 (registry+https://github.com/rust-lang/crates.io-index)" = "56b7ee9f764ecf412c6e2fff779bca4b22980517ae335a21aeaf4e32625a5df2"
+"checksum regex-syntax 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "31040aad7470ad9d8c46302dcffba337bb4289ca5da2e3cd6e37b64109a85199"
+"checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b"
+"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
+"checksum thread_local 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "55dd963dbaeadc08aa7266bf7f91c3154a7805e32bb94b820b769d2ef3b4744d"
+"checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6"
+"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
+"checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4"
+"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
pub channel: String,
pub musl_root: Option<PathBuf>,
pub prefix: Option<String>,
+ pub codegen_tests: bool,
}
/// Per-target configuration stored in the global configuration structure.
config.rust_codegen_units = 1;
config.build = build.to_string();
config.channel = "dev".to_string();
+ config.codegen_tests = true;
let toml = file.map(|file| {
let mut f = t!(File::open(&file));
("DEBUGINFO_TESTS", self.rust_debuginfo_tests),
("LOCAL_REBUILD", self.local_rebuild),
("NINJA", self.ninja),
+ ("CODEGEN_TESTS", self.codegen_tests),
}
match key {
use std::collections::HashMap;
use std::env;
use std::fs::{self, File};
-use std::path::{PathBuf, Path};
+use std::path::{Component, PathBuf, Path};
use std::process::Command;
use build_helper::{run_silent, output};
doc::rustbook(self, stage, target.target, "nomicon",
&doc_out);
}
- DocStyle { stage } => {
- doc::rustbook(self, stage, target.target, "style",
- &doc_out);
- }
DocStandalone { stage } => {
doc::standalone(self, stage, target.target, &doc_out);
}
"mir-opt", "mir-opt");
}
CheckCodegen { compiler } => {
- check::compiletest(self, &compiler, target.target,
- "codegen", "codegen");
+ if self.config.codegen_tests {
+ check::compiletest(self, &compiler, target.target,
+ "codegen", "codegen");
+ }
}
CheckCodegenUnits { compiler } => {
check::compiletest(self, &compiler, target.target,
/// This will detect if any submodules are out of date and run the necessary
/// commands to sync them all with upstream.
fn update_submodules(&self) {
+ struct Submodule<'a> {
+ path: &'a Path,
+ state: State,
+ }
+
+ enum State {
+ // The submodule may have staged/unstaged changes
+ MaybeDirty,
+ // Or could be initialized but never updated
+ NotInitialized,
+            // The submodule, itself, has extra commits but those changes haven't been committed to
+ // the (outer) git repository
+ OutOfSync,
+ }
+
if !self.config.submodules {
return
}
if fs::metadata(self.src.join(".git")).is_err() {
return
}
+ let git = || {
+ let mut cmd = Command::new("git");
+ cmd.current_dir(&self.src);
+ return cmd
+ };
let git_submodule = || {
let mut cmd = Command::new("git");
cmd.current_dir(&self.src).arg("submodule");
// of detecting whether we need to run all the submodule commands
// below.
let out = output(git_submodule().arg("status"));
- if !out.lines().any(|l| l.starts_with("+") || l.starts_with("-")) {
- return
+ let mut submodules = vec![];
+ for line in out.lines() {
+ // NOTE `git submodule status` output looks like this:
+ //
+ // -5066b7dcab7e700844b0e2ba71b8af9dc627a59b src/liblibc
+ // +b37ef24aa82d2be3a3cc0fe89bf82292f4ca181c src/compiler-rt (remotes/origin/..)
+ // e058ca661692a8d01f8cf9d35939dfe3105ce968 src/jemalloc (3.6.0-533-ge058ca6)
+ //
+            // The first character can be '-', '+' or ' ' and denotes the `State` of the submodule.
+            // NOTE(review): the match below checks '*' for `OutOfSync`, but `git submodule status`
+            // documents '+' for that state (as shown in the example above) — confirm which prefix
+            // is intended, since a '+'-prefixed line would currently hit the panic arm.
+            // Right next to this character is the SHA-1 of the submodule HEAD,
+            // and after that comes the path to the submodule.
+ let path = Path::new(line[1..].split(' ').skip(1).next().unwrap());
+ let state = if line.starts_with('-') {
+ State::NotInitialized
+ } else if line.starts_with('*') {
+ State::OutOfSync
+ } else if line.starts_with(' ') {
+ State::MaybeDirty
+ } else {
+ panic!("unexpected git submodule state: {:?}", line.chars().next());
+ };
+
+ submodules.push(Submodule { path: path, state: state })
}
self.run(git_submodule().arg("sync"));
- self.run(git_submodule().arg("init"));
- self.run(git_submodule().arg("update"));
- self.run(git_submodule().arg("update").arg("--recursive"));
- self.run(git_submodule().arg("status").arg("--recursive"));
- self.run(git_submodule().arg("foreach").arg("--recursive")
- .arg("git").arg("clean").arg("-fdx"));
- self.run(git_submodule().arg("foreach").arg("--recursive")
- .arg("git").arg("checkout").arg("."));
+
+ for submodule in submodules {
+ // If using llvm-root then don't touch the llvm submodule.
+ if submodule.path.components().any(|c| c == Component::Normal("llvm".as_ref())) &&
+ self.config.target_config.get(&self.config.build)
+ .and_then(|c| c.llvm_config.as_ref()).is_some()
+ {
+ continue
+ }
+
+ if submodule.path.components().any(|c| c == Component::Normal("jemalloc".as_ref())) &&
+ !self.config.use_jemalloc
+ {
+ continue
+ }
+
+ match submodule.state {
+ State::MaybeDirty => {
+ // drop staged changes
+ self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"]));
+ // drops unstaged changes
+ self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"]));
+ },
+ State::NotInitialized => {
+ self.run(git_submodule().arg("init").arg(submodule.path));
+ self.run(git_submodule().arg("update").arg(submodule.path));
+ },
+ State::OutOfSync => {
+ // drops submodule commits that weren't reported to the (outer) git repository
+ self.run(git_submodule().arg("update").arg(submodule.path));
+ self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"]));
+ self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"]));
+ },
+ }
+ }
}
/// Clear out `dir` if `input` is newer.
.arg("-j").arg(self.jobs().to_string())
.arg("--target").arg(target);
+ // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005
+ // Force cargo to output binaries with disambiguating hashes in the name
+ cargo.env("__CARGO_DEFAULT_LIB_METADATA", "1");
+
let stage;
if compiler.stage == 0 && self.local_rebuild {
// Assume the local-rebuild rustc already has stage1 features.
.out_dir(&dst)
.profile(if build.config.llvm_optimize {"Release"} else {"Debug"})
.define("LLVM_ENABLE_ASSERTIONS", assertions)
- .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC")
+ .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC;SystemZ")
.define("LLVM_INCLUDE_EXAMPLES", "OFF")
.define("LLVM_INCLUDE_TESTS", "OFF")
.define("LLVM_INCLUDE_DOCS", "OFF")
// Externally configured LLVM requires FileCheck to exist
let filecheck = build.llvm_filecheck(&build.config.build);
- if !filecheck.starts_with(&build.out) && !filecheck.exists() {
+ if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests {
panic!("filecheck executable {:?} does not exist", filecheck);
}
(doc, Doc { stage: u32 }),
(doc_book, DocBook { stage: u32 }),
(doc_nomicon, DocNomicon { stage: u32 }),
- (doc_style, DocStyle { stage: u32 }),
(doc_standalone, DocStandalone { stage: u32 }),
(doc_std, DocStd { stage: u32 }),
(doc_test, DocTest { stage: u32 }),
vec![self.libtest(compiler)]
}
Source::DocBook { stage } |
- Source::DocNomicon { stage } |
- Source::DocStyle { stage } => {
+ Source::DocNomicon { stage } => {
vec![self.target(&build.config.build).tool_rustbook(stage)]
}
Source::DocErrorIndex { stage } => {
Source::Doc { stage } => {
let mut deps = vec![
self.doc_book(stage), self.doc_nomicon(stage),
- self.doc_style(stage), self.doc_standalone(stage),
- self.doc_std(stage),
+ self.doc_standalone(stage), self.doc_std(stage),
self.doc_error_index(stage),
];
This macro causes the current thread to panic. You can give it a message
to panic with:
-```rust,no_run
+```rust,should_panic
panic!("oh no!");
```
takes two values and checks them for equality. `true` passes, `false` `panic!`s.
Like this:
-```rust,no_run
+```rust,should_panic
// A-ok!
assert!(true);
but we don’t define a body, only a type signature. When we `impl` a trait,
we use `impl Trait for Item`, rather than only `impl Item`.
+`Self` may be used in a type annotation to refer to an instance of the type
+implementing this trait that is passed as a parameter. `Self`, `&Self` or
+`&mut Self` may be used, depending on the level of ownership required.
+
+```rust
+struct Circle {
+ x: f64,
+ y: f64,
+ radius: f64,
+}
+
+trait HasArea {
+ fn area(&self) -> f64;
+
+ fn is_larger(&self, &Self) -> bool;
+}
+
+impl HasArea for Circle {
+ fn area(&self) -> f64 {
+ std::f64::consts::PI * (self.radius * self.radius)
+ }
+
+ fn is_larger(&self, other: &Self) -> bool {
+ self.area() > other.area()
+ }
+}
+```
+
## Trait bounds on generic functions
Traits are useful because they allow a type to make certain promises about its
#[derive(PartialEq, Clone)]
struct Foo<T> {
a: i32,
- b: T
+ b: T,
}
```
use std::ops::Deref;
struct CharContainer {
- value: char
+ value: char,
}
impl Deref for CharContainer {
+++ /dev/null
-% Style Guidelines
-
-This document collects the emerging principles, conventions, abstractions, and
-best practices for writing Rust code.
-
-Since Rust is evolving at a rapid pace, these guidelines are
-preliminary. The hope is that writing them down explicitly will help
-drive discussion, consensus and adoption.
-
-Whenever feasible, guidelines provide specific examples from Rust's standard
-libraries.
-
-### Guideline statuses
-
-Every guideline has a status:
-
-* **[FIXME]**: Marks places where there is more work to be done. In
- some cases, that just means going through the RFC process.
-
-* **[FIXME #NNNNN]**: Like **[FIXME]**, but links to the issue tracker.
-
-* **[RFC #NNNN]**: Marks accepted guidelines, linking to the rust-lang
- RFC establishing them.
-
-### Guideline stabilization
-
-One purpose of these guidelines is to reach decisions on a number of
-cross-cutting API and stylistic choices. Discussion and development of
-the guidelines will happen primarily on https://internals.rust-lang.org/,
-using the Guidelines category. Discussion can also occur on the
-[guidelines issue tracker](https://github.com/rust-lang/rust-guidelines).
-
-Guidelines that are under development or discussion will be marked with the
-status **[FIXME]**, with a link to the issue tracker when appropriate.
-
-Once a concrete guideline is ready to be proposed, it should be filed
-as an [FIXME: needs RFC](https://github.com/rust-lang/rfcs). If the RFC is
-accepted, the official guidelines will be updated to match, and will
-include the tag **[RFC #NNNN]** linking to the RFC document.
-
-### What's in this document
-
-This document is broken into four parts:
-
-* **[Style](style/README.md)** provides a set of rules governing naming conventions,
- whitespace, and other stylistic issues.
-
-* **[Guidelines by Rust feature](features/README.md)** places the focus on each of
- Rust's features, starting from expressions and working the way out toward
- crates, dispensing guidelines relevant to each.
-
-* **Topical guidelines and patterns**. The rest of the document proceeds by
- cross-cutting topic, starting with
- [Ownership and resources](ownership/README.md).
-
-* **APIs for a changing Rust**
- discusses the forward-compatibility hazards, especially those that interact
- with the pre-1.0 library stabilization process.
-
-> **[FIXME]** Add cross-references throughout this document to the tutorial,
-> reference manual, and other guides.
-
-> **[FIXME]** What are some _non_-goals, _non_-principles, or _anti_-patterns that
-> we should document?
+++ /dev/null
-# Summary
-
-* [Style](style/README.md)
- * [Whitespace](style/whitespace.md)
- * [Comments](style/comments.md)
- * [Braces, semicolons, commas](style/braces.md)
- * [Naming](style/naming/README.md)
- * [Ownership variants](style/naming/ownership.md)
- * [Containers/wrappers](style/naming/containers.md)
- * [Conversions](style/naming/conversions.md)
- * [Iterators](style/naming/iterators.md)
- * [Imports](style/imports.md)
- * [Organization](style/organization.md)
-* [Guidelines by Rust feature](features/README.md)
- * [Let binding](features/let.md)
- * [Pattern matching](features/match.md)
- * [Loops](features/loops.md)
- * [Functions and methods](features/functions-and-methods/README.md)
- * [Input](features/functions-and-methods/input.md)
- * [Output](features/functions-and-methods/output.md)
- * [For convenience](features/functions-and-methods/convenience.md)
- * [Types](features/types/README.md)
- * [Conversions](features/types/conversions.md)
- * [The newtype pattern](features/types/newtype.md)
- * [Traits](features/traits/README.md)
- * [For generics](features/traits/generics.md)
- * [For objects](features/traits/objects.md)
- * [For overloading](features/traits/overloading.md)
- * [For extensions](features/traits/extensions.md)
- * [For reuse](features/traits/reuse.md)
- * [Common traits](features/traits/common.md)
- * [Modules](features/modules.md)
- * [Crates](features/crates.md)
-* [Ownership and resources](ownership/README.md)
- * [Constructors](ownership/constructors.md)
- * [Builders](ownership/builders.md)
- * [Destructors](ownership/destructors.md)
- * [RAII](ownership/raii.md)
- * [Cells and smart pointers](ownership/cell-smart.md)
-* [Errors](errors/README.md)
- * [Signaling](errors/signaling.md)
- * [Handling](errors/handling.md)
- * [Propagation](errors/propagation.md)
- * [Ergonomics](errors/ergonomics.md)
-* [Safety and guarantees](safety/README.md)
- * [Using unsafe](safety/unsafe.md)
- * [Library guarantees](safety/lib-guarantees.md)
-* [Testing](testing/README.md)
- * [Unit testing](testing/unit.md)
-* [FFI, platform-specific code](platform.md)
+++ /dev/null
-% Errors
-
-> **[FIXME]** Add some general text here.
+++ /dev/null
-% Ergonomic error handling
-
-Error propagation with raw `Result`s can require tedious matching and
-repackaging. This tedium is largely alleviated by the `try!` macro,
-and can be completely removed (in some cases) by the "`Result`-`impl`"
-pattern.
-
-### The `try!` macro
-
-Prefer
-
-```rust,ignore
-use std::io::{File, Open, Write, IoError};
-
-struct Info {
- name: String,
- age: i32,
- rating: i32
-}
-
-fn write_info(info: &Info) -> Result<(), IoError> {
- let mut file = File::open_mode(&Path::new("my_best_friends.txt"),
- Open, Write);
- // Early return on error
- try!(file.write_line(&format!("name: {}", info.name)));
- try!(file.write_line(&format!("age: {}", info.age)));
- try!(file.write_line(&format!("rating: {}", info.rating)));
- return Ok(());
-}
-```
-
-over
-
-```rust,ignore
-use std::io::{File, Open, Write, IoError};
-
-struct Info {
- name: String,
- age: i32,
- rating: i32
-}
-
-fn write_info(info: &Info) -> Result<(), IoError> {
- let mut file = File::open_mode(&Path::new("my_best_friends.txt"),
- Open, Write);
- // Early return on error
- match file.write_line(&format!("name: {}", info.name)) {
- Ok(_) => (),
- Err(e) => return Err(e)
- }
- match file.write_line(&format!("age: {}", info.age)) {
- Ok(_) => (),
- Err(e) => return Err(e)
- }
- return file.write_line(&format!("rating: {}", info.rating));
-}
-```
-
-See
-[the `result` module documentation](https://doc.rust-lang.org/stable/std/result/index.html#the-try-macro)
-for more details.
-
-### The `Result`-`impl` pattern [FIXME]
-
-> **[FIXME]** Document the way that the `io` module uses trait impls
-> on `std::io::Result` to painlessly propagate errors.
+++ /dev/null
-% Handling errors
-
-### Use thread isolation to cope with failure. [FIXME]
-
-> **[FIXME]** Explain how to isolate threads and detect thread failure for recovery.
-
-### Consuming `Result` [FIXME]
+++ /dev/null
-% Propagation
-
-> **[FIXME]** We need guidelines on how to layer error information up a stack of
-> abstractions.
-
-### Error interoperation [FIXME]
-
-> **[FIXME]** Document the `FromError` infrastructure.
+++ /dev/null
-% Signaling errors [RFC #236]
-
-> The guidelines below were approved by [RFC #236](https://github.com/rust-lang/rfcs/pull/236).
-
-Errors fall into one of three categories:
-
-* Catastrophic errors, e.g. out-of-memory.
-* Contract violations, e.g. wrong input encoding, index out of bounds.
-* Obstructions, e.g. file not found, parse error.
-
-The basic principle of the convention is that:
-
-* Catastrophic errors and programming errors (bugs) can and should only be
-recovered at a *coarse grain*, i.e. a thread boundary.
-* Obstructions preventing an operation should be reported at a maximally *fine
-grain* -- to the immediate invoker of the operation.
-
-## Catastrophic errors
-
-An error is _catastrophic_ if there is no meaningful way for the current thread to
-continue after the error occurs.
-
-Catastrophic errors are _extremely_ rare, especially outside of `libstd`.
-
-**Canonical examples**: out of memory, stack overflow.
-
-### For catastrophic errors, panic
-
-For errors like stack overflow, Rust currently aborts the process, but
-could in principle panic, which (in the best case) would allow
-reporting and recovery from a supervisory thread.
-
-## Contract violations
-
-An API may define a contract that goes beyond the type checking enforced by the
-compiler. For example, slices support an indexing operation, with the contract
-that the supplied index must be in bounds.
-
-Contracts can be complex and involve more than a single function invocation. For
-example, the `RefCell` type requires that `borrow_mut` not be called until all
-existing borrows have been relinquished.
-
-### For contract violations, panic
-
-A contract violation is always a bug, and for bugs we follow the Erlang
-philosophy of "let it crash": we assume that software *will* have bugs, and we
-design coarse-grained thread boundaries to report, and perhaps recover, from these
-bugs.
-
-### Contract design
-
-One subtle aspect of these guidelines is that the contract for a function is
-chosen by an API designer -- and so the designer also determines what counts as
-a violation.
-
-This RFC does not attempt to give hard-and-fast rules for designing
-contracts. However, here are some rough guidelines:
-
-* Prefer expressing contracts through static types whenever possible.
-
-* It *must* be possible to write code that uses the API without violating the
- contract.
-
-* Contracts are most justified when violations are *inarguably* bugs -- but this
- is surprisingly rare.
-
-* Consider whether the API client could benefit from the contract-checking
- logic. The checks may be expensive. Or there may be useful programming
- patterns where the client does not want to check inputs before hand, but would
- rather attempt the operation and then find out whether the inputs were invalid.
-
-* When a contract violation is the *only* kind of error a function may encounter
- -- i.e., there are no obstructions to its success other than "bad" inputs --
- using `Result` or `Option` instead is especially warranted. Clients can then use
- `unwrap` to assert that they have passed valid input, or re-use the error
- checking done by the API for their own purposes.
-
-* When in doubt, use loose contracts and instead return a `Result` or `Option`.
-
-## Obstructions
-
-An operation is *obstructed* if it cannot be completed for some reason, even
-though the operation's contract has been satisfied. Obstructed operations may
-have (documented!) side effects -- they are not required to roll back after
-encountering an obstruction. However, they should leave the data structures in
-a "coherent" state (satisfying their invariants, continuing to guarantee safety,
-etc.).
-
-Obstructions may involve external conditions (e.g., I/O), or they may involve
-aspects of the input that are not covered by the contract.
-
-**Canonical examples**: file not found, parse error.
-
-### For obstructions, use `Result`
-
-The
-[`Result<T,E>` type](https://doc.rust-lang.org/stable/std/result/index.html)
-represents either a success (yielding `T`) or failure (yielding `E`). By
-returning a `Result`, a function allows its clients to discover and react to
-obstructions in a fine-grained way.
-
-#### What about `Option`?
-
-The `Option` type should not be used for "obstructed" operations; it
-should only be used when a `None` return value could be considered a
-"successful" execution of the operation.
-
-This is of course a somewhat subjective question, but a good litmus
-test is: would a reasonable client ever ignore the result? The
-`Result` type provides a lint that ensures the result is actually
-inspected, while `Option` does not, and this difference of behavior
-can help when deciding between the two types.
-
-Another litmus test: can the operation be understood as asking a
-question (possibly with sideeffects)? Operations like `pop` on a
-vector can be viewed as asking for the contents of the first element,
-with the side effect of removing it if it exists -- with an `Option`
-return value.
-
-## Do not provide both `Result` and `panic!` variants.
-
-An API should not provide both `Result`-producing and `panic`king versions of an
-operation. It should provide just the `Result` version, allowing clients to use
-`try!` or `unwrap` instead as needed. This is part of the general pattern of
-cutting down on redundant variants by instead using method chaining.
+++ /dev/null
-% Guidelines by language feature
-
-Rust provides a unique combination of language features, some new and some
-old. This section gives guidance on when and how to use Rust's features, and
-brings attention to some of the tradeoffs between different features.
-
-Notably missing from this section is an in-depth discussion of Rust's pointer
-types (both built-in and in the library). The topic of pointers is discussed at
-length in a [separate section on ownership](../ownership/README.md).
+++ /dev/null
-% Crates
-
-> **[FIXME]** What general guidelines should we provide for crate design?
-
-> Possible topics: facades; per-crate preludes (to be imported as globs);
-> "lib.rs"
+++ /dev/null
-% Functions and methods
-
-### Prefer methods to functions if there is a clear receiver. **[FIXME: needs RFC]**
-
-Prefer
-
-```rust,ignore
-impl Foo {
- pub fn frob(&self, w: widget) { ... }
-}
-```
-
-over
-
-```rust,ignore
-pub fn frob(foo: &Foo, w: widget) { ... }
-```
-
-for any operation that is clearly associated with a particular
-type.
-
-Methods have numerous advantages over functions:
-
-* They do not need to be imported or qualified to be used: all you
- need is a value of the appropriate type.
-* Their invocation performs autoborrowing (including mutable borrows).
-* They make it easy to answer the question "what can I do with a value
- of type `T`" (especially when using rustdoc).
-* They provide `self` notation, which is more concise and often more
- clearly conveys ownership distinctions.
-
-> **[FIXME]** Revisit these guidelines with
-> [UFCS](https://github.com/nick29581/rfcs/blob/ufcs/0000-ufcs.md) and
-> conventions developing around it.
-
-
-
-### Guidelines for inherent methods. **[FIXME]**
-
-> **[FIXME]** We need guidelines for when to provide inherent methods on a type,
-> versus methods through a trait or functions.
-
-> **NOTE**: Rules for method resolution around inherent methods are in flux,
-> which may impact the guidelines.
+++ /dev/null
-% Convenience methods
-
-### Provide small, coherent sets of convenience methods. **[FIXME: needs RFC]**
-
-_Convenience methods_ wrap up existing functionality in a more convenient
-way. The work done by a convenience method varies widely:
-
-* _Re-providing functions as methods_. For example, the `std::path::Path` type
- provides methods like `stat` on `Path`s that simply invoke the corresponding
- function in `std::io::fs`.
-* _Skipping through conversions_. For example, the `str` type provides a
- `.len()` convenience method which is also expressible as `.as_bytes().len()`.
- Sometimes the conversion is more complex: the `str` module also provides
- `from_chars`, which encapsulates a simple use of iterators.
-* _Encapsulating common arguments_. For example, vectors of `&str`s
- provide a `connect` as well as a special case, `concat`, that is expressible
- using `connect` with a fixed separator of `""`.
-* _Providing more efficient special cases_. The `connect` and `concat` example
- also applies here: singling out `concat` as a special case allows for a more
- efficient implementation.
-
- Note, however, that the `connect` method actually detects the special case
- internally and invokes `concat`. Usually, it is not necessary to add a public
- convenience method just for efficiency gains; there should also be a
- _conceptual_ reason to add it, e.g. because it is such a common special case.
-
-It is tempting to add convenience methods in a one-off, haphazard way as
-common use patterns emerge. Avoid this temptation, and instead _design_ small,
-coherent sets of convenience methods that are easy to remember:
-
-* _Small_: Avoid combinatorial explosions of convenience methods. For example,
- instead of adding `_str` variants of methods that provide a `str` output,
- instead ensure that the normal output type of methods is easily convertible to
- `str`.
-* _Coherent_: Look for small groups of convenience methods that make sense to
- include together. For example, the `Path` API mentioned above includes a small
- selection of the most common filesystem operations that take a `Path`
- argument. If one convenience method strongly suggests the existence of others,
- consider adding the whole group.
-* _Memorable_: It is not worth saving a few characters of typing if you have to
- look up the name of a convenience method every time you use it. Add
- convenience methods with names that are obvious and easy to remember, and add
- them for the most common or painful use cases.
+++ /dev/null
-% Input to functions and methods
-
-### Let the client decide when to copy and where to place data. [FIXME: needs RFC]
-
-#### Copying:
-
-Prefer
-
-```rust,ignore
-fn foo(b: Bar) {
- // use b as owned, directly
-}
-```
-
-over
-
-```rust,ignore
-fn foo(b: &Bar) {
- let b = b.clone();
- // use b as owned after cloning
-}
-```
-
-If a function requires ownership of a value of unknown type `T`, but does not
-otherwise need to make copies, the function should take ownership of the
-argument (pass by value `T`) rather than using `.clone()`. That way, the caller
-can decide whether to relinquish ownership or to `clone`.
-
-Similarly, the `Copy` trait bound should only be demanded it when absolutely
-needed, not as a way of signaling that copies should be cheap to make.
-
-#### Placement:
-
-Prefer
-
-```rust,ignore
-fn foo(b: Bar) -> Bar { ... }
-```
-
-over
-
-```rust,ignore
-fn foo(b: Box<Bar>) -> Box<Bar> { ... }
-```
-
-for concrete types `Bar` (as opposed to trait objects). This way, the caller can
-decide whether to place data on the stack or heap. No overhead is imposed by
-letting the caller determine the placement.
-
-### Minimize assumptions about parameters. [FIXME: needs RFC]
-
-The fewer assumptions a function makes about its inputs, the more widely usable
-it becomes.
-
-#### Minimizing assumptions through generics:
-
-Prefer
-
-```rust,ignore
-fn foo<T: Iterator<i32>>(c: T) { ... }
-```
-
-over any of
-
-```rust,ignore
-fn foo(c: &[i32]) { ... }
-fn foo(c: &Vec<i32>) { ... }
-fn foo(c: &SomeOtherCollection<i32>) { ... }
-```
-
-if the function only needs to iterate over the data.
-
-More generally, consider using generics to pinpoint the assumptions a function
-needs to make about its arguments.
-
-On the other hand, generics can make it more difficult to read and understand a
-function's signature. Aim for "natural" parameter types that are neither overly
-concrete nor overly abstract. See the discussion on
-[traits](../traits/README.md) for more guidance.
-
-
-#### Minimizing ownership assumptions:
-
-Prefer either of
-
-```rust,ignore
-fn foo(b: &Bar) { ... }
-fn foo(b: &mut Bar) { ... }
-```
-
-over
-
-```rust,ignore
-fn foo(b: Bar) { ... }
-```
-
-That is, prefer borrowing arguments rather than transferring ownership, unless
-ownership is actually needed.
-
-### Prefer compound return types to out-parameters. [FIXME: needs RFC]
-
-Prefer
-
-```rust,ignore
-fn foo() -> (Bar, Bar)
-```
-
-over
-
-```rust,ignore
-fn foo(output: &mut Bar) -> Bar
-```
-
-for returning multiple `Bar` values.
-
-Compound return types like tuples and structs are efficiently compiled
-and do not require heap allocation. If a function needs to return
-multiple values, it should do so via one of these types.
-
-The primary exception: sometimes a function is meant to modify data
-that the caller already owns, for example to re-use a buffer:
-
-```rust,ignore
-fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize>
-```
-
-(From the [Read trait](https://doc.rust-lang.org/stable/std/io/trait.Read.html#tymethod.read).)
-
-### Consider validating arguments, statically or dynamically. [FIXME: needs RFC]
-
-_Note: this material is closely related to
- [library-level guarantees](../../safety/lib-guarantees.md)._
-
-Rust APIs do _not_ generally follow the
-[robustness principle](https://en.wikipedia.org/wiki/Robustness_principle): "be
-conservative in what you send; be liberal in what you accept".
-
-Instead, Rust code should _enforce_ the validity of input whenever practical.
-
-Enforcement can be achieved through the following mechanisms (listed
-in order of preference).
-
-#### Static enforcement:
-
-Choose an argument type that rules out bad inputs.
-
-For example, prefer
-
-```rust,ignore
-enum FooMode {
- Mode1,
- Mode2,
- Mode3,
-}
-fn foo(mode: FooMode) { ... }
-```
-
-over
-
-```rust,ignore
-fn foo(mode2: bool, mode3: bool) {
- assert!(!mode2 || !mode3);
- ...
-}
-```
-
-Static enforcement usually comes at little run-time cost: it pushes the
-costs to the boundaries. It also catches bugs early, during compilation,
-rather than through run-time failures.
-
-On the other hand, some properties are difficult or impossible to
-express using types.
-
-#### Dynamic enforcement:
-
-Validate the input as it is processed (or ahead of time, if necessary). Dynamic
-checking is often easier to implement than static checking, but has several
-downsides:
-
-1. Runtime overhead (unless checking can be done as part of processing the input).
-2. Delayed detection of bugs.
-3. Introduces failure cases, either via `panic!` or `Result`/`Option` types (see
- the [error handling guidelines](../../errors/README.md)), which must then be
- dealt with by client code.
-
-#### Dynamic enforcement with `debug_assert!`:
-
-Same as dynamic enforcement, but with the possibility of easily turning off
-expensive checks for production builds.
-
-#### Dynamic enforcement with opt-out:
-
-Same as dynamic enforcement, but adds sibling functions that opt out of the
-checking.
-
-The convention is to mark these opt-out functions with a suffix like
-`_unchecked` or by placing them in a `raw` submodule.
-
-The unchecked functions can be used judiciously in cases where (1) performance
-dictates avoiding checks and (2) the client is otherwise confident that the
-inputs are valid.
-
-> **[FIXME]** Should opt-out functions be marked `unsafe`?
+++ /dev/null
-% Output from functions and methods
-
-### Don't overpromise. [FIXME]
-
-> **[FIXME]** Add discussion of overly-specific return types,
-> e.g. returning a compound iterator type rather than hiding it behind
-> a use of newtype.
-
-### Let clients choose what to throw away. [FIXME: needs RFC]
-
-#### Return useful intermediate results:
-
-Many functions that answer a question also compute interesting related data. If
-this data is potentially of interest to the client, consider exposing it in the
-API.
-
-Prefer
-
-```rust,ignore
-struct SearchResult {
- found: bool, // item in container?
- expected_index: usize // what would the item's index be?
-}
-
-fn binary_search(&self, k: Key) -> SearchResult
-```
-or
-
-```rust,ignore
-fn binary_search(&self, k: Key) -> (bool, usize)
-```
-
-over
-
-```rust,ignore
-fn binary_search(&self, k: Key) -> bool
-```
-
-#### Yield back ownership:
-
-Prefer
-
-```rust,ignore
-fn from_utf8_owned(vv: Vec<u8>) -> Result<String, Vec<u8>>
-```
-
-over
-
-```rust,ignore
-fn from_utf8_owned(vv: Vec<u8>) -> Option<String>
-```
-
-The `from_utf8_owned` function gains ownership of a vector. In the successful
-case, the function consumes its input, returning an owned string without
-allocating or copying. In the unsuccessful case, however, the function returns
-back ownership of the original slice.
+++ /dev/null
-% Let binding
-
-### Always separately bind RAII guards. [FIXME: needs RFC]
-
-Prefer
-
-```rust,ignore
-fn use_mutex(m: sync::mutex::Mutex<i32>) {
- let guard = m.lock();
- do_work(guard);
- drop(guard); // unlock the lock
- // do other work
-}
-```
-
-over
-
-```rust,ignore
-fn use_mutex(m: sync::mutex::Mutex<i32>) {
- do_work(m.lock());
- // do other work
-}
-```
-
-As explained in the [RAII guide](../ownership/raii.md), RAII guards are values
-that represent ownership of some resource and whose destructor releases the
-resource. Because the lifetime of a guard is significant, guards should always be
-explicitly `let`-bound to make the lifetime clear. Consider using an explicit
-`drop` to release the resource early.
-
-### Prefer conditional expressions to deferred initialization. [FIXME: needs RFC]
-
-Prefer
-
-```rust,ignore
-let foo = match bar {
- Baz => 0,
- Quux => 1
-};
-```
-
-over
-
-```rust,ignore
-let foo;
-match bar {
- Baz => {
- foo = 0;
- }
- Quux => {
- foo = 1;
- }
-}
-```
-
-unless the conditions for initialization are too complex to fit into a simple
-conditional expression.
-
-### Use type annotations for clarification; prefer explicit generics when inference fails. [FIXME: needs RFC]
-
-Prefer
-
-```rust,ignore
-let v = s.iter().map(|x| x * 2)
- .collect::<Vec<_>>();
-```
-
-over
-
-```rust,ignore
-let v: Vec<_> = s.iter().map(|x| x * 2)
- .collect();
-```
-
-When the type of a value might be unclear to the _reader_ of the code, consider
-explicitly annotating it in a `let`.
-
-On the other hand, when the type is unclear to the _compiler_, prefer to specify
-the type by explicit generics instantiation, which is usually more clear.
-
-### Shadowing [FIXME]
-
-> **[FIXME]** Repeatedly shadowing a binding is somewhat common in Rust code. We
-> need to articulate a guideline on when it is appropriate/useful and when not.
-
-### Prefer immutable bindings. [FIXME: needs RFC]
-
-Use `mut` bindings to signal the span during which a value is mutated:
-
-```rust,ignore
-let mut v = Vec::new();
-// push things onto v
-let v = v;
-// use v immutably henceforth
-```
-
-### Prefer to bind all `struct` or tuple fields. [FIXME: needs RFC]
-
-When consuming a `struct` or tuple via a `let`, bind all of the fields rather
-than using `..` to elide the ones you don't need. The benefit is that when
-fields are added, the compiler will pinpoint all of the places where that type
-of value was consumed, which will often need to be adjusted to take the new
-field properly into account.
+++ /dev/null
-% Loops
-
-### Prefer `for` to `while`. [FIXME: needs RFC]
-
-A `for` loop is preferable to a `while` loop, unless the loop counts in a
-non-uniform way (making it difficult to express using `for`).
-
-### Guidelines for `loop`. [FIXME]
-
-> **[FIXME]** When is `loop` recommended? Some possibilities:
-> * For optimistic retry algorithms
-> * For servers
-> * To avoid mutating local variables sometimes needed to fit `while`
+++ /dev/null
-% Pattern matching
-
-### Dereference `match` targets when possible. [FIXME: needs RFC]
-
-Prefer
-
-~~~~ignore
-match *foo {
- X(...) => ...
- Y(...) => ...
-}
-~~~~
-
-over
-
-~~~~ignore
-match foo {
- box X(...) => ...
- box Y(...) => ...
-}
-~~~~
-
-<!-- ### Clearly indicate important scopes. **[FIXME: needs RFC]** -->
-
-<!-- If it is important that the destructor for a value be executed at a specific -->
-<!-- time, clearly bind that value using a standalone `let` -->
+++ /dev/null
-% Modules
-
-> **[FIXME]** What general guidelines should we provide for module design?
-
-> We should discuss visibility, nesting, `mod.rs`, and any interesting patterns
-> around modules.
-
-### Headers [FIXME: needs RFC]
-
-Organize module headers as follows:
- 1. [Imports](../style/imports.md).
- 1. `mod` declarations.
- 1. `pub mod` declarations.
-
-### Avoid `path` directives. [FIXME: needs RFC]
-
-Avoid using `#[path="..."]` directives; make the file system and
-module hierarchy match, instead.
-
-### Use the module hierarchy to organize APIs into coherent sections. [FIXME]
-
-> **[FIXME]** Flesh this out with examples; explain what a "coherent
-> section" is with examples.
->
-> The module hierarchy defines both the public and internal API of your module.
-> Breaking related functionality into submodules makes it understandable to both
-> users and contributors to the module.
-
-### Place modules in their own file. [FIXME: needs RFC]
-
-> **[FIXME]**
-> - "<100 lines" is arbitrary, but it's a clearer recommendation
-> than "~1 page" or similar suggestions that vary by screen size, etc.
-
-For all except very short modules (<100 lines) and [tests](../testing/README.md),
-place the module `foo` in a separate file, as in:
-
-```rust,ignore
-pub mod foo;
-
-// in foo.rs or foo/mod.rs
-pub fn bar() { println!("..."); }
-/* ... */
-```
-
-rather than declaring it inline:
-
-```rust,ignore
-pub mod foo {
- pub fn bar() { println!("..."); }
- /* ... */
-}
-```
-
-#### Use subdirectories for modules with children. [FIXME: needs RFC]
-
-For modules that themselves have submodules, place the module in a separate
-directory (e.g., `bar/mod.rs` for a module `bar`) rather than the same directory.
-
-Note the structure of
-[`std::io`](https://doc.rust-lang.org/std/io/). Many of the submodules lack
-children, like
-[`io::fs`](https://doc.rust-lang.org/std/io/fs/)
-and
-[`io::stdio`](https://doc.rust-lang.org/std/io/stdio/).
-On the other hand,
-[`io::net`](https://doc.rust-lang.org/std/io/net/)
-contains submodules, so it lives in a separate directory:
-
-```text
-io/mod.rs
- io/extensions.rs
- io/fs.rs
- io/net/mod.rs
- io/net/addrinfo.rs
- io/net/ip.rs
- io/net/tcp.rs
- io/net/udp.rs
- io/net/unix.rs
- io/pipe.rs
- ...
-```
-
-While it is possible to define all of `io` within a single directory,
-mirroring the module hierarchy in the directory structure makes
-submodules of `io::net` easier to find.
-
-### Consider top-level definitions or reexports. [FIXME: needs RFC]
-
-For modules with submodules,
-define or [reexport](https://doc.rust-lang.org/std/io/#reexports) commonly used
-definitions at the top level:
-
-* Functionality relevant to the module itself or to many of its
- children should be defined in `mod.rs`.
-* Functionality specific to a submodule should live in that
- submodule. Reexport at the top level for the most important or
- common definitions.
-
-For example,
-[`IoError`](https://doc.rust-lang.org/std/io/struct.IoError.html)
-is defined in `io/mod.rs`, since it pertains to the entirety of `io`,
-while
-[`TcpStream`](https://doc.rust-lang.org/std/io/net/tcp/struct.TcpStream.html)
-is defined in `io/net/tcp.rs` and reexported in the `io` module.
-
-### Use internal module hierarchies for organization. [FIXME: needs RFC]
-
-> **[FIXME]**
-> - Referencing internal modules from the standard library is subject to
-> becoming outdated.
-
-Internal module hierarchies (i.e., private submodules) may be used to
-hide implementation details that are not part of the module's API.
-
-For example, in [`std::io`](https://doc.rust-lang.org/std/io/), `mod mem`
-provides implementations for
-[`BufReader`](https://doc.rust-lang.org/std/io/struct.BufReader.html)
-and
-[`BufWriter`](https://doc.rust-lang.org/std/io/struct.BufWriter.html),
-but these are re-exported in `io/mod.rs` at the top level of the module:
-
-```rust,ignore
-// libstd/io/mod.rs
-
-pub use self::mem::{MemReader, BufReader, MemWriter, BufWriter};
-/* ... */
-mod mem;
-```
-
-This hides the detail that there even exists a `mod mem` in `io`, and
-helps keep code organized while offering freedom to change the
-implementation.
+++ /dev/null
-% Traits
-
-Traits are probably Rust's most complex feature, supporting a wide range of use
-cases and design tradeoffs. Patterns of trait usage are still emerging.
-
-### Know whether a trait will be used as an object. [FIXME: needs RFC]
-
-Trait objects have some [significant limitations](objects.md): methods
-invoked through a trait object cannot use generics, and cannot use
-`Self` except in receiver position.
-
-When designing a trait, decide early on whether the trait will be used
-as an [object](objects.md) or as a [bound on generics](generics.md);
-the tradeoffs are discussed in each of the linked sections.
-
-If a trait is meant to be used as an object, its methods should take
-and return trait objects rather than use generics.
-
-
-### Default methods [FIXME]
-
-> **[FIXME]** Guidelines for default methods.
+++ /dev/null
-% Common traits
-
-### Eagerly implement common traits. [FIXME: needs RFC]
-
-Rust's trait system does not allow _orphans_: roughly, every `impl` must live
-either in the crate that defines the trait or the implementing
-type. Consequently, crates that define new types should eagerly implement all
-applicable, common traits.
-
-To see why, consider the following situation:
-
-* Crate `std` defines trait `Debug`.
-* Crate `url` defines type `Url`, without implementing `Debug`.
-* Crate `webapp` imports from both `std` and `url`.
-
-There is no way for `webapp` to add `Debug` to `url`, since it defines neither.
-(Note: the newtype pattern can provide an efficient, but inconvenient
-workaround; see [newtype for views](../types/newtype.md))
-
-The most important common traits to implement from `std` are:
-
-```text
-Clone, Debug, Hash, Eq
-```
-
-#### When safe, derive or otherwise implement `Send` and `Share`. [FIXME]
-
-> **[FIXME]**. This guideline is in flux while the "opt-in" nature of
-> built-in traits is being decided. See https://github.com/rust-lang/rfcs/pull/127
-
-### Prefer to derive, rather than implement. [FIXME: needs RFC]
-
-Deriving saves implementation effort, makes correctness trivial, and
-automatically adapts to upstream changes.
-
-### Do not overload operators in surprising ways. [FIXME: needs RFC]
-
-Operators with built in syntax (`*`, `|`, and so on) can be provided for a type
-by implementing the traits in `core::ops`. These operators come with strong
-expectations: implement `Mul` only for an operation that bears some resemblance
-to multiplication (and shares the expected properties, e.g. associativity), and
-so on for the other traits.
-
-### The `Drop` trait
-
-The `Drop` trait is treated specially by the compiler as a way of
-associating destructors with types. See
-[the section on destructors](../../ownership/destructors.md) for
-guidance.
-
-### The `Deref`/`DerefMut` traits
-
-#### Use `Deref`/`DerefMut` only for smart pointers. [FIXME: needs RFC]
-
-The `Deref` traits are used implicitly by the compiler in many circumstances,
-and interact with method resolution. The relevant rules are designed
-specifically to accommodate smart pointers, and so the traits should be used
-only for that purpose.
-
-#### Do not fail within a `Deref`/`DerefMut` implementation. [FIXME: needs RFC]
-
-Because the `Deref` traits are invoked implicitly by the compiler in sometimes
-subtle ways, failure during dereferencing can be extremely confusing. If a
-dereference might not succeed, target the `Deref` trait as a `Result` or
-`Option` type instead.
-
-#### Avoid inherent methods when implementing `Deref`/`DerefMut` [FIXME: needs RFC]
-
-The rules around method resolution and `Deref` are in flux, but inherent methods
-on a type implementing `Deref` are likely to shadow any methods of the referent
-with the same name.
+++ /dev/null
-% Using traits to add extension methods
-
-> **[FIXME]** Elaborate.
-
-### Consider using default methods rather than extension traits **[FIXME]**
-
-> **[FIXME]** Elaborate.
+++ /dev/null
-% Using traits for bounds on generics
-
-The most widespread use of traits is for writing generic functions or types. For
-example, the following signature describes a function for consuming any iterator
-yielding items of type `A` to produce a collection of `A`:
-
-```rust,ignore
-fn from_iter<T: Iterator<A>>(iterator: T) -> SomeCollection<A>
-```
-
-Here, the `Iterator` trait specifies an interface that a type `T` must
-explicitly implement to be used by this generic function.
-
-**Pros**:
-
-* _Reusability_. Generic functions can be applied to an open-ended collection of
- types, while giving a clear contract for the functionality those types must
- provide.
-* _Static dispatch and optimization_. Each use of a generic function is
- specialized ("monomorphized") to the particular types implementing the trait
- bounds, which means that (1) invocations of trait methods are static, direct
- calls to the implementation and (2) the compiler can inline and otherwise
- optimize these calls.
-* _Inline layout_. If a `struct` or `enum` type is generic over some type
-  parameter `T`, values of type `T` will be laid out _inline_ in the
-  `struct`/`enum`, without any indirection.
-* _Inference_. Since the type parameters to generic functions can usually be
- inferred, generic functions can help cut down on verbosity in code where
- explicit conversions or other method calls would usually be necessary. See the
- overloading/implicits use case below.
-* _Precise types_. Because generics give a _name_ to the specific type
- implementing a trait, it is possible to be precise about places where that
- exact type is required or produced. For example, a function
-
- ```rust,ignore
- fn binary<T: Trait>(x: T, y: T) -> T
- ```
-
- is guaranteed to consume and produce elements of exactly the same type `T`; it
- cannot be invoked with parameters of different types that both implement
- `Trait`.
-
-**Cons**:
-
-* _Code size_. Specializing generic functions means that the function body is
- duplicated. The increase in code size must be weighed against the performance
- benefits of static dispatch.
-* _Homogeneous types_. This is the other side of the "precise types" coin: if
- `T` is a type parameter, it stands for a _single_ actual type. So for example
- a `Vec<T>` contains elements of a single concrete type (and, indeed, the
- vector representation is specialized to lay these out in line). Sometimes
- heterogeneous collections are useful; see
- trait objects below.
-* _Signature verbosity_. Heavy use of generics can bloat function signatures.
- **[Ed. note]** This problem may be mitigated by some language improvements; stay tuned.
-
-### Favor widespread traits. **[FIXME: needs RFC]**
-
-Generic types are a form of abstraction, which entails a mental indirection: if
-a function takes an argument of type `T` bounded by `Trait`, clients must first
-think about the concrete types that implement `Trait` to understand how and when
-the function is callable.
-
-To keep the cost of abstraction low, favor widely-known traits. Whenever
-possible, implement and use traits provided as part of the standard library. Do
-not introduce new traits for generics lightly; wait until there are a wide range
-of types that can implement the trait.
+++ /dev/null
-% Using trait objects
-
-> **[FIXME]** What are uses of trait objects other than heterogeneous collections?
-
-Trait objects are useful primarily when _heterogeneous_ collections of objects
-need to be treated uniformly; it is the closest that Rust comes to
-object-oriented programming.
-
-```rust,ignore
-struct Frame { ... }
-struct Button { ... }
-struct Label { ... }
-
-trait Widget { ... }
-
-impl Widget for Frame { ... }
-impl Widget for Button { ... }
-impl Widget for Label { ... }
-
-impl Frame {
- fn new(contents: &[Box<Widget>]) -> Frame {
- ...
- }
-}
-
-fn make_gui() -> Box<Widget> {
- let b: Box<Widget> = box Button::new(...);
- let l: Box<Widget> = box Label::new(...);
-
- box Frame::new([b, l]) as Box<Widget>
-}
-```
-
-By using trait objects, we can set up a GUI framework with a `Frame` widget that
-contains a heterogeneous collection of children widgets.
-
-**Pros**:
-
-* _Heterogeneity_. When you need it, you really need it.
-* _Code size_. Unlike generics, trait objects do not generate specialized
- (monomorphized) versions of code, which can greatly reduce code size.
-
-**Cons**:
-
-* _No generic methods_. Trait objects cannot currently provide generic methods.
-* _Dynamic dispatch and fat pointers_. Trait objects inherently involve
- indirection and vtable dispatch, which can carry a performance penalty.
-* _No Self_. Except for the method receiver argument, methods on trait objects
- cannot use the `Self` type.
+++ /dev/null
-% Using traits for overloading
-
-> **[FIXME]** Elaborate.
-
-> **[FIXME]** We need to decide on guidelines for this use case. There are a few
-> patterns emerging in current Rust code, but it's not clear how widespread they
-> should be.
+++ /dev/null
-% Using traits to share implementations
-
-> **[FIXME]** Elaborate.
-
-> **[FIXME]** We probably want to discourage this, at least when used in a way
-> that is publicly exposed.
-
-Traits that provide default implementations for functions can provide code reuse
-across types. For example, a `print` method can be defined across multiple
-types as follows:
-
-``` Rust
-trait Printable {
- // Default method implementation
- fn print(&self) { println!("{:?}", *self) }
-}
-
-impl Printable for i32 {}
-
-impl Printable for String {
- fn print(&self) { println!("{}", *self) }
-}
-
-impl Printable for bool {}
-
-impl Printable for f32 {}
-```
-
-This allows the implementation of `print` to be shared across types, yet
-overridden where needed, as seen in the `impl` for `String`.
+++ /dev/null
-% Data types
-
-### Use custom types to imbue meaning; do not abuse `bool`, `Option` or other core types. **[FIXME: needs RFC]**
-
-Prefer
-
-```rust,ignore
-let w = Widget::new(Small, Round)
-```
-
-over
-
-```rust,ignore
-let w = Widget::new(true, false)
-```
-
-Core types like `bool`, `u8` and `Option` have many possible interpretations.
-
-Use custom types (whether `enum`s, `struct`, or tuples) to convey
-interpretation and invariants. In the above example,
-it is not immediately clear what `true` and `false` are conveying without
-looking up the argument names, but `Small` and `Round` are more suggestive.
-
-Using custom types makes it easier to expand the
-options later on, for example by adding an `ExtraLarge` variant.
-
-See [the newtype pattern](newtype.md) for a no-cost way to wrap
-existing types with a distinguished name.
-
-### Prefer private fields, except for passive data. **[FIXME: needs RFC]**
-
-Making a field public is a strong commitment: it pins down a representation
-choice, _and_ prevents the type from providing any validation or maintaining any
-invariants on the contents of the field, since clients can mutate it arbitrarily.
-
-Public fields are most appropriate for `struct` types in the C spirit: compound,
-passive data structures. Otherwise, consider providing getter/setter methods
-and hiding fields instead.
-
-> **[FIXME]** Cross-reference validation for function arguments.
-
-### Use custom `enum`s for alternatives, `bitflags` for C-style flags. **[FIXME: needs RFC]**
-
-Rust supports `enum` types with "custom discriminants":
-
-~~~~
-enum Color {
- Red = 0xff0000,
- Green = 0x00ff00,
- Blue = 0x0000ff
-}
-~~~~
-
-Custom discriminants are useful when an `enum` type needs to be serialized to an
-integer value compatibly with some other system/language. They support
-"typesafe" APIs: by taking a `Color`, rather than an integer, a function is
-guaranteed to get well-formed inputs, even if it later views those inputs as
-integers.
-
-An `enum` allows an API to request exactly one choice from among many. Sometimes
-an API's input is instead the presence or absence of a set of flags. In C code,
-this is often done by having each flag correspond to a particular bit, allowing
-a single integer to represent, say, 32 or 64 flags. Rust's `std::bitflags`
-module provides a typesafe way for doing so.
-
-### Phantom types. [FIXME]
-
-> **[FIXME]** Add some material on phantom types (https://blog.mozilla.org/research/2014/06/23/static-checking-of-units-in-servo/)
+++ /dev/null
-% Conversions between types
-
-### Associate conversions with the most specific type involved. **[FIXME: needs RFC]**
-
-When in doubt, prefer `to_`/`as_`/`into_` to `from_`, because they are
-more ergonomic to use (and can be chained with other methods).
-
-For many conversions between two types, one of the types is clearly more
-"specific": it provides some additional invariant or interpretation that is not
-present in the other type. For example, `str` is more specific than `&[u8]`,
-since it is a utf-8 encoded sequence of bytes.
-
-Conversions should live with the more specific of the involved types. Thus,
-`str` provides both the `as_bytes` method and the `from_utf8` constructor for
-converting to and from `&[u8]` values. Besides being intuitive, this convention
-avoids polluting concrete types like `&[u8]` with endless conversion methods.
-
-### Explicitly mark lossy conversions, or do not label them as conversions. **[FIXME: needs RFC]**
-
-If a function's name implies that it is a conversion (prefix `from_`, `as_`,
-`to_` or `into_`), but the function loses information, add a suffix `_lossy` or
-otherwise indicate the lossyness. Consider avoiding the conversion name prefix.
+++ /dev/null
-% The newtype pattern
-
-A "newtype" is a tuple or `struct` with a single field. The terminology is borrowed from Haskell.
-
-Newtypes are a zero-cost abstraction: they introduce a new, distinct name for an
-existing type, with no runtime overhead when converting between the two types.
-
-### Use newtypes to provide static distinctions. [FIXME: needs RFC]
-
-Newtypes can statically distinguish between different interpretations of an
-underlying type.
-
-For example, a `f64` value might be used to represent a quantity in miles or in
-kilometers. Using newtypes, we can keep track of the intended interpretation:
-
-```rust,ignore
-struct Miles(pub f64);
-struct Kilometers(pub f64);
-
-impl Miles {
- fn as_kilometers(&self) -> Kilometers { ... }
-}
-impl Kilometers {
- fn as_miles(&self) -> Miles { ... }
-}
-```
-
-Once we have separated these two types, we can statically ensure that we do not
-confuse them. For example, the function
-
-```rust,ignore
-fn are_we_there_yet(distance_travelled: Miles) -> bool { ... }
-```
-
-cannot accidentally be called with a `Kilometers` value. The compiler will
-remind us to perform the conversion, thus averting certain
-[catastrophic bugs](http://en.wikipedia.org/wiki/Mars_Climate_Orbiter).
-
-### Use newtypes with private fields for hiding. [FIXME: needs RFC]
-
-A newtype can be used to hide representation details while making precise
-promises to the client.
-
-For example, consider a function `my_transform` that returns a compound iterator
-type `Enumerate<Skip<vec::MoveItems<T>>>`. We wish to hide this type from the
-client, so that the client's view of the return type is roughly `Iterator<(usize,
-T)>`. We can do so using the newtype pattern:
-
-```rust,ignore
-struct MyTransformResult<T>(Enumerate<Skip<vec::MoveItems<T>>>);
-impl<T> Iterator<(usize, T)> for MyTransformResult<T> { ... }
-
-fn my_transform<T, Iter: Iterator<T>>(iter: Iter) -> MyTransformResult<T> {
- ...
-}
-```
-
-Aside from simplifying the signature, this use of newtypes allows us to
-expose and promise less to the client. The client does not know _how_ the result
-iterator is constructed or represented, which means the representation can
-change in the future without breaking client code.
-
-> **[FIXME]** Interaction with auto-deref.
-
-### Use newtypes to provide cost-free _views_ of another type. **[FIXME]**
-
-> **[FIXME]** Describe the pattern of using newtypes to provide a new set of
-> inherent or trait methods, providing a different perspective on the underlying
-> type.
+++ /dev/null
-% Ownership and resource management
-
-> **[FIXME]** Add general remarks about ownership/resources here.
+++ /dev/null
-% The builder pattern
-
-Some data structures are complicated to construct, due to their construction needing:
-
-* a large number of inputs
-* compound data (e.g. slices)
-* optional configuration data
-* choice between several flavors
-
-which can easily lead to a large number of distinct constructors with
-many arguments each.
-
-If `T` is such a data structure, consider introducing a `T` _builder_:
-
-1. Introduce a separate data type `TBuilder` for incrementally configuring a `T`
- value. When possible, choose a better name: e.g. `Command` is the builder for
- `Process`.
-2. The builder constructor should take as parameters only the data _required_ to
- make a `T`.
-3. The builder should offer a suite of convenient methods for configuration,
- including setting up compound inputs (like slices) incrementally.
- These methods should return `self` to allow chaining.
-4. The builder should provide one or more "_terminal_" methods for actually building a `T`.
-
-The builder pattern is especially appropriate when building a `T` involves side
-effects, such as spawning a thread or launching a process.
-
-In Rust, there are two variants of the builder pattern, differing in the
-treatment of ownership, as described below.
-
-### Non-consuming builders (preferred):
-
-In some cases, constructing the final `T` does not require the builder itself to
-be consumed. The following variant on
-[`std::process::Command`](https://doc.rust-lang.org/stable/std/process/struct.Command.html)
-is one example:
-
-```rust,ignore
-// NOTE: the actual Command API does not use owned Strings;
-// this is a simplified version.
-
-pub struct Command {
- program: String,
- args: Vec<String>,
- cwd: Option<String>,
- // etc
-}
-
-impl Command {
- pub fn new(program: String) -> Command {
- Command {
- program: program,
- args: Vec::new(),
- cwd: None,
- }
- }
-
- /// Add an argument to pass to the program.
- pub fn arg<'a>(&'a mut self, arg: String) -> &'a mut Command {
- self.args.push(arg);
- self
- }
-
- /// Add multiple arguments to pass to the program.
- pub fn args<'a>(&'a mut self, args: &[String])
- -> &'a mut Command {
- self.args.push_all(args);
- self
- }
-
- /// Set the working directory for the child process.
- pub fn cwd<'a>(&'a mut self, dir: String) -> &'a mut Command {
- self.cwd = Some(dir);
- self
- }
-
- /// Executes the command as a child process, which is returned.
- pub fn spawn(&self) -> std::io::Result<Process> {
- ...
- }
-}
-```
-
-Note that the `spawn` method, which actually uses the builder configuration to
-spawn a process, takes the builder by immutable reference. This is possible
-because spawning the process does not require ownership of the configuration
-data.
-
-Because the terminal `spawn` method only needs a reference, the configuration
-methods take and return a mutable borrow of `self`.
-
-#### The benefit
-
-By using borrows throughout, `Command` can be used conveniently for both
-one-liner and more complex constructions:
-
-```rust,ignore
-// One-liners
-Command::new("/bin/cat").arg("file.txt").spawn();
-
-// Complex configuration
-let mut cmd = Command::new("/bin/ls");
-cmd.arg(".");
-
-if size_sorted {
- cmd.arg("-S");
-}
-
-cmd.spawn();
-```
-
-### Consuming builders:
-
-Sometimes builders must transfer ownership when constructing the final type
-`T`, meaning that the terminal methods must take `self` rather than `&self`:
-
-```rust,ignore
-// A simplified excerpt from std::thread::Builder
-
-impl ThreadBuilder {
- /// Name the thread-to-be. Currently the name is used for identification
- /// only in failure messages.
- pub fn named(mut self, name: String) -> ThreadBuilder {
- self.name = Some(name);
- self
- }
-
- /// Redirect thread-local stdout.
- pub fn stdout(mut self, stdout: Box<Writer + Send>) -> ThreadBuilder {
- self.stdout = Some(stdout);
- // ^~~~~~ this is owned and cannot be cloned/re-used
- self
- }
-
- /// Creates and executes a new child thread.
- pub fn spawn(self, f: proc():Send) {
- // consume self
- ...
- }
-}
-```
-
-Here, the `stdout` configuration involves passing ownership of a `Writer`,
-which must be transferred to the thread upon construction (in `spawn`).
-
-When the terminal methods of the builder require ownership, there is a basic tradeoff:
-
-* If the other builder methods take/return a mutable borrow, the complex
- configuration case will work well, but one-liner configuration becomes
- _impossible_.
-
-* If the other builder methods take/return an owned `self`, one-liners
- continue to work well but complex configuration is less convenient.
-
-Under the rubric of making easy things easy and hard things possible, _all_
-builder methods for a consuming builder should take and returned an owned
-`self`. Then client code works as follows:
-
-```rust,ignore
-// One-liners
-ThreadBuilder::new().named("my_thread").spawn(proc() { ... });
-
-// Complex configuration
-let mut thread = ThreadBuilder::new();
-thread = thread.named("my_thread_2"); // must re-assign to retain ownership
-
-if reroute {
- thread = thread.stdout(mywriter);
-}
-
-thread.spawn(proc() { ... });
-```
-
-One-liners work as before, because ownership is threaded through each of the
-builder methods until being consumed by `spawn`. Complex configuration,
-however, is more verbose: it requires re-assigning the builder at each step.
+++ /dev/null
-% Cells and smart pointers
-
-> **[FIXME]** Add guidelines about when to use Cell, RefCell, Rc and
-> Arc (and how to use them together).
+++ /dev/null
-% Constructors
-
-### Define constructors as static, inherent methods. [FIXME: needs RFC]
-
-In Rust, "constructors" are just a convention:
-
-```rust,ignore
-impl<T> Vec<T> {
- pub fn new() -> Vec<T> { ... }
-}
-```
-
-Constructors are static (no `self`) inherent methods for the type that they
-construct. Combined with the practice of
-[fully importing type names](../style/imports.md), this convention leads to
-informative but concise construction:
-
-```rust,ignore
-use vec::Vec;
-
-// construct a new vector
-let mut v = Vec::new();
-```
-
-This convention also applied to conversion constructors (prefix `from` rather
-than `new`).
-
-### Provide constructors for passive `struct`s with defaults. [FIXME: needs RFC]
-
-Given the `struct`
-
-```rust,ignore
-pub struct Config {
- pub color: Color,
- pub size: Size,
- pub shape: Shape,
-}
-```
-
-provide a constructor if there are sensible defaults:
-
-```rust,ignore
-impl Config {
- pub fn new() -> Config {
- Config {
- color: Brown,
- size: Medium,
- shape: Square,
- }
- }
-}
-```
-
-which then allows clients to concisely override using `struct` update syntax:
-
-```rust,ignore
-Config { color: Red, .. Config::new() };
-```
-
-See the [guideline for field privacy](../features/types/README.md) for
-discussion on when to create such "passive" `struct`s with public
-fields.
+++ /dev/null
-% Destructors
-
-Unlike constructors, destructors in Rust have a special status: they are added
-by implementing `Drop` for a type, and they are automatically invoked as values
-go out of scope.
-
-> **[FIXME]** This section needs to be expanded.
-
-### Destructors should not fail. [FIXME: needs RFC]
-
-Destructors are executed on thread failure, and in that context a failing
-destructor causes the program to abort.
-
-Instead of failing in a destructor, provide a separate method for checking for
-clean teardown, e.g. a `close` method, that returns a `Result` to signal
-problems.
-
-### Destructors should not block. [FIXME: needs RFC]
-
-Similarly, destructors should not invoke blocking operations, which can make
-debugging much more difficult. Again, consider providing a separate method for
-preparing for an infallible, nonblocking teardown.
+++ /dev/null
-% RAII
-
-Resource Acquisition is Initialization
-
-> **[FIXME]** Explain the RAII pattern and give best practices.
-
-### Whenever possible, tie resource access to guard scopes [FIXME]
-
-> **[FIXME]** Example: Mutex guards guarantee that access to the
-> protected resource only happens when the guard is in scope.
-
-`must_use`
+++ /dev/null
-% FFI and platform-specific code **[FIXME]**
-
-> **[FIXME]** Not sure where this should live.
-
-When writing cross-platform code, group platform-specific code into a
-module called `platform`. Avoid `#[cfg]` directives outside this
-`platform` module.
+++ /dev/null
-% Safety and guarantees
-
-> **[FIXME]** Is there a better phrase than "strong guarantees" that encompasses
-> both e.g. memory safety and e.g. data structure invariants?
-
-A _guarantee_ is a property that holds no matter what client code does, unless
-the client explicitly opts out:
-
-* Rust guarantees memory safety and data-race freedom, with `unsafe`
- blocks as an opt-out mechanism.
-
-* APIs in Rust often provide their own guarantees. For example, `std::str`
-guarantees that its underlying buffer is valid utf-8. The `std::path::Path` type
-guarantees no interior nulls. Both strings and paths provide `unsafe` mechanisms
-for opting out of these guarantees (and thereby avoiding runtime checks).
-
-Thinking about guarantees is an essential part of writing good Rust code. The
-rest of this subsection outlines some cross-cutting principles around
-guarantees.
+++ /dev/null
-% Library-level guarantees
-
-Most libraries rely on internal invariants, e.g. about their data, resource
-ownership, or protocol states. In Rust, broken invariants cannot produce
-segfaults, but they can still lead to wrong answers.
-
-### Provide library-level guarantees whenever practical. **[FIXME: needs RFC]**
-
-Library-level invariants should be turned into guarantees whenever
-practical. They should hold no matter what the client does, modulo
-explicit opt-outs. Depending on the kind of invariant, this can be
-achieved through a combination of static and dynamic enforcement, as
-described below.
-
-#### Static enforcement:
-
-Guaranteeing invariants almost always requires _hiding_,
-i.e. preventing the client from directly accessing or modifying
-internal data.
-
-For example, the representation of the `str` type is hidden,
-which means that any value of type `str` must have been produced
-through an API under the control of the `str` module, and these
-APIs in turn ensure valid utf-8 encoding.
-
-Rust's type system makes it possible to provide guarantees even while
-revealing more of the representation than usual. For example, the
-`as_bytes()` method on `&str` gives a _read-only_ view into the
-underlying buffer, which cannot be used to violate the utf-8 property.
-
-#### Dynamic enforcement:
-
-Malformed inputs from the client are hazards to library-level
-guarantees, so library APIs should validate their input.
-
-For example, `std::str::from_utf8_owned` attempts to convert a `u8`
-slice into an owned string, but dynamically checks that the slice is
-valid utf-8 and returns `Err` if not.
-
-See
-[the discussion on input validation](../features/functions-and-methods/input.md)
-for more detail.
-
-
-### Prefer static enforcement of guarantees. **[FIXME: needs RFC]**
-
-Static enforcement provides two strong benefits over dynamic enforcement:
-
-* Bugs are caught at compile time.
-* There is no runtime cost.
-
-Sometimes purely static enforcement is impossible or impractical. In these
-cases, a library should check as much as possible statically, but defer to
-dynamic checks where needed.
-
-For example, the `std::string` module exports a `String` type with the guarantee
-that all instances are valid utf-8:
-
-* Any _consumer_ of a `String` is statically guaranteed utf-8 contents. For example,
- the `append` method can push a `&str` onto the end of a `String` without
- checking anything dynamically, since the existing `String` and `&str` are
- statically guaranteed to be in utf-8.
-
-* Some _producers_ of a `String` must perform dynamic checks. For example, the
- `from_utf8` function attempts to convert a `Vec<u8>` into a `String`, but
- dynamically checks that the contents are utf-8.
-
-### Provide opt-outs with caution; make them explicit. **[FIXME: needs RFC]**
-
-Providing library-level guarantees sometimes entails inconvenience (for static
-checks) or overhead (for dynamic checks). So it is sometimes desirable to allow
-clients to sidestep this checking, while promising to use the API in a way that
-still provides the guarantee. Such escape hatches should only be introduced when
-there is a demonstrated need for them.
-
-It should be trivial for clients to audit their use of the library for
-escape hatches.
-
-See
-[the discussion on input validation](../features/functions-and-methods/input.md)
-for conventions on marking opt-out functions.
+++ /dev/null
-% Using `unsafe`
-
-### Unconditionally guarantee safety, or mark API as `unsafe`. **[FIXME: needs RFC]**
-
-Memory safety, type safety, and data race freedom are basic assumptions for all
-Rust code.
-
-APIs that use `unsafe` blocks internally thus have two choices:
-
-* They can guarantee safety _unconditionally_ (i.e., regardless of client
- behavior or inputs) and be exported as safe code. Any safety violation is then
- the library's fault, not the client's fault.
-
-* They can export potentially unsafe functions with the `unsafe` qualifier. In
- this case, the documentation should make very clear the conditions under which
- safety is guaranteed.
-
-The result is that a client program can never violate safety merely by having a
-bug; it must have explicitly opted out by using an `unsafe` block.
-
-Of the two options for using `unsafe`, creating such safe abstractions (the
-first option above) is strongly preferred.
+++ /dev/null
-% Style
-
-This section gives a set of strict rules for styling Rust code.
-
-> **[FIXME]** General remarks about the style guidelines
+++ /dev/null
-% Braces, semicolons, and commas [FIXME: needs RFC]
-
-### Opening braces always go on the same line.
-
-```rust,ignore
-fn foo() {
- ...
-}
-
-fn frobnicate(a: Bar, b: Bar,
- c: Bar, d: Bar)
- -> Bar {
- ...
-}
-
-trait Bar {
- fn baz(&self);
-}
-
-impl Bar for Baz {
- fn baz(&self) {
- ...
- }
-}
-
-frob(|x| {
- x.transpose()
-})
-```
-
-### `match` arms get braces, except for single-line expressions.
-
-```rust,ignore
-match foo {
- bar => baz,
- quux => {
- do_something();
- do_something_else()
- }
-}
-```
-
-### `return` statements get semicolons.
-
-```rust,ignore
-fn foo() {
- do_something();
-
- if condition() {
- return;
- }
-
- do_something_else();
-}
-```
-
-### Trailing commas
-
-> **[FIXME]** We should have a guideline for when to include trailing
-> commas in `struct`s, `match`es, function calls, etc.
->
-> One possible rule: a trailing comma should be included whenever the
-> closing delimiter appears on a separate line:
-
-```rust,ignore
-Foo { bar: 0, baz: 1 }
-
-Foo {
- bar: 0,
- baz: 1,
-}
-
-match a_thing {
- None => 0,
- Some(x) => 1,
-}
-```
+++ /dev/null
-% Comments [RFC #505]
-
-### Avoid block comments.
-
-Use line comments:
-
-```rust
-// Wait for the main thread to return, and set the process error code
-// appropriately.
-```
-
-Instead of:
-
-``` rust
-/*
- * Wait for the main thread to return, and set the process error code
- * appropriately.
- */
-```
-
-## Doc comments
-
-Doc comments are prefixed by three slashes (`///`) and indicate
-documentation that you would like to be included in Rustdoc's output.
-They support
-[Markdown syntax](https://en.wikipedia.org/wiki/Markdown)
-and are the main way of documenting your public APIs.
-
-The supported markdown syntax includes all of the extensions listed in the
-[GitHub Flavored Markdown]
-(https://help.github.com/articles/github-flavored-markdown) documentation,
-plus superscripts.
-
-### Summary line
-
-The first line in any doc comment should be a single-line short sentence
-providing a summary of the code. This line is used as a short summary
-description throughout Rustdoc's output, so it's a good idea to keep it
-short.
-
-### Sentence structure
-
-All doc comments, including the summary line, should begin with a
-capital letter and end with a period, question mark, or exclamation
-point. Prefer full sentences to fragments.
-
-The summary line should be written in
-[third person singular present indicative form]
-(http://en.wikipedia.org/wiki/English_verbs#Third_person_singular_present).
-Basically, this means write "Returns" instead of "Return".
-
-For example:
-
-```rust,ignore
-/// Sets up a default runtime configuration, given compiler-supplied arguments.
-///
-/// This function will block until the entire pool of M:N schedulers has
-/// exited. This function also requires a local thread to be available.
-///
-/// # Arguments
-///
-/// * `argc` & `argv` - The argument vector. On Unix this information is used
-/// by `os::args`.
-/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
-/// Once this procedure exits, the scheduling pool will begin to shut
-/// down. The entire pool (and this function) will only return once
-/// all child threads have finished executing.
-///
-/// # Return value
-///
-/// The return value is used as the process return code. 0 on success, 101 on
-/// error.
-```
-
-### Code snippets
-
-Only use inner doc comments `//!` to write crate and module-level documentation,
-nothing else. When using `mod` blocks, prefer `///` outside of the block:
-
-```rust
-/// This module contains tests
-mod test {
- // ...
-}
-```
-
-over
-
-```rust
-mod test {
- //! This module contains tests
-
- // ...
-}
-```
-
-### Avoid inner doc comments.
-
-Use inner doc comments _only_ to document crates and file-level modules:
-
-```rust,ignore
-//! The core library.
-//!
-//! The core library is a something something...
-```
-
-### Explain context.
-
-Rust doesn't have special constructors, only functions that return new
-instances. These aren't visible in the automatically generated documentation
-for a type, so you should specifically link to them:
-
-```rust,ignore
-/// An iterator that yields `None` forever after the underlying iterator
-/// yields `None` once.
-///
-/// These can be created through
-/// [`iter.fuse()`](trait.Iterator.html#method.fuse).
-pub struct Fuse<I> {
- // ...
-}
-```
+++ /dev/null
-## `return` [RFC #968]
-
-Terminate `return` statements with semicolons:
-
-``` rust,ignore
-fn foo(bar: i32) -> Option<i32> {
- if some_condition() {
- return None;
- }
-
- ...
-}
-```
+++ /dev/null
-% Imports [FIXME: needs RFC]
-
-The imports of a crate/module should consist of the following
-sections, in order, with a blank space between each:
-
-* `extern crate` directives
-* external `use` imports
-* local `use` imports
-* `pub use` imports
-
-For example:
-
-```rust,ignore
-// Crates.
-extern crate getopts;
-extern crate mylib;
-
-// Standard library imports.
-use getopts::{optopt, getopts};
-use std::os;
-
-// Import from a library that we wrote.
-use mylib::webserver;
-
-// Will be reexported when we import this module.
-pub use self::types::Webdata;
-```
-
-### Avoid `use *`, except in tests.
-
-Glob imports have several downsides:
-* They make it harder to tell where names are bound.
-* They are forwards-incompatible, since new upstream exports can clash
- with existing names.
-
-When writing a [`test` submodule](../testing/README.md), importing `super::*` is appropriate
-as a convenience.
-
-### Prefer fully importing types/traits while module-qualifying functions.
-
-For example:
-
-```rust,ignore
-use option::Option;
-use mem;
-
-let i: isize = mem::transmute(Option(0));
-```
-
-> **[FIXME]** Add rationale.
+++ /dev/null
-% Naming conventions
-
-### General conventions [RFC #430]
-
-> The guidelines below were approved by [RFC #430](https://github.com/rust-lang/rfcs/pull/430).
-
-In general, Rust tends to use `CamelCase` for "type-level" constructs
-(types and traits) and `snake_case` for "value-level" constructs. More
-precisely:
-
-| Item | Convention |
-| ---- | ---------- |
-| Crates | `snake_case` (but prefer single word) |
-| Modules | `snake_case` |
-| Types | `CamelCase` |
-| Traits | `CamelCase` |
-| Enum variants | `CamelCase` |
-| Functions | `snake_case` |
-| Methods | `snake_case` |
-| General constructors | `new` or `with_more_details` |
-| Conversion constructors | `from_some_other_type` |
-| Local variables | `snake_case` |
-| Static variables | `SCREAMING_SNAKE_CASE` |
-| Constant variables | `SCREAMING_SNAKE_CASE` |
-| Type parameters | concise `CamelCase`, usually single uppercase letter: `T` |
-| Lifetimes | short, lowercase: `'a` |
-
-<p>
-In `CamelCase`, acronyms count as one word: use `Uuid` rather than
-`UUID`. In `snake_case`, acronyms are lower-cased: `is_xid_start`.
-
-In `snake_case` or `SCREAMING_SNAKE_CASE`, a "word" should never
-consist of a single letter unless it is the last "word". So, we have
-`btree_map` rather than `b_tree_map`, but `PI_2` rather than `PI2`.
-
-### Referring to types in function/method names [RFC 344]
-
-> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344).
-
-Function names often involve type names, the most common example being conversions
-like `as_slice`. If the type has a purely textual name (ignoring parameters), it
-is straightforward to convert between type conventions and function conventions:
-
-Type name | Text in methods
---------- | ---------------
-`String` | `string`
-`Vec<T>` | `vec`
-`YourType`| `your_type`
-
-Types that involve notation follow the convention below. There is some
-overlap on these rules; apply the most specific applicable rule:
-
-Type name | Text in methods
---------- | ---------------
-`&str` | `str`
-`&[T]` | `slice`
-`&mut [T]`| `mut_slice`
-`&[u8]` | `bytes`
-`&T` | `ref`
-`&mut T` | `mut`
-`*const T`| `ptr`
-`*mut T` | `mut_ptr`
-
-### Avoid redundant prefixes [RFC 356]
-
-> The guidelines below were approved by [RFC #356](https://github.com/rust-lang/rfcs/pull/356).
-
-Names of items within a module should not be prefixed with that module's name:
-
-Prefer
-
-```rust,ignore
-mod foo {
- pub struct Error { ... }
-}
-```
-
-over
-
-```rust,ignore
-mod foo {
- pub struct FooError { ... }
-}
-```
-
-This convention avoids stuttering (like `io::IoError`). Library clients can
-rename on import to avoid clashes.
-
-### Getter/setter methods [RFC 344]
-
-> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344).
-
-Some data structures do not wish to provide direct access to their fields, but
-instead offer "getter" and "setter" methods for manipulating the field state
-(often providing checking or other functionality).
-
-The convention for a field `foo: T` is:
-
-* A method `foo(&self) -> &T` for getting the current value of the field.
-* A method `set_foo(&self, val: T)` for setting the field. (The `val` argument
- here may take `&T` or some other type, depending on the context.)
-
-Note that this convention is about getters/setters on ordinary data types, *not*
-on [builder objects](../../ownership/builders.html).
-
-### Escape hatches [FIXME]
-
-> **[FIXME]** Should we standardize a convention for functions that may break API
-> guarantees? e.g. `ToCStr::to_c_str_unchecked`
-
-### Predicates
-
-* Simple boolean predicates should be prefixed with `is_` or another
- short question word, e.g., `is_empty`.
-* Common exceptions: `lt`, `gt`, and other established predicate names.
+++ /dev/null
-% Common container/wrapper methods [FIXME: needs RFC]
-
-Containers, wrappers, and cells all provide ways to access the data
-they enclose. Accessor methods often have variants to access the data
-by value, by reference, and by mutable reference.
-
-In general, the `get` family of methods is used to access contained
-data without any risk of thread failure; they return `Option` as
-appropriate. This name is chosen rather than names like `find` or
-`lookup` because it is appropriate for a wider range of container types.
-
-#### Containers
-
-For a container with keys/indexes of type `K` and elements of type `V`:
-
-```rust,ignore
-// Look up element without failing
-fn get(&self, key: K) -> Option<&V>
-fn get_mut(&mut self, key: K) -> Option<&mut V>
-
-// Convenience for .get(key).map(|elt| elt.clone())
-fn get_clone(&self, key: K) -> Option<V>
-
-// Lookup element, failing if it is not found:
-impl Index<K, V> for Container { ... }
-impl IndexMut<K, V> for Container { ... }
-```
-
-#### Wrappers/Cells
-
-Prefer specific conversion functions like `as_bytes` or `into_vec` whenever
-possible. Otherwise, use:
-
-```rust,ignore
-// Extract contents without failing
-fn get(&self) -> &V
-fn get_mut(&mut self) -> &mut V
-fn unwrap(self) -> V
-```
-
-#### Wrappers/Cells around `Copy` data
-
-```rust,ignore
-// Extract contents without failing
-fn get(&self) -> V
-```
-
-#### `Option`-like types
-
-Finally, we have the cases of types like `Option` and `Result`, which
-play a special role for failure.
-
-For `Option<V>`:
-
-```rust,ignore
-// Extract contents or fail if not available
-fn assert(self) -> V
-fn expect(self, &str) -> V
-```
-
-For `Result<V, E>`:
-
-```rust,ignore
-// Extract the contents of Ok variant; fail if Err
-fn assert(self) -> V
-
-// Extract the contents of Err variant; fail if Ok
-fn assert_err(self) -> E
-```
+++ /dev/null
-% Conversions [Rust issue #7087]
-
-> The guidelines below were approved by [rust issue #7087](https://github.com/rust-lang/rust/issues/7087).
-
-> **[FIXME]** Should we provide standard traits for conversions? Doing
-> so nicely will require
-> [trait reform](https://github.com/rust-lang/rfcs/pull/48) to land.
-
-Conversions should be provided as methods, with names prefixed as follows:
-
-| Prefix | Cost | Consumes convertee |
-| ------ | ---- | ------------------ |
-| `as_` | Free | No |
-| `to_` | Expensive | No |
-| `into_` | Variable | Yes |
-
-<p>
-For example:
-
-* `as_bytes()` gives a `&[u8]` view into a `&str`, which is a no-op.
-* `to_owned()` copies a `&str` to a new `String`.
-* `into_bytes()` consumes a `String` and yields the underlying
- `Vec<u8>`, which is a no-op.
-
-Conversions prefixed `as_` and `into_` typically _decrease abstraction_, either
-exposing a view into the underlying representation (`as`) or deconstructing data
-into its underlying representation (`into`). Conversions prefixed `to_`, on the
-other hand, typically stay at the same level of abstraction but do some work to
-change one representation into another.
-
-> **[FIXME]** The distinctions between conversion methods does not work
-> so well for `from_` conversion constructors. Is that a problem?
+++ /dev/null
-% Iterators
-
-#### Method names [RFC #199]
-
-> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199).
-
-For a container with elements of type `U`, iterator methods should be named:
-
-```rust,ignore
-fn iter(&self) -> T // where T implements Iterator<&U>
-fn iter_mut(&mut self) -> T // where T implements Iterator<&mut U>
-fn into_iter(self) -> T // where T implements Iterator<U>
-```
-
-The default iterator variant yields shared references `&U`.
-
-#### Type names [RFC #344]
-
-> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344).
-
-The name of an iterator type should be the same as the method that
-produces the iterator.
-
-For example:
-
-* `iter` should yield an `Iter`
-* `iter_mut` should yield an `IterMut`
-* `into_iter` should yield an `IntoIter`
-* `keys` should yield `Keys`
-
-These type names make the most sense when prefixed with their owning module,
-e.g. `vec::IntoIter`.
+++ /dev/null
-% Ownership variants [RFC #199]
-
-> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199).
-
-Functions often come in multiple variants: immutably borrowed, mutably
-borrowed, and owned.
-
-The right default depends on the function in question. Variants should
-be marked through suffixes.
-
-#### Immutably borrowed by default
-
-If `foo` uses/produces an immutable borrow by default, use:
-
-* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant.
-* The `_move` suffix (e.g. `foo_move`) for the owned variant.
-
-#### Owned by default
-
-If `foo` uses/produces owned data by default, use:
-
-* The `_ref` suffix (e.g. `foo_ref`) for the immutably borrowed variant.
-* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant.
-
-#### Exceptions
-
-In the case of iterators, the moving variant can also be understood as
-an `into` conversion, `into_iter`, and `for x in v.into_iter()` reads
-arguably better than `for x in v.iter_move()`, so the convention is
-`into_iter`.
-
-For mutably borrowed variants, if the `mut` qualifier is part of a
-type name (e.g. `as_mut_slice`), it should appear as it would appear
-in the type.
+++ /dev/null
-% Organization [FIXME: needs RFC]
-
-> **[FIXME]** What else?
-
-### Reexport the most important types at the crate level.
-
-Crates `pub use` the most common types for convenience, so that clients do not
-have to remember or write the crate's module hierarchy to use these types.
-
-### Define types and operations together.
-
-Type definitions and the functions/methods that operate on them should be
-defined together in a single module, with the type appearing above the
-functions/methods.
+++ /dev/null
-% Whitespace [FIXME: needs RFC]
-
-* Lines must not exceed 99 characters.
-* Use 4 spaces for indentation, _not_ tabs.
-* No trailing whitespace at the end of lines or files.
-
-### Spaces
-
-* Use spaces around binary operators, including the equals sign in attributes:
-
-```rust,ignore
-#[deprecated = "Use `bar` instead."]
-fn foo(a: usize, b: usize) -> usize {
- a + b
-}
-```
-
-* Use a space after colons and commas:
-
-```rust,ignore
-fn foo(a: Bar);
-
-MyStruct { foo: 3, bar: 4 }
-
-foo(bar, baz);
-```
-
-* Use a space after the opening and before the closing brace for
- single line blocks or `struct` expressions:
-
-```rust,ignore
-spawn(proc() { do_something(); })
-
-Point { x: 0.1, y: 0.3 }
-```
-
-### Line wrapping
-
-* For multiline function signatures, each new line should align with the
- first parameter. Multiple parameters per line are permitted:
-
-```rust,ignore
-fn frobnicate(a: Bar, b: Bar,
- c: Bar, d: Bar)
- -> Bar {
- ...
-}
-
-fn foo<T: This,
- U: That>(
- a: Bar,
- b: Bar)
- -> Baz {
- ...
-}
-```
-
-* Multiline function invocations generally follow the same rule as for
- signatures. However, if the final argument begins a new block, the
- contents of the block may begin on a new line, indented one level:
-
-```rust,ignore
-fn foo_bar(a: Bar, b: Bar,
- c: |Bar|) -> Bar {
- ...
-}
-
-// Same line is fine:
-foo_bar(x, y, |z| { z.transpose(y) });
-
-// Indented body on new line is also fine:
-foo_bar(x, y, |z| {
- z.quux();
- z.rotate(x)
-})
-```
-
-> **[FIXME]** Do we also want to allow the following?
->
-> ```rust,ignore
-> frobnicate(
-> arg1,
-> arg2,
-> arg3)
-> ```
->
-> This style could ease the conflict between line length and functions
-> with many parameters (or long method chains).
-
-### Matches
-
-> * **[Deprecated]** If you have multiple patterns in a single `match`
-> arm, write each pattern on a separate line:
->
-> ```rust,ignore
-> match foo {
-> bar(_)
-> | baz => quux,
-> x
-> | y
-> | z => {
-> quuux
-> }
-> }
-> ```
-
-### Alignment
-
-Idiomatic code should not use extra whitespace in the middle of a line
-to provide alignment.
-
-
-```rust,ignore
-// Good
-struct Foo {
- short: f64,
- really_long: f64,
-}
-
-// Bad
-struct Bar {
- short: f64,
- really_long: f64,
-}
-
-// Good
-let a = 0;
-let radius = 7;
-
-// Bad
-let b = 0;
-let diameter = 7;
-```
+++ /dev/null
-% Testing
-
-> **[FIXME]** Add some general remarks about when and how to unit
-> test, versus other kinds of testing. What are our expectations for
-> Rust's core libraries?
+++ /dev/null
-% Unit testing
-
-Unit tests should live in a `tests` submodule at the bottom of the module they
-test. Mark the `tests` submodule with `#[cfg(test)]` so it is only compiled when
-testing.
-
-The `tests` module should contain:
-
-* Imports needed only for testing.
-* Functions marked with `#[test]` striving for full coverage of the parent module's
- definitions.
-* Auxiliary functions needed for writing the tests.
-
-For example:
-
-``` rust
-// Excerpt from std::str
-
-#[cfg(test)]
-mod tests {
- #[test]
- fn test_eq() {
- assert!((eq(&"".to_owned(), &"".to_owned())));
- assert!((eq(&"foo".to_owned(), &"foo".to_owned())));
- assert!((!eq(&"foo".to_owned(), &"bar".to_owned())));
- }
-}
-```
-
-> **[FIXME]** add details about useful macros for testing, e.g. `assert!`
+++ /dev/null
-* [Containers and iteration]()
-* [The visitor pattern]()
-* [Concurrency]()
-* [Documentation]()
-* [Macros]()
* [rust.vim](https://github.com/rust-lang/rust.vim)
* [emacs rust-mode](https://github.com/rust-lang/rust-mode)
+* [sublime-rust](https://github.com/rust-lang/sublime-rust)
* [gedit-config](https://github.com/rust-lang/gedit-config)
* [kate-config](https://github.com/rust-lang/kate-config)
* [nano-config](https://github.com/rust-lang/nano-config)
# We want a version of `range` which doesn't allocate an intermediate list,
# specifically it should use a lazy iterator. In Python 2 this was `xrange`, but
# if we're running with Python 3 then we need to use `range` instead.
-if sys.version_info.major >= 3:
+if sys.version_info[0] >= 3:
xrange = range
#===============================================================================
/// does not use atomics, making it both thread-unsafe as well as significantly
/// faster when updating the reference count.
///
+/// Note: the inherent methods defined on `Arc<T>` are all associated functions,
+/// which means that you have to call them as e.g. `Arc::get_mut(&mut value)`
+/// instead of `value.get_mut()`. This is so that there are no conflicts with
+/// methods on the inner type `T`, which are what you want to call in the
+/// majority of cases.
+///
/// # Examples
///
/// In this example, a large vector of data will be shared by several threads. First we
/// }
/// ```
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
/// nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
/// as `Weak<T>` pointers.
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
#[unsafe_destructor_blind_to_params]
#[inline]
fn drop(&mut self) {
- // This structure has #[unsafe_no_drop_flag], so this drop glue may run
- // more than once (but it is guaranteed to be zeroed after the first if
- // it's run more than once)
- let thin = *self.ptr as *const ();
-
- if thin as usize == mem::POST_DROP_USIZE {
- return;
- }
-
// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
// same logic applies to the below `fetch_sub` to the `weak` count.
/// ```
fn drop(&mut self) {
let ptr = *self.ptr;
- let thin = ptr as *const ();
-
- // see comments above for why this check is here
- if thin as usize == mem::POST_DROP_USIZE {
- return;
- }
// If we find out that we were the last weak pointer, then its time to
// deallocate the data entirely. See the discussion in Arc::drop() about
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
+use core::iter::FusedIterator;
use core::marker::{self, Unsize};
use core::mem;
use core::ops::{CoerceUnsized, Deref, DerefMut};
use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer};
use core::ptr::{self, Unique};
-use core::raw::TraitObject;
use core::convert::From;
/// A value that represents the heap. This is the default place that the `box`
/// proper way to do so is to convert the raw pointer back into a
/// `Box` with the `Box::from_raw` function.
///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
/// # Examples
///
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let raw = Box::into_raw(self);
- let to: TraitObject = mem::transmute::<*mut Any, TraitObject>(raw);
-
- // Extract the data pointer
- Ok(Box::from_raw(to.data as *mut T))
+ let raw: *mut Any = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I: FusedIterator + ?Sized> FusedIterator for Box<I> {}
+
/// `FnBox` is a version of the `FnOnce` intended for use with boxed
/// closure objects. The idea is that where one would normally store a
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
-#![feature(unsafe_no_drop_flag, filling_drop)]
+#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unsize)]
-#![cfg_attr(not(test), feature(raw, fn_traits, placement_new_protocol))]
+#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol))]
#![cfg_attr(test, feature(test, box_heap))]
// Allow testing this library
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawVec<T> {
ptr: Unique<T>,
cap: usize,
mem::forget(self);
output
}
-
- /// This is a stupid name in the hopes that someone will find this in the
- /// not too distant future and remove it with the rest of
- /// #[unsafe_no_drop_flag]
- pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
- self.cap != mem::POST_DROP_USIZE
- }
}
impl<T> Drop for RawVec<T> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
let elem_size = mem::size_of::<T>();
- if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
+ if elem_size != 0 && self.cap != 0 {
let align = mem::align_of::<T>();
let num_bytes = elem_size * self.cap;
/// A reference-counted pointer type over an immutable value.
///
/// See the [module level documentation](./index.html) for more details.
-#[unsafe_no_drop_flag]
+///
+/// Note: the inherent methods defined on `Rc<T>` are all associated functions,
+/// which means that you have to call them as e.g. `Rc::get_mut(&value)` instead
+/// of `value.get_mut()`. This is so that there are no conflicts with methods
+/// on the inner type `T`, which are what you want to call in the majority of
+/// cases.
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
ptr: Shared<RcBox<T>>,
}
/// Checks if `Rc::try_unwrap` would return `Ok`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(rc_would_unwrap)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new(3);
+ /// assert!(Rc::would_unwrap(&x));
+ /// assert_eq!(Rc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Rc::new(4);
+ /// let _y = x.clone();
+ /// assert!(!Rc::would_unwrap(&x));
+ /// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
+ /// ```
#[unstable(feature = "rc_would_unwrap",
reason = "just added for niche usecase",
issue = "28356")]
fn drop(&mut self) {
unsafe {
let ptr = *self.ptr;
- let thin = ptr as *const ();
- if thin as usize != mem::POST_DROP_USIZE {
- self.dec_strong();
- if self.strong() == 0 {
- // destroy the contained object
- ptr::drop_in_place(&mut (*ptr).value);
+ self.dec_strong();
+ if self.strong() == 0 {
+ // destroy the contained object
+ ptr::drop_in_place(&mut (*ptr).value);
- // remove the implicit "strong weak" pointer now that we've
- // destroyed the contents.
- self.dec_weak();
+ // remove the implicit "strong weak" pointer now that we've
+ // destroyed the contents.
+ self.dec_weak();
- if self.weak() == 0 {
- deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
- }
+ if self.weak() == 0 {
+ deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
}
}
/// dropped.
///
/// See the [module level documentation](./index.html) for more.
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<RcBox<T>>,
fn drop(&mut self) {
unsafe {
let ptr = *self.ptr;
- let thin = ptr as *const ();
- if thin as usize != mem::POST_DROP_USIZE {
- self.dec_weak();
- // the weak count starts at 1, and will only go to zero if all
- // the strong pointers have disappeared.
- if self.weak() == 0 {
- deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
- }
+ self.dec_weak();
+ // the weak count starts at 1, and will only go to zero if all
+ // the strong pointers have disappeared.
+ if self.weak() == 0 {
+ deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
}
}
}
#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]
-use core::ops::{Drop, Deref, DerefMut};
-use core::iter::FromIterator;
+use core::ops::{Deref, DerefMut};
+use core::iter::{FromIterator, FusedIterator};
use core::mem::swap;
use core::mem::size_of;
use core::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
+
/// An iterator that moves out of a `BinaryHeap`.
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
+
/// An iterator that drains a `BinaryHeap`.
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
fn from(vec: Vec<T>) -> BinaryHeap<T> {
#![stable(feature = "rust1", since = "1.0.0")]
-use core::clone::Clone;
-use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
-use core::convert::AsRef;
-use core::default::Default;
+use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
-use core::marker::Sized;
use core::ops::Deref;
-use core::option::Option;
use fmt;
use core::cmp::Ordering;
use core::fmt::Debug;
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, Peekable};
+use core::iter::{FromIterator, Peekable, FusedIterator};
use core::marker::PhantomData;
use core::ops::Index;
use core::{fmt, intrinsics, mem, ptr};
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
+
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.length == 0 {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
+
impl<K, V> IntoIterator for BTreeMap<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<K, V> FusedIterator for IntoIter<K, V> {}
+
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
+
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Keys<'a, K, V> {
Keys { inner: self.inner.clone() }
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
+
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Values<'a, K, V> {
Values { inner: self.inner.clone() }
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
+
+
impl<'a, K, V> Range<'a, K, V> {
unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
let handle = self.front;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Range<'a, K, V> {}
+
impl<'a, K, V> Clone for Range<'a, K, V> {
fn clone(&self) -> Range<'a, K, V> {
Range {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {}
+
impl<'a, K, V> RangeMut<'a, K, V> {
unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
let handle = ptr::read(&self.back);
use core::cmp::{min, max};
use core::fmt::Debug;
use core::fmt;
-use core::iter::{Peekable, FromIterator};
+use core::iter::{Peekable, FromIterator, FusedIterator};
use core::ops::{BitOr, BitAnd, BitXor, Sub};
use borrow::Borrow;
fn len(&self) -> usize { self.iter.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
fn len(&self) -> usize { self.iter.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
impl<'a, T> Clone for Range<'a, T> {
fn clone(&self) -> Range<'a, T> {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Range<'a, T> {}
+
/// Compare `x` and `y`, but return `short` if x is None and `long` if y is None
fn cmp_opt<T: Ord>(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering) -> Ordering {
match (x, y) {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: Ord> FusedIterator for Difference<'a, T> {}
+
impl<'a, T> Clone for SymmetricDifference<'a, T> {
fn clone(&self) -> SymmetricDifference<'a, T> {
SymmetricDifference {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {}
+
impl<'a, T> Clone for Intersection<'a, T> {
fn clone(&self) -> Intersection<'a, T> {
Intersection {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {}
+
impl<'a, T> Clone for Union<'a, T> {
fn clone(&self) -> Union<'a, T> {
Union {
(max(a_len, b_len), Some(a_len + b_len))
}
}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: Ord> FusedIterator for Union<'a, T> {}
use core::marker;
use core::fmt;
-use core::iter::FromIterator;
+use core::iter::{FromIterator, FusedIterator};
use core::ops::{Sub, BitOr, BitAnd, BitXor};
// FIXME(contentions): implement union family of methods? (general design may be
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<E: CLike> FusedIterator for Iter<E> {}
+
impl<E: CLike> FromIterator<E> for EnumSet<E> {
fn from_iter<I: IntoIterator<Item = E>>(iter: I) -> EnumSet<E> {
let mut ret = EnumSet::new();
#![feature(core_intrinsics)]
#![feature(dropck_parametricity)]
#![feature(fmt_internals)]
+#![feature(fused)]
#![feature(heap_api)]
#![feature(inclusive_range)]
#![feature(lang_items)]
#![feature(step_by)]
#![feature(unicode)]
#![feature(unique)]
-#![feature(unsafe_no_drop_flag)]
+#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![cfg_attr(test, feature(rand, test))]
#![no_std]
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hasher, Hash};
-use core::iter::FromIterator;
+use core::iter::{FromIterator, FusedIterator};
use core::marker::PhantomData;
use core::mem;
use core::ops::{BoxPlace, InPlace, Place, Placer};
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for IterMut<'a, T> {}
+
impl<'a, T> IterMut<'a, T> {
/// Inserts the given element just after the element most recently returned by `.next()`.
/// The inserted element does not appear in the iteration.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for LinkedList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
#[cfg(test)]
mod tests {
- use std::clone::Clone;
- use std::iter::{Iterator, IntoIterator, Extend};
- use std::option::Option::{self, Some, None};
use std::__rand::{thread_rng, Rng};
use std::thread;
use std::vec::Vec;
#[test]
fn test_26021() {
- use std::iter::ExactSizeIterator;
// There was a bug in split_off that failed to null out the RHS's head's prev ptr.
// This caused the RHS's dtor to walk up into the LHS at drop and delete all of
// its nodes.
//! Range syntax.
-use core::option::Option::{self, None, Some};
use core::ops::{RangeFull, Range, RangeTo, RangeFrom};
/// **RangeArgument** is implemented by Rust's built-in range types, produced
use core::str::pattern::Pattern;
use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
use core::mem;
+use core::iter::FusedIterator;
use rustc_unicode::str::{UnicodeStr, Utf16Encoder};
use vec_deque::VecDeque;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for EncodeUtf16<'a> {}
+
// Return the initial codepoint accumulator for the first byte.
// The first byte is special, only want bottom 5 bits for width 2, 4 bits
// for width 3, and 3 bits for width 4
use core::fmt;
use core::hash;
-use core::iter::FromIterator;
+use core::iter::{FromIterator, FusedIterator};
use core::mem;
use core::ops::{self, Add, AddAssign, Index, IndexMut};
use core::ptr;
/// [`OsString`]: ../../std/ffi/struct.OsString.html
///
/// Indexing is intended to be a constant-time operation, but UTF-8 encoding
-/// does not allow us to do this. Furtheremore, it's not clear what sort of
+/// does not allow us to do this. Furthermore, it's not clear what sort of
/// thing the index should return: a byte, a codepoint, or a grapheme cluster.
/// The [`as_bytes()`] and [`chars()`] methods return iterators over the first
/// two, respectively.
self.iter.next_back()
}
}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Drain<'a> {}
use core::fmt;
use core::hash::{self, Hash};
use core::intrinsics::{arith_offset, assume};
-use core::iter::FromIterator;
+use core::iter::{FromIterator, FusedIterator};
use core::mem;
use core::ops::{Index, IndexMut};
use core::ops;
/// Vec does not currently guarantee the order in which elements are dropped
/// (the order has changed in the past, and may change again).
///
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Vec<T> {
buf: RawVec<T>,
impl<T> Drop for Vec<T> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
- if self.buf.unsafe_no_drop_flag_needs_drop() {
- unsafe {
- // use drop for [T]
- ptr::drop_in_place(&mut self[..]);
- }
+ unsafe {
+ // use drop for [T]
+ ptr::drop_in_place(&mut self[..]);
}
// RawVec handles deallocation
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
+
#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
impl<T: Clone> Clone for IntoIter<T> {
fn clone(&self) -> IntoIter<T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Drain<'a, T> {}
use core::cmp::Ordering;
use core::fmt;
-use core::iter::{repeat, FromIterator};
+use core::iter::{repeat, FromIterator, FusedIterator};
use core::mem;
use core::ops::{Index, IndexMut};
use core::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
+
+
/// `VecDeque` mutable iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for IterMut<'a, T> {}
+
/// A by-value VecDeque iterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
+
/// A draining VecDeque iterator
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
fn eq(&self, other: &VecDeque<A>) -> bool {
#[cfg(test)]
mod tests {
- use core::iter::Iterator;
- use core::option::Option::Some;
-
use test;
use super::VecDeque;
#![stable(feature = "rust1", since = "1.0.0")]
use fmt;
-use marker::Send;
-use mem::transmute;
-use option::Option::{self, Some, None};
-use raw::TraitObject;
use intrinsics;
-use marker::{Reflect, Sized};
+use marker::Reflect;
///////////////////////////////////////////////////////////////////////////////
// Any trait
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let to: TraitObject = transmute(self);
-
- // Extract the data pointer
- Some(&*(to.data as *const T))
+ Some(&*(self as *const Any as *const T))
}
} else {
None
pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let to: TraitObject = transmute(self);
-
- // Extract the data pointer
- Some(&mut *(to.data as *const T as *mut T))
+ Some(&mut *(self as *mut Any as *mut T))
}
} else {
None
issue = "27778")]
use borrow::{Borrow, BorrowMut};
-use clone::Clone;
-use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
-use convert::{AsRef, AsMut};
-use default::Default;
+use cmp::Ordering;
use fmt;
use hash::{Hash, self};
-use iter::IntoIterator;
-use marker::{Copy, Sized, Unsize};
-use option::Option;
-use slice::{Iter, IterMut, SliceExt};
+use marker::Unsize;
+use slice::{Iter, IterMut};
/// Utility trait implemented only on arrays of fixed size
///
#![stable(feature = "rust1", since = "1.0.0")]
-use marker::Sized;
-
/// A trait for borrowing data.
///
/// In general, there may be several ways to "borrow" a piece of data. The
#![stable(feature = "rust1", since = "1.0.0")]
-use clone::Clone;
-use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
-use convert::From;
-use default::Default;
+use cmp::Ordering;
use fmt::{self, Debug, Display};
-use marker::{Copy, PhantomData, Send, Sync, Sized, Unsize};
-use ops::{Deref, DerefMut, Drop, FnOnce, CoerceUnsized};
-use option::Option;
-use option::Option::{None, Some};
-use result::Result;
-use result::Result::{Ok, Err};
+use marker::{PhantomData, Unsize};
+use ops::{Deref, DerefMut, CoerceUnsized};
/// A mutable memory location that admits only `Copy` data.
///
}
}
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {}
+
/// A mutable memory location with dynamically checked borrow rules
///
/// See the [module-level documentation](index.html) for more.
}
}
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<RefCell<U>> for RefCell<T> {}
+
struct BorrowRef<'b> {
borrow: &'b Cell<BorrowFlag>,
}
UnsafeCell::new(t)
}
}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {}
+
+#[allow(unused)]
+fn assert_coerce_unsized(a: UnsafeCell<&i32>, b: Cell<&i32>, c: RefCell<&i32>) {
+ let _: UnsafeCell<&Send> = a;
+ let _: Cell<&Send> = b;
+ let _: RefCell<&Send> = c;
+}
#![allow(non_snake_case)]
#![stable(feature = "core_char", since = "1.2.0")]
-use prelude::v1::*;
-
use char_private::is_printable;
+use iter::FusedIterator;
use mem::transmute;
// UTF-8 ranges and tags for encoding characters
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EscapeUnicode {}
+
/// An iterator that yields the literal escape code of a `char`.
///
/// This `struct` is created by the [`escape_default()`] method on [`char`]. See
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EscapeDefault {}
+
/// An iterator that yields the literal escape code of a `char`.
///
/// This `struct` is created by the [`escape_debug()`] method on [`char`]. See its
#[unstable(feature = "char_escape_debug", issue = "35068")]
impl ExactSizeIterator for EscapeDebug { }
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EscapeDebug {}
+
/// An iterator over `u8` entries representing the UTF-8 encoding of a `char`
/// value.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EncodeUtf8 {}
+
/// An iterator over `u16` entries representing the UTF-16 encoding of a `char`
/// value.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EncodeUtf16 {}
/// An iterator over an iterator of bytes of the characters the bytes represent
/// as UTF-8
impl<I: Iterator<Item = u8>> Iterator for DecodeUtf8<I> {
type Item = Result<char, InvalidSequence>;
#[inline]
+
fn next(&mut self) -> Option<Result<char, InvalidSequence>> {
- self.0.next().map(|b| {
- if b & 0x80 == 0 { Ok(b as char) } else {
- let l = (!b).leading_zeros() as usize; // number of bytes in UTF-8 representation
- if l < 2 || l > 6 { return Err(InvalidSequence(())) };
- let mut x = (b as u32) & (0x7F >> l);
- for _ in 0..l-1 {
+ self.0.next().map(|first_byte| {
+ // Emit InvalidSequence according to
+ // Unicode §5.22 Best Practice for U+FFFD Substitution
+ // http://www.unicode.org/versions/Unicode9.0.0/ch05.pdf#G40630
+
+ // Roughly: consume at least one byte,
+ // then validate one byte at a time and stop before the first unexpected byte
+ // (which might be the valid start of the next byte sequence).
+
+ let mut code_point;
+ macro_rules! first_byte {
+ ($mask: expr) => {
+ code_point = u32::from(first_byte & $mask)
+ }
+ }
+ macro_rules! continuation_byte {
+ () => { continuation_byte!(0x80...0xBF) };
+ ($range: pat) => {
match self.0.peek() {
- Some(&b) if b & 0xC0 == 0x80 => {
+ Some(&byte @ $range) => {
+ code_point = (code_point << 6) | u32::from(byte & 0b0011_1111);
self.0.next();
- x = (x << 6) | (b as u32) & 0x3F;
- },
- _ => return Err(InvalidSequence(())),
+ }
+ _ => return Err(InvalidSequence(()))
}
}
- match from_u32(x) {
- Some(x) if l == x.len_utf8() => Ok(x),
- _ => Err(InvalidSequence(())),
+ }
+
+ match first_byte {
+ 0x00...0x7F => {
+ first_byte!(0b1111_1111);
}
+ 0xC2...0xDF => {
+ first_byte!(0b0001_1111);
+ continuation_byte!();
+ }
+ 0xE0 => {
+ first_byte!(0b0000_1111);
+ continuation_byte!(0xA0...0xBF); // 0x80...0x9F here are overlong
+ continuation_byte!();
+ }
+ 0xE1...0xEC | 0xEE...0xEF => {
+ first_byte!(0b0000_1111);
+ continuation_byte!();
+ continuation_byte!();
+ }
+ 0xED => {
+ first_byte!(0b0000_1111);
+ continuation_byte!(0x80...0x9F); // 0xA0..0xBF here are surrogates
+ continuation_byte!();
+ }
+ 0xF0 => {
+ first_byte!(0b0000_0111);
+ continuation_byte!(0x90...0xBF); // 0x80..0x8F here are overlong
+ continuation_byte!();
+ continuation_byte!();
+ }
+ 0xF1...0xF3 => {
+ first_byte!(0b0000_0111);
+ continuation_byte!();
+ continuation_byte!();
+ continuation_byte!();
+ }
+ 0xF4 => {
+ first_byte!(0b0000_0111);
+ continuation_byte!(0x80...0x8F); // 0x90..0xBF here are beyond char::MAX
+ continuation_byte!();
+ continuation_byte!();
+ }
+ _ => return Err(InvalidSequence(())) // Illegal first byte, overlong, or beyond MAX
+ }
+ unsafe {
+ Ok(from_u32_unchecked(code_point))
}
})
}
}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<I: FusedIterator<Item = u8>> FusedIterator for DecodeUtf8<I> {}
// NOTE: The following code was generated by "src/etc/char_private.py",
// do not edit directly!
-use slice::SliceExt;
-
fn check(x: u16, singletons: &[u16], normal: &[u16]) -> bool {
for &s in singletons {
if x == s {
#![stable(feature = "rust1", since = "1.0.0")]
-use marker::Sized;
-
/// A common trait for the ability to explicitly duplicate an object.
///
/// Differs from `Copy` in that `Copy` is implicit and extremely inexpensive, while
use self::Ordering::*;
-use marker::Sized;
-use option::Option::{self, Some};
-
/// Trait for equality comparisons which are [partial equivalence
/// relations](http://en.wikipedia.org/wiki/Partial_equivalence_relation).
///
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
mod impls {
- use cmp::{PartialOrd, Ord, PartialEq, Eq, Ordering};
- use cmp::Ordering::{Less, Greater, Equal};
- use marker::Sized;
- use option::Option;
- use option::Option::{Some, None};
+ use cmp::Ordering::{self, Less, Greater, Equal};
macro_rules! partial_eq_impl {
($($t:ty)*) => ($(
ord_impl! { char usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
- // Note: This macro is a temporary hack that can be remove once we are building with a compiler
- // that supports `!`
- macro_rules! not_stage0 {
- () => {
- #[unstable(feature = "never_type", issue = "35121")]
- impl PartialEq for ! {
- fn eq(&self, _: &!) -> bool {
- *self
- }
- }
-
- #[unstable(feature = "never_type", issue = "35121")]
- impl Eq for ! {}
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl PartialEq for ! {
+ fn eq(&self, _: &!) -> bool {
+ *self
+ }
+ }
- #[unstable(feature = "never_type", issue = "35121")]
- impl PartialOrd for ! {
- fn partial_cmp(&self, _: &!) -> Option<Ordering> {
- *self
- }
- }
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl Eq for ! {}
- #[unstable(feature = "never_type", issue = "35121")]
- impl Ord for ! {
- fn cmp(&self, _: &!) -> Ordering {
- *self
- }
- }
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl PartialOrd for ! {
+ fn partial_cmp(&self, _: &!) -> Option<Ordering> {
+ *self
}
}
- #[cfg(not(stage0))]
- not_stage0!();
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl Ord for ! {
+ fn cmp(&self, _: &!) -> Ordering {
+ *self
+ }
+ }
// & pointers
#![stable(feature = "rust1", since = "1.0.0")]
-use marker::Sized;
-use result::Result;
-
/// A cheap, reference-to-reference conversion.
///
/// `AsRef` is very similar to, but different than, `Borrow`. See
#![stable(feature = "rust1", since = "1.0.0")]
-use marker::Sized;
-
/// A trait for giving a type a useful default value.
///
/// Sometimes, you want to fall back to some kind of default value, and
/// bar: f32,
/// }
///
-///
/// fn main() {
/// let options: SomeOptions = Default::default();
/// }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use fmt::{self, FlagV1};
struct PadAdapter<'a, 'b: 'a> {
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use cell::{UnsafeCell, Cell, RefCell, Ref, RefMut, BorrowState};
use marker::PhantomData;
use mem;
prefix: &str,
buf: &str)
-> Result {
- use char::CharExt;
-
let mut width = buf.len();
let mut sign = None;
f: F) -> Result
where F: FnOnce(&mut Formatter) -> Result,
{
- use char::CharExt;
let align = match self.align {
rt::v1::Alignment::Unknown => default,
_ => self.align
fmt_refs! { Debug, Display, Octal, Binary, LowerHex, UpperHex, LowerExp, UpperExp }
-// Note: This macro is a temporary hack that can be remove once we are building with a compiler
-// that supports `!`
-macro_rules! not_stage0 {
- () => {
- #[unstable(feature = "never_type", issue = "35121")]
- impl Debug for ! {
- fn fmt(&self, _: &mut Formatter) -> Result {
- *self
- }
- }
-
- #[unstable(feature = "never_type", issue = "35121")]
- impl Display for ! {
- fn fmt(&self, _: &mut Formatter) -> Result {
- *self
- }
- }
+#[unstable(feature = "never_type", issue = "35121")]
+impl Debug for ! {
+ fn fmt(&self, _: &mut Formatter) -> Result {
+ *self
}
}
-#[cfg(not(stage0))]
-not_stage0!();
+#[unstable(feature = "never_type", issue = "35121")]
+impl Display for ! {
+ fn fmt(&self, _: &mut Formatter) -> Result {
+ *self
+ }
+}
#[stable(feature = "rust1", since = "1.0.0")]
impl Debug for bool {
// FIXME: #6220 Implement floating point formatting
-use prelude::v1::*;
-
use fmt;
use num::Zero;
use ops::{Div, Rem, Sub};
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use fmt;
use marker;
use mem;
//////////////////////////////////////////////////////////////////////////////
mod impls {
- use prelude::v1::*;
-
use mem;
use slice;
use super::*;
//! An implementation of SipHash.
-use prelude::v1::*;
-
use marker::PhantomData;
use ptr;
issue = "0")]
#![allow(missing_docs)]
-use marker::Sized;
-
extern "rust-intrinsic" {
// NB: These intrinsics take raw pointers because they mutate aliased
/// crate it is invoked in.
pub fn type_id<T: ?Sized + 'static>() -> u64;
- /// Creates a value initialized to so that its drop flag,
- /// if any, says that it has been dropped.
- ///
- /// `init_dropped` is unsafe because it returns a datum with all
- /// of its bytes set to the drop flag, which generally does not
- /// correspond to a valid value.
- ///
- /// This intrinsic is likely to be deprecated in the future when
- /// Rust moves to non-zeroing dynamic drop (and thus removes the
- /// embedded drop flags that are being established by this
- /// intrinsic).
- pub fn init_dropped<T>() -> T;
-
/// Creates a value initialized to zero.
///
/// `init` is unsafe because it returns a zeroed-out datum,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use clone::Clone;
-use cmp::{Ord, PartialOrd, PartialEq, Ordering};
-use default::Default;
-use ops::FnMut;
-use option::Option::{self, Some, None};
-use marker::Sized;
+use cmp::Ordering;
use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, FlatMap, Fuse};
use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, Take, TakeWhile, Rev};
use super::{Zip, Sum, Product};
-use super::ChainState;
-use super::{DoubleEndedIterator, ExactSizeIterator, Extend, FromIterator};
-use super::{IntoIterator, ZipImpl};
+use super::{ChainState, FromIterator, ZipImpl};
fn _assert_is_object_safe(_: &Iterator<Item=()>) {}
#![stable(feature = "rust1", since = "1.0.0")]
-use clone::Clone;
use cmp;
-use default::Default;
use fmt;
use iter_private::TrustedRandomAccess;
-use ops::FnMut;
-use option::Option::{self, Some, None};
use usize;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::traits::{ExactSizeIterator, Sum, Product};
+#[unstable(feature = "fused", issue = "35602")]
+pub use self::traits::FusedIterator;
mod iterator;
mod range;
impl<I> ExactSizeIterator for Rev<I>
where I: ExactSizeIterator + DoubleEndedIterator {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Rev<I>
+ where I: FusedIterator + DoubleEndedIterator {}
+
/// An iterator that clones the elements of an underlying iterator.
///
/// This `struct` is created by the [`cloned()`] method on [`Iterator`]. See its
where I: ExactSizeIterator<Item=&'a T>, T: Clone
{}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, I, T: 'a> FusedIterator for Cloned<I>
+ where I: FusedIterator<Item=&'a T>, T: Clone
+{}
+
/// An iterator that repeats endlessly.
///
/// This `struct` is created by the [`cycle()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Cycle<I> where I: Clone + Iterator {}
+
/// An iterator that strings two iterators together.
///
/// This `struct` is created by the [`chain()`] method on [`Iterator`]. See its
}
}
+// Note: *both* must be fused to handle double-ended iterators.
+#[unstable(feature = "fused", issue = "35602")]
+impl<A, B> FusedIterator for Chain<A, B>
+ where A: FusedIterator,
+ B: FusedIterator<Item=A::Item>,
+{}
+
/// An iterator that iterates two other iterators simultaneously.
///
/// This `struct` is created by the [`zip()`] method on [`Iterator`]. See its
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A, B> FusedIterator for Zip<A, B>
+ where A: FusedIterator, B: FusedIterator, {}
+
/// An iterator that maps the values of `iter` with `f`.
///
/// This `struct` is created by the [`map()`] method on [`Iterator`]. See its
impl<B, I: ExactSizeIterator, F> ExactSizeIterator for Map<I, F>
where F: FnMut(I::Item) -> B {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<B, I: FusedIterator, F> FusedIterator for Map<I, F>
+ where F: FnMut(I::Item) -> B {}
+
/// An iterator that filters the elements of `iter` with `predicate`.
///
/// This `struct` is created by the [`filter()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I: FusedIterator, P> FusedIterator for Filter<I, P>
+ where P: FnMut(&I::Item) -> bool {}
+
/// An iterator that uses `f` to both filter and map elements from `iter`.
///
/// This `struct` is created by the [`filter_map()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F>
+ where F: FnMut(I::Item) -> Option<B> {}
+
/// An iterator that yields the current count and the element during iteration.
///
/// This `struct` is created by the [`enumerate()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
+
/// An iterator with a `peek()` that returns an optional reference to the next
/// element.
///
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator> ExactSizeIterator for Peekable<I> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I: FusedIterator> FusedIterator for Peekable<I> {}
+
impl<I: Iterator> Peekable<I> {
/// Returns a reference to the next() value without advancing the iterator.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I, P> FusedIterator for SkipWhile<I, P>
+ where I: FusedIterator, P: FnMut(&I::Item) -> bool {}
+
/// An iterator that only accepts elements while `predicate` is true.
///
/// This `struct` is created by the [`take_while()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I, P> FusedIterator for TakeWhile<I, P>
+ where I: FusedIterator, P: FnMut(&I::Item) -> bool {}
+
/// An iterator that skips over `n` elements of `iter`.
///
/// This `struct` is created by the [`skip()`] method on [`Iterator`]. See its
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
+
/// An iterator that only iterates over the first `n` iterations of `iter`.
///
/// This `struct` is created by the [`take()`] method on [`Iterator`]. See its
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Take<I> where I: FusedIterator {}
/// An iterator to maintain state while iterating another iterator.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<B, I, St, F> FusedIterator for Scan<I, St, F>
+ where I: FusedIterator, F: FnMut(&mut St, I::Item) -> Option<B> {}
+
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I, U, F> FusedIterator for FlatMap<I, U, F>
+ where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {}
+
/// An iterator that yields `None` forever after the underlying iterator
/// yields `None` once.
///
done: bool
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Fuse<I> where I: Iterator {
type Item = <I as Iterator>::Item;
#[inline]
- fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ default fn next(&mut self) -> Option<<I as Iterator>::Item> {
if self.done {
None
} else {
}
#[inline]
- fn nth(&mut self, n: usize) -> Option<I::Item> {
+ default fn nth(&mut self, n: usize) -> Option<I::Item> {
if self.done {
None
} else {
}
#[inline]
- fn last(self) -> Option<I::Item> {
+ default fn last(self) -> Option<I::Item> {
if self.done {
None
} else {
}
#[inline]
- fn count(self) -> usize {
+ default fn count(self) -> usize {
if self.done {
0
} else {
}
#[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
+ default fn size_hint(&self) -> (usize, Option<usize>) {
if self.done {
(0, Some(0))
} else {
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> DoubleEndedIterator for Fuse<I> where I: DoubleEndedIterator {
#[inline]
- fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+ default fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
if self.done {
None
} else {
}
}
+unsafe impl<I> TrustedRandomAccess for Fuse<I>
+ where I: TrustedRandomAccess,
+{
+ unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
+ self.iter.get_unchecked(i)
+ }
+}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> Iterator for Fuse<I> where I: FusedIterator {
+ #[inline]
+ fn next(&mut self) -> Option<<I as Iterator>::Item> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ self.iter.nth(n)
+ }
+
+ #[inline]
+ fn last(self) -> Option<I::Item> {
+ self.iter.last()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[unstable(feature = "fused", reason = "recently added", issue = "35602")]
+impl<I> DoubleEndedIterator for Fuse<I>
+ where I: DoubleEndedIterator + FusedIterator
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+ self.iter.next_back()
+ }
+}
+
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Fuse<I> where I: ExactSizeIterator {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: ExactSizeIterator, F> ExactSizeIterator for Inspect<I, F>
where F: FnMut(&I::Item) {}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<I: FusedIterator, F> FusedIterator for Inspect<I, F>
+ where F: FnMut(&I::Item) {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use clone::Clone;
-use cmp::PartialOrd;
use mem;
use ops::{self, Add, Sub};
-use option::Option::{self, Some, None};
-use marker::Sized;
use usize;
-use super::{DoubleEndedIterator, ExactSizeIterator, Iterator};
+use super::FusedIterator;
/// Objects that can be stepped over in both directions.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A> FusedIterator for StepBy<A, ops::RangeFrom<A>>
+ where A: Clone, for<'a> &'a A: Add<&'a A, Output = A> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step + Clone> Iterator for StepBy<A, ops::Range<A>> {
type Item = A;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A: Step + Clone> FusedIterator for StepBy<A, ops::Range<A>> {}
+
#[unstable(feature = "inclusive_range",
reason = "recently added, follows RFC",
issue = "28237")]
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A: Step + Clone> FusedIterator for StepBy<A, ops::RangeInclusive<A>> {}
+
macro_rules! range_exact_iter_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A> FusedIterator for ops::Range<A>
+ where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step> Iterator for ops::RangeFrom<A> where
for<'a> &'a A: Add<&'a A, Output = A>
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A> FusedIterator for ops::RangeFrom<A>
+ where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {}
+
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
impl<A: Step> Iterator for ops::RangeInclusive<A> where
for<'a> &'a A: Add<&'a A, Output = A>
n
}
}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<A> FusedIterator for ops::RangeInclusive<A>
+ where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use clone::Clone;
-use default::Default;
use fmt;
use marker;
-use option::Option::{self, Some, None};
use usize;
-use super::{DoubleEndedIterator, IntoIterator, Iterator, ExactSizeIterator};
+use super::FusedIterator;
/// An iterator that repeats an element endlessly.
///
fn next_back(&mut self) -> Option<A> { Some(self.element.clone()) }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A: Clone> FusedIterator for Repeat<A> {}
+
/// Creates a new iterator that endlessly repeats a single element.
///
/// The `repeat()` function repeats a single value over and over and over and
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for Empty<T> {}
+
// not #[derive] because that adds a Clone bound on T,
// which isn't necessary.
#[stable(feature = "iter_empty", since = "1.2.0")]
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for Once<T> {}
+
/// Creates an iterator that yields an element exactly once.
///
/// This is commonly used to adapt a single value into a [`chain()`] of other
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use option::Option::{self, Some};
-use marker::Sized;
-
-use super::Iterator;
-
/// Conversion from an `Iterator`.
///
/// By implementing `FromIterator` for a type, you define how it will be
integer_sum_product! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
float_sum_product! { f32 f64 }
+
+/// An iterator that always continues to yield `None` when exhausted.
+///
+/// Calling next on a fused iterator that has returned `None` once is guaranteed
+/// to return `None` again. This trait should be implemented by all iterators
+/// that behave this way because it allows for some significant optimizations.
+///
+/// Note: In general, you should not use `FusedIterator` in generic bounds if
+/// you need a fused iterator. Instead, you should just call `Iterator::fuse()`
+/// on the iterator. If the iterator is already fused, the additional `Fuse`
+/// wrapper will be a no-op with no performance penalty.
+#[unstable(feature = "fused", issue = "35602")]
+pub trait FusedIterator: Iterator {}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {}
// except according to those terms.
-use iter::ExactSizeIterator;
-
/// An iterator whose items are random accessible efficiently
///
/// # Safety
#![deny(missing_debug_implementations)]
#![cfg_attr(not(stage0), deny(warnings))]
-#![cfg_attr(stage0, allow(unused_attributes))]
-
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_type_defaults)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(question_mark)]
+#![feature(never_type)]
+#![feature(prelude_import)]
-// NOTE: remove the cfg_attr next snapshot
-#![cfg_attr(not(stage0), feature(never_type))]
+#[prelude_import]
+#[allow(unused)]
+use prelude::v1::*;
#[macro_use]
mod macros;
#![stable(feature = "rust1", since = "1.0.0")]
-use clone::Clone;
use cmp;
-use default::Default;
-use option::Option;
use hash::Hash;
use hash::Hasher;
impls! { PhantomData }
mod impls {
- use super::{Send, Sync, Sized};
-
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<'a, T: Sync + ?Sized> Send for &'a T {}
#[stable(feature = "rust1", since = "1.0.0")]
#![stable(feature = "rust1", since = "1.0.0")]
-use marker::Sized;
use intrinsics;
use ptr;
intrinsics::init()
}
-/// Creates a value initialized to an unspecified series of bytes.
-///
-/// The byte sequence usually indicates that the value at the memory
-/// in question has been dropped. Thus, *if* T carries a drop flag,
-/// any associated destructor will not be run when the value falls out
-/// of scope.
-///
-/// Some code at one time used the `zeroed` function above to
-/// accomplish this goal.
-///
-/// This function is expected to be deprecated with the transition
-/// to non-zeroing drop.
-#[inline]
-#[unstable(feature = "filling_drop", issue = "5016")]
-pub unsafe fn dropped<T>() -> T {
- #[inline(always)]
- unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
-
- dropped_impl()
-}
-
/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type T, while doing nothing at all.
///
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
-macro_rules! repeat_u8_as_u16 {
- ($name:expr) => { (($name as u16) << 8 |
- ($name as u16)) }
-}
-macro_rules! repeat_u8_as_u32 {
- ($name:expr) => { (($name as u32) << 24 |
- ($name as u32) << 16 |
- ($name as u32) << 8 |
- ($name as u32)) }
-}
-macro_rules! repeat_u8_as_u64 {
- ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
- (repeat_u8_as_u32!($name) as u64)) }
-}
-
-// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
-//
-// In particular, the POST_DROP_U8 marker must never equal the
-// DTOR_NEEDED_U8 marker.
-//
-// For a while pnkfelix was using 0xc1 here.
-// But having the sign bit set is a pain, so 0x1d is probably better.
-//
-// And of course, 0x00 brings back the old world of zero'ing on drop.
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_U8: u8 = 0x1d;
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8);
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
-
-#[cfg(target_pointer_width = "16")]
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize;
-#[cfg(target_pointer_width = "32")]
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
-#[cfg(target_pointer_width = "64")]
-#[unstable(feature = "filling_drop", issue = "5016")]
-#[allow(missing_docs)]
-pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
-
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
/// value.
///
reason = "needs an RFC to flesh out the design",
issue = "27730")]
-use marker::Sized;
use ops::{CoerceUnsized, Deref};
/// Unsafe trait to indicate what types are usable with the NonZero struct
issue = "0")]
#![macro_use]
-use prelude::v1::*;
-
use mem;
use intrinsics;
// this one is used for testing only.
#[doc(hidden)]
pub mod tests {
- use prelude::v1::*;
define_bignum!(Big8x3: type=u8, n=3);
}
//! The various algorithms from the paper.
-use prelude::v1::*;
use cmp::min;
use cmp::Ordering::{Less, Equal, Greater};
use num::diy_float::Fp;
#[cfg(all(target_arch="x86", not(target_feature="sse2")))]
mod fpu_precision {
use mem::size_of;
- use ops::Drop;
/// A structure used to preserve the original value of the FPU control word, so that it can be
/// restored when the structure is dropped.
reason = "internal routines only exposed for testing",
issue = "0")]
-use prelude::v1::*;
use fmt;
use str::FromStr;
// FIXME This module's name is a bit unfortunate, since other modules also import `core::num`.
-use prelude::v1::*;
use cmp::Ordering::{self, Less, Equal, Greater};
pub use num::bignum::Big32x40 as Big;
//! modules rely on to not panic (or overflow) in turn.
//! To make matters worse, all that happens in a single pass over the input.
//! So, be careful when modifying anything, and double-check with the other modules.
-use prelude::v1::*;
use super::num;
use self::ParseResult::{Valid, ShortcutToInf, ShortcutToZero, Invalid};
//! Many functions in this module only handle normal numbers. The dec2flt routines conservatively
//! take the universally-correct slow path (Algorithm M) for very small and very large numbers.
//! That algorithm needs only next_float() which does handle subnormals and zeros.
-use prelude::v1::*;
use u32;
use cmp::Ordering::{Less, Equal, Greater};
use ops::{Mul, Div, Neg};
//! Decodes a floating-point value into individual parts and error ranges.
-use prelude::v1::*;
-
use {f32, f64};
use num::FpCategory;
use num::dec2flt::rawfp::RawFloat;
reason = "internal routines only exposed for testing",
issue = "0")]
-use prelude::v1::*;
use i16;
pub use self::decoder::{decode, DecodableFloat, FullDecoded, Decoded};
quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116.
*/
-use prelude::v1::*;
-
use cmp::Ordering;
use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};
accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243.
*/
-use prelude::v1::*;
-
use num::diy_float::Fp;
use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};
#![stable(feature = "rust1", since = "1.0.0")]
-use char::CharExt;
-use cmp::PartialOrd;
-use convert::{From, TryFrom};
+use convert::TryFrom;
use fmt;
use intrinsics;
-use marker::{Copy, Sized};
use mem::size_of;
-use option::Option::{self, Some, None};
-use result::Result::{self, Ok, Err};
-use str::{FromStr, StrExt};
-use slice::SliceExt;
+use str::FromStr;
/// Provides intentionally-wrapped arithmetic on `T`.
///
//! ```
//!
//! See the documentation for each trait for an example implementation.
+//!
+//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be
+//! invoked like functions. Note that `Fn` takes `&self`, `FnMut` takes `&mut
+//! self` and `FnOnce` takes `self`. These correspond to the three kinds of
+//! methods that can be invoked on an instance: call-by-reference,
+//! call-by-mutable-reference, and call-by-value. The most common use of these
+//! traits is to act as bounds to higher-level functions that take functions or
+//! closures as arguments.
+//!
+//! [`Fn`]: trait.Fn.html
+//! [`FnMut`]: trait.FnMut.html
+//! [`FnOnce`]: trait.FnOnce.html
+//!
+//! Taking a `Fn` as a parameter:
+//!
+//! ```rust
+//! fn call_with_one<F>(func: F) -> usize
+//! where F: Fn(usize) -> usize
+//! {
+//! func(1)
+//! }
+//!
+//! let double = |x| x * 2;
+//! assert_eq!(call_with_one(double), 2);
+//! ```
+//!
+//! Taking a `FnMut` as a parameter:
+//!
+//! ```rust
+//! fn do_twice<F>(mut func: F)
+//! where F: FnMut()
+//! {
+//! func();
+//! func();
+//! }
+//!
+//! let mut x: usize = 1;
+//! {
+//! let add_two_to_x = || x += 2;
+//! do_twice(add_two_to_x);
+//! }
+//!
+//! assert_eq!(x, 5);
+//! ```
+//!
+//! Taking a `FnOnce` as a parameter:
+//!
+//! ```rust
+//! fn consume_with_relish<F>(func: F)
+//! where F: FnOnce() -> String
+//! {
+//! // `func` consumes its captured variables, so it cannot be run more
+//! // than once
+//! println!("Consumed: {}", func());
+//!
+//! println!("Delicious!");
+//!
+//! // Attempting to invoke `func()` again will throw a `use of moved
+//! // value` error for `func`
+//! }
+//!
+//! let x = String::from("x");
+//! let consume_and_return_x = move || x;
+//! consume_with_relish(consume_and_return_x);
+//!
+//! // `consume_and_return_x` can no longer be invoked at this point
+//! ```
#![stable(feature = "rust1", since = "1.0.0")]
-use cmp::PartialOrd;
use fmt;
-use marker::{Sized, Unsize};
+use marker::Unsize;
/// The `Drop` trait is used to run some code when a value goes out of scope.
/// This is sometimes called a 'destructor'.
///
/// # Examples
///
-/// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up
-/// calling `sub`, and therefore, `main` prints `Subtracting!`.
+/// This example creates a `Point` struct that implements the `Sub` trait, and
+/// then demonstrates subtracting two `Point`s.
///
/// ```
/// use std::ops::Sub;
///
-/// struct Foo;
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
///
-/// impl Sub for Foo {
-/// type Output = Foo;
+/// impl Sub for Point {
+/// type Output = Point;
///
-/// fn sub(self, _rhs: Foo) -> Foo {
-/// println!("Subtracting!");
-/// self
+/// fn sub(self, other: Point) -> Point {
+/// Point {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// }
+/// }
+/// }
+///
+/// impl PartialEq for Point {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.x == other.x && self.y == other.y
/// }
/// }
///
/// fn main() {
-/// Foo - Foo;
+/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 },
+/// Point { x: 1, y: 0 });
/// }
/// ```
#[lang = "sub"]
///
/// # Examples
///
-/// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up
-/// calling `mul`, and therefore, `main` prints `Multiplying!`.
+/// Implementing a `Mul`tipliable rational number struct:
///
/// ```
/// use std::ops::Mul;
///
-/// struct Foo;
+/// // The uniqueness of rational numbers in lowest terms is a consequence of
+/// // the fundamental theorem of arithmetic.
+/// #[derive(Eq)]
+/// #[derive(PartialEq, Debug)]
+/// struct Rational {
+/// nominator: usize,
+/// denominator: usize,
+/// }
///
-/// impl Mul for Foo {
-/// type Output = Foo;
+/// impl Rational {
+/// fn new(nominator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
///
-/// fn mul(self, _rhs: Foo) -> Foo {
-/// println!("Multiplying!");
-/// self
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(nominator, denominator);
+/// Rational {
+/// nominator: nominator / gcd,
+/// denominator: denominator / gcd,
+/// }
/// }
/// }
///
-/// fn main() {
-/// Foo * Foo;
+/// impl Mul for Rational {
+/// // The multiplication of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn mul(self, rhs: Self) -> Self {
+/// let nominator = self.nominator * rhs.nominator;
+/// let denominator = self.denominator * rhs.denominator;
+/// Rational::new(nominator, denominator)
+/// }
/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(x: usize, y: usize) -> usize {
+/// let mut x = x;
+/// let mut y = y;
+/// while y != 0 {
+/// let t = y;
+/// y = x % y;
+/// x = t;
+/// }
+/// x
+/// }
+///
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(2, 3) * Rational::new(3, 4),
+/// Rational::new(1, 2));
/// ```
///
/// Note that `RHS = Self` by default, but this is not mandatory. Here is an
///
/// # Examples
///
-/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up
-/// calling `div`, and therefore, `main` prints `Dividing!`.
+/// Implementing a `Div`idable rational number struct:
///
/// ```
/// use std::ops::Div;
///
-/// struct Foo;
+/// // The uniqueness of rational numbers in lowest terms is a consequence of
+/// // the fundamental theorem of arithmetic.
+/// #[derive(Eq)]
+/// #[derive(PartialEq, Debug)]
+/// struct Rational {
+/// nominator: usize,
+/// denominator: usize,
+/// }
///
-/// impl Div for Foo {
-/// type Output = Foo;
+/// impl Rational {
+/// fn new(nominator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
///
-/// fn div(self, _rhs: Foo) -> Foo {
-/// println!("Dividing!");
-/// self
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(nominator, denominator);
+/// Rational {
+/// nominator: nominator / gcd,
+/// denominator: denominator / gcd,
+/// }
+/// }
+/// }
+///
+/// impl Div for Rational {
+/// // The division of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn div(self, rhs: Self) -> Self {
+/// if rhs.nominator == 0 {
+/// panic!("Cannot divide by zero-valued `Rational`!");
+/// }
+///
+/// let nominator = self.nominator * rhs.denominator;
+/// let denominator = self.denominator * rhs.nominator;
+/// Rational::new(nominator, denominator)
+/// }
+/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(x: usize, y: usize) -> usize {
+/// let mut x = x;
+/// let mut y = y;
+/// while y != 0 {
+/// let t = y;
+/// y = x % y;
+/// x = t;
/// }
+/// x
/// }
///
/// fn main() {
-/// Foo / Foo;
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4),
+/// Rational::new(2, 3));
/// }
/// ```
///
///
/// # Examples
///
-/// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up
-/// calling `rem`, and therefore, `main` prints `Remainder-ing!`.
+/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is
+/// implemented, one can use the `%` operator to find out what the remaining
+/// elements of the slice would be after splitting it into equal slices of a
+/// given length.
///
/// ```
/// use std::ops::Rem;
///
-/// struct Foo;
+/// #[derive(PartialEq, Debug)]
+/// struct SplitSlice<'a, T: 'a> {
+/// slice: &'a [T],
+/// }
///
-/// impl Rem for Foo {
-/// type Output = Foo;
+/// impl<'a, T> Rem<usize> for SplitSlice<'a, T> {
+/// type Output = SplitSlice<'a, T>;
///
-/// fn rem(self, _rhs: Foo) -> Foo {
-/// println!("Remainder-ing!");
-/// self
+/// fn rem(self, modulus: usize) -> Self {
+/// let len = self.slice.len();
+/// let rem = len % modulus;
+/// let start = len - rem;
+/// SplitSlice {slice: &self.slice[start..]}
/// }
/// }
///
-/// fn main() {
-/// Foo % Foo;
-/// }
+/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3,
+/// // the remainder would be &[6, 7]
+/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3,
+/// SplitSlice { slice: &[6, 7] });
/// ```
#[lang = "rem"]
#[stable(feature = "rust1", since = "1.0.0")]
///
/// # Examples
///
-/// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up
-/// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`.
+/// In this example, the `BitAnd` trait is implemented for a `BooleanVector`
+/// struct.
///
/// ```
/// use std::ops::BitAnd;
///
-/// struct Foo;
-///
-/// impl BitAnd for Foo {
-/// type Output = Foo;
-///
-/// fn bitand(self, _rhs: Foo) -> Foo {
-/// println!("Bitwise And-ing!");
-/// self
+/// #[derive(Debug)]
+/// struct BooleanVector {
+/// value: Vec<bool>,
+/// };
+///
+/// impl BitAnd for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitand(self, rhs: Self) -> Self {
+/// BooleanVector {
+/// value: self.value
+/// .iter()
+/// .zip(rhs.value.iter())
+/// .map(|(x, y)| *x && *y)
+/// .collect(),
+/// }
/// }
/// }
///
-/// fn main() {
-/// Foo & Foo;
+/// impl PartialEq for BooleanVector {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.value == other.value
+/// }
/// }
+///
+/// let bv1 = BooleanVector { value: vec![true, true, false, false] };
+/// let bv2 = BooleanVector { value: vec![true, false, true, false] };
+/// let expected = BooleanVector { value: vec![true, false, false, false] };
+/// assert_eq!(bv1 & bv2, expected);
/// ```
#[lang = "bitand"]
#[stable(feature = "rust1", since = "1.0.0")]
///
/// # Examples
///
-/// A trivial implementation of `SubAssign`. When `Foo -= Foo` happens, it ends up
-/// calling `sub_assign`, and therefore, `main` prints `Subtracting!`.
+/// This example creates a `Point` struct that implements the `SubAssign`
+/// trait, and then demonstrates sub-assigning to a mutable `Point`.
///
/// ```
/// use std::ops::SubAssign;
///
-/// struct Foo;
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
///
-/// impl SubAssign for Foo {
-/// fn sub_assign(&mut self, _rhs: Foo) {
-/// println!("Subtracting!");
+/// impl SubAssign for Point {
+/// fn sub_assign(&mut self, other: Point) {
+/// *self = Point {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// };
/// }
/// }
///
-/// # #[allow(unused_assignments)]
-/// fn main() {
-/// let mut foo = Foo;
-/// foo -= Foo;
+/// impl PartialEq for Point {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.x == other.x && self.y == other.y
+/// }
/// }
+///
+/// let mut point = Point { x: 3, y: 3 };
+/// point -= Point { x: 2, y: 3 };
+/// assert_eq!(point, Point {x: 1, y: 0});
/// ```
#[lang = "sub_assign"]
#[stable(feature = "op_assign_traits", since = "1.8.0")]
///
/// # Examples
///
-/// A trivial implementation of `Index`. When `Foo[Bar]` happens, it ends up
-/// calling `index`, and therefore, `main` prints `Indexing!`.
+/// This example implements `Index` on a read-only `NucleotideCount` container,
+/// enabling individual counts to be retrieved with index syntax.
///
/// ```
/// use std::ops::Index;
///
-/// #[derive(Copy, Clone)]
-/// struct Foo;
-/// struct Bar;
+/// enum Nucleotide {
+/// A,
+/// C,
+/// G,
+/// T,
+/// }
///
-/// impl Index<Bar> for Foo {
-/// type Output = Foo;
+/// struct NucleotideCount {
+/// a: usize,
+/// c: usize,
+/// g: usize,
+/// t: usize,
+/// }
///
-/// fn index<'a>(&'a self, _index: Bar) -> &'a Foo {
-/// println!("Indexing!");
-/// self
+/// impl Index<Nucleotide> for NucleotideCount {
+/// type Output = usize;
+///
+/// fn index(&self, nucleotide: Nucleotide) -> &usize {
+/// match nucleotide {
+/// Nucleotide::A => &self.a,
+/// Nucleotide::C => &self.c,
+/// Nucleotide::G => &self.g,
+/// Nucleotide::T => &self.t,
+/// }
/// }
/// }
///
-/// fn main() {
-/// Foo[Bar];
-/// }
+/// let nucleotide_count = NucleotideCount {a: 14, c: 9, g: 10, t: 12};
+/// assert_eq!(nucleotide_count[Nucleotide::A], 14);
+/// assert_eq!(nucleotide_count[Nucleotide::C], 9);
+/// assert_eq!(nucleotide_count[Nucleotide::G], 10);
+/// assert_eq!(nucleotide_count[Nucleotide::T], 12);
/// ```
#[lang = "index"]
#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
///
/// It cannot serve as an iterator because it doesn't have a starting point.
///
+/// # Examples
+///
+/// The `..{integer}` syntax is a `RangeTo`:
+///
+/// ```
+/// assert_eq!((..5), std::ops::RangeTo{ end: 5 });
/// ```
-/// fn main() {
-/// assert_eq!((..5), std::ops::RangeTo{ end: 5 });
///
-/// let arr = [0, 1, 2, 3];
-/// assert_eq!(arr[ .. ], [0,1,2,3]);
-/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo
-/// assert_eq!(arr[1.. ], [ 1,2,3]);
-/// assert_eq!(arr[1..3], [ 1,2 ]);
+/// It does not have an `IntoIterator` implementation, so you can't use it in a
+/// `for` loop directly. This won't compile:
+///
+/// ```ignore
+/// for i in ..5 {
+/// // ...
/// }
/// ```
+///
+/// When used as a slicing index, `RangeTo` produces a slice of all array
+/// elements before the index indicated by `end`.
+///
+/// ```
+/// let arr = [0, 1, 2, 3];
+/// assert_eq!(arr[ .. ], [0,1,2,3]);
+/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo
+/// assert_eq!(arr[1.. ], [ 1,2,3]);
+/// assert_eq!(arr[1..3], [ 1,2 ]);
+/// ```
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeTo<Idx> {
///
/// # Examples
///
+/// The `...{integer}` syntax is a `RangeToInclusive`:
+///
/// ```
/// #![feature(inclusive_range,inclusive_range_syntax)]
-/// fn main() {
-/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 });
+/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 });
+/// ```
///
-/// let arr = [0, 1, 2, 3];
-/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive
-/// assert_eq!(arr[1...2], [ 1,2 ]);
+/// It does not have an `IntoIterator` implementation, so you can't use it in a
+/// `for` loop directly. This won't compile:
+///
+/// ```ignore
+/// for i in ...5 {
+/// // ...
/// }
/// ```
+///
+/// When used as a slicing index, `RangeToInclusive` produces a slice of all
+/// array elements up to and including the index indicated by `end`.
+///
+/// ```
+/// #![feature(inclusive_range_syntax)]
+/// let arr = [0, 1, 2, 3];
+/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive
+/// assert_eq!(arr[1...2], [ 1,2 ]);
+/// ```
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
pub struct RangeToInclusive<Idx> {
}
/// A version of the call operator that takes an immutable receiver.
+///
+/// # Examples
+///
+/// Closures automatically implement this trait, which allows them to be
+/// invoked. Note, however, that `Fn` takes an immutable reference to any
+/// captured variables. To take a mutable capture, implement [`FnMut`], and to
+/// consume the capture, implement [`FnOnce`].
+///
+/// [`FnMut`]: trait.FnMut.html
+/// [`FnOnce`]: trait.FnOnce.html
+///
+/// ```
+/// let square = |x| x * x;
+/// assert_eq!(square(5), 25);
+/// ```
+///
+/// Closures can also be passed to higher-level functions through a `Fn`
+/// parameter (or a `FnMut` or `FnOnce` parameter, which are supertraits of
+/// `Fn`).
+///
+/// ```
+/// fn call_with_one<F>(func: F) -> usize
+/// where F: Fn(usize) -> usize {
+/// func(1)
+/// }
+///
+/// let double = |x| x * 2;
+/// assert_eq!(call_with_one(double), 2);
+/// ```
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
}
/// A version of the call operator that takes a mutable receiver.
+///
+/// # Examples
+///
+/// Closures that mutably capture variables automatically implement this trait,
+/// which allows them to be invoked.
+///
+/// ```
+/// let mut x = 5;
+/// {
+/// let mut square_x = || x *= x;
+/// square_x();
+/// }
+/// assert_eq!(x, 25);
+/// ```
+///
+/// Closures can also be passed to higher-level functions through a `FnMut`
+/// parameter (or a `FnOnce` parameter, which is a supertrait of `FnMut`).
+///
+/// ```
+/// fn do_twice<F>(mut func: F)
+/// where F: FnMut()
+/// {
+/// func();
+/// func();
+/// }
+///
+/// let mut x: usize = 1;
+/// {
+/// let add_two_to_x = || x += 2;
+/// do_twice(add_two_to_x);
+/// }
+///
+/// assert_eq!(x, 5);
+/// ```
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
}
/// A version of the call operator that takes a by-value receiver.
+///
+/// # Examples
+///
+/// By-value closures automatically implement this trait, which allows them to
+/// be invoked.
+///
+/// ```
+/// let x = 5;
+/// let square_x = move || x * x;
+/// assert_eq!(square_x(), 25);
+/// ```
+///
+/// By-value closures can also be passed to higher-level functions through a
+/// `FnOnce` parameter.
+///
+/// ```
+/// fn consume_with_relish<F>(func: F)
+/// where F: FnOnce() -> String
+/// {
+/// // `func` consumes its captured variables, so it cannot be run more
+/// // than once
+/// println!("Consumed: {}", func());
+///
+/// println!("Delicious!");
+///
+/// // Attempting to invoke `func()` again will produce a `use of moved
+/// // value` compile error for `func`
+/// }
+///
+/// let x = String::from("x");
+/// let consume_and_return_x = move || x;
+/// consume_with_relish(consume_and_return_x);
+///
+/// // `consume_and_return_x` can no longer be invoked at this point
+/// ```
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
}
mod impls {
- use marker::Sized;
- use super::{Fn, FnMut, FnOnce};
-
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a,A,F:?Sized> Fn<A> for &'a F
where F : Fn<A>
/// Creates a globally fresh place.
fn make_place() -> Self;
}
+
+/// A trait for types which have success and error states and are meant to work
+/// with the question mark operator.
+/// When the `?` operator is used with a value, whether the value is in the
+/// success or error state is determined by calling `translate`.
+///
+/// This trait is **very** experimental, it will probably be iterated on heavily
+/// before it is stabilised. Implementors should expect change. Users of `?`
+/// should not rely on any implementations of `Carrier` other than `Result`,
+/// i.e., you should not expect `?` to continue to work with `Option`, etc.
+#[unstable(feature = "question_mark_carrier", issue = "31436")]
+pub trait Carrier {
+ /// The type of the value when computation succeeds.
+ type Success;
+ /// The type of the value when computation errors out.
+ type Error;
+
+ /// Create a `Carrier` from a success value.
+ fn from_success(Self::Success) -> Self;
+
+ /// Create a `Carrier` from an error value.
+ fn from_error(Self::Error) -> Self;
+
+ /// Translate this `Carrier` to another implementation of `Carrier` with the
+ /// same associated types.
+ fn translate<T>(self) -> T where T: Carrier<Success=Self::Success, Error=Self::Error>;
+}
+
+#[unstable(feature = "question_mark_carrier", issue = "31436")]
+impl<U, V> Carrier for Result<U, V> {
+ type Success = U;
+ type Error = V;
+
+ fn from_success(u: U) -> Result<U, V> {
+ Ok(u)
+ }
+
+ fn from_error(e: V) -> Result<U, V> {
+ Err(e)
+ }
+
+ fn translate<T>(self) -> T
+ where T: Carrier<Success=U, Error=V>
+ {
+ match self {
+ Ok(u) => T::from_success(u),
+ Err(e) => T::from_error(e),
+ }
+ }
+}
+
+struct _DummyErrorType;
+
+impl Carrier for _DummyErrorType {
+ type Success = ();
+ type Error = ();
+
+ fn from_success(_: ()) -> _DummyErrorType {
+ _DummyErrorType
+ }
+
+ fn from_error(_: ()) -> _DummyErrorType {
+ _DummyErrorType
+ }
+
+ fn translate<T>(self) -> T
+ where T: Carrier<Success=(), Error=()>
+ {
+ T::from_success(())
+ }
+}
#![stable(feature = "rust1", since = "1.0.0")]
-use self::Option::*;
-
-use clone::Clone;
-use convert::From;
-use default::Default;
-use iter::ExactSizeIterator;
-use iter::{Iterator, DoubleEndedIterator, FromIterator, IntoIterator};
+use iter::{FromIterator, FusedIterator};
use mem;
-use ops::FnOnce;
-use result::Result::{Ok, Err};
-use result::Result;
// Note that this is not a lang item per se, but it has a hidden dependency on
// `Iterator`, which is one. The compiler assumes that the `next` method of
}
impl<A> ExactSizeIterator for Item<A> {}
+impl<A> FusedIterator for Item<A> {}
/// An iterator over a reference of the contained item in an Option.
#[stable(feature = "rust1", since = "1.0.0")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> ExactSizeIterator for Iter<'a, A> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, A> FusedIterator for Iter<'a, A> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Clone for Iter<'a, A> {
fn clone(&self) -> Iter<'a, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> ExactSizeIterator for IterMut<'a, A> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, A> FusedIterator for IterMut<'a, A> {}
+
/// An iterator over the item contained inside an Option.
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IntoIter<A> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<A> FusedIterator for IntoIter<A> {}
+
/////////////////////////////////////////////////////////////////////////////
// FromIterator
/////////////////////////////////////////////////////////////////////////////
#![stable(feature = "rust1", since = "1.0.0")]
-use clone::Clone;
use intrinsics;
use ops::{CoerceUnsized, Deref};
use fmt;
use hash;
-use option::Option::{self, Some, None};
-use marker::{Copy, PhantomData, Send, Sized, Sync, Unsize};
+use marker::{PhantomData, Unsize};
use mem;
use nonzero::NonZero;
-use cmp::{PartialEq, Eq, Ord, PartialOrd};
use cmp::Ordering::{self, Less, Equal, Greater};
// FIXME #19649: intrinsic docs don't render, so these have no docs :(
/// let x = 12;
/// let y = &x as *const i32;
///
-/// unsafe { println!("{}", std::ptr::read(y)); }
+/// unsafe {
+/// assert_eq!(std::ptr::read(y), 12);
+/// }
/// ```
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
tmp
}
-#[allow(missing_docs)]
-#[inline(always)]
-#[unstable(feature = "filling_drop",
- reason = "may play a larger role in std::ptr future extensions",
- issue = "5016")]
-pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
- // Copy the data out from `dest`:
- let tmp = read(&*dest);
-
- // Now mark `dest` as dropped:
- write_bytes(dest, mem::POST_DROP_U8, 1);
-
- tmp
-}
-
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
///
/// unsafe {
/// std::ptr::write(y, z);
-/// println!("{}", std::ptr::read(y));
+/// assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
#[inline]
/// let x = 12;
/// let y = &x as *const i32;
///
-/// unsafe { println!("{}", std::ptr::read_volatile(y)); }
+/// unsafe {
+/// assert_eq!(std::ptr::read_volatile(y), 12);
+/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
///
/// unsafe {
/// std::ptr::write_volatile(y, z);
-/// println!("{}", std::ptr::read_volatile(y));
+/// assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#![stable(feature = "rust1", since = "1.0.0")]
-use self::Result::{Ok, Err};
-
-use clone::Clone;
use fmt;
-use iter::{Iterator, DoubleEndedIterator, FromIterator, ExactSizeIterator, IntoIterator};
-use ops::FnOnce;
-use option::Option::{self, None, Some};
+use iter::{FromIterator, FusedIterator};
/// `Result` is a type that represents either success (`Ok`) or failure (`Err`).
///
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Iter<'a, T> { Iter { inner: self.inner } }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for IterMut<'a, T> {}
+
/// An iterator over the value in a `Ok` variant of a `Result`.
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<T> FusedIterator for IntoIter<T> {}
+
/////////////////////////////////////////////////////////////////////////////
// FromIterator
/////////////////////////////////////////////////////////////////////////////
// * The `raw` and `bytes` submodules.
// * Boilerplate trait implementations.
-use clone::Clone;
-use cmp::{Ordering, PartialEq, PartialOrd, Eq, Ord};
-use cmp::Ordering::{Less, Equal, Greater};
+use cmp::Ordering::{self, Less, Equal, Greater};
use cmp;
-use convert::AsRef;
-use default::Default;
use fmt;
use intrinsics::assume;
use iter::*;
-use ops::{FnMut, self};
-use ops::RangeFull;
-use option::Option;
-use option::Option::{None, Some};
-use result::Result;
-use result::Result::{Ok, Err};
+use ops::{self, RangeFull};
use ptr;
use mem;
-use marker::{Copy, Send, Sync, self};
+use marker;
use iter_private::TrustedRandomAccess;
#[repr(C)]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Iter<'a, T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Iter<'a, T> { Iter { ptr: self.ptr, end: self.end, _marker: self._marker } }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for IterMut<'a, T> {}
+
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
#[doc(hidden)]
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, P> FusedIterator for Split<'a, T, P> where P: FnMut(&T) -> bool {}
+
/// An iterator over the subslices of the vector which are separated
/// by elements that match `pred`.
#[stable(feature = "rust1", since = "1.0.0")]
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, P> FusedIterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {}
+
/// An private iterator over subslices separated by elements that
/// match a predicate function, splitting at most a fixed number of
/// times.
self.inner.size_hint()
}
}
+
+ #[unstable(feature = "fused", issue = "35602")]
+ impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P>
+ where P: FnMut(&T) -> bool {}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Windows<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Windows<'a, T> {}
+
/// An iterator over a slice in (non-overlapping) chunks (`size` elements at a
/// time).
///
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Chunks<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for Chunks<'a, T> {}
+
/// An iterator over a slice in (non-overlapping) mutable chunks (`size`
/// elements at a time). When the slice len is not evenly divided by the chunk
/// size, the last slice of the iteration will be the remainder.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T> FusedIterator for ChunksMut<'a, T> {}
+
//
// Free functions
//
use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
use char;
-use clone::Clone;
-use convert::AsRef;
-use default::Default;
use fmt;
-use iter::ExactSizeIterator;
-use iter::{Map, Cloned, Iterator, DoubleEndedIterator};
-use marker::Sized;
+use iter::{Map, Cloned, FusedIterator};
use mem;
-use ops::{Fn, FnMut, FnOnce};
-use option::Option::{self, None, Some};
-use result::Result::{self, Ok, Err};
-use slice::{self, SliceExt};
+use slice;
pub mod pattern;
/// Reads the last code point out of a byte iterator (assuming a
/// UTF-8-like encoding).
#[inline]
-fn next_code_point_reverse<'a,
- I: DoubleEndedIterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
+fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option<u32>
+ where I: DoubleEndedIterator<Item = &'a u8>,
+{
// Decode UTF-8
let w = match bytes.next_back() {
None => return None,
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Chars<'a> {}
+
impl<'a> Chars<'a> {
/// View the underlying data as a subslice of the original data.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for CharIndices<'a> {}
+
impl<'a> CharIndices<'a> {
/// View the underlying data as a subslice of the original data.
///
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Bytes<'a> {}
+
/// This macro generates a Clone impl for string pattern API
/// wrapper types of the form X<'a, P>
macro_rules! derive_pattern_clone {
}
}
+ #[unstable(feature = "fused", issue = "35602")]
+ impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {}
+
+ #[unstable(feature = "fused", issue = "35602")]
+ impl<'a, P: Pattern<'a>> FusedIterator for $reverse_iterator<'a, P>
+ where P::Searcher: ReverseSearcher<'a> {}
+
generate_pattern_iterators!($($t)* with $(#[$common_stability_attribute])*,
$forward_iterator,
$reverse_iterator, $iterty);
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Lines<'a> {}
+
/// Created with the method [`lines_any()`].
///
/// [`lines_any()`]: ../../std/primitive.str.html#method.lines_any
}
}
+#[unstable(feature = "fused", issue = "35602")]
+#[allow(deprecated)]
+impl<'a> FusedIterator for LinesAny<'a> {}
+
/*
Section: Comparing strings
*/
*/
mod traits {
- use cmp::{Ord, Ordering, PartialEq, PartialOrd, Eq};
- use option::Option;
- use option::Option::Some;
+ use cmp::Ordering;
use ops;
- use str::{StrExt, eq_slice};
+ use str::eq_slice;
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for str {
reason = "API not fully fleshed out and ready to be stabilized",
issue = "27721")]
-use prelude::v1::*;
-
use cmp;
use fmt;
use usize;
use self::Ordering::*;
-use marker::{Send, Sync};
-
use intrinsics;
use cell::UnsafeCell;
-
-use result::Result::{self, Ok, Err};
-
-use default::Default;
use fmt;
/// A boolean type which can be safely shared between threads.
// See src/libstd/primitive_docs.rs for documentation.
-use clone::Clone;
use cmp::*;
use cmp::Ordering::*;
-use default::Default;
-use option::Option;
-use option::Option::Some;
// FIXME(#19630) Remove this work-around
macro_rules! e {
#[test]
fn test_decode_utf8() {
- use core::char::*;
- use core::iter::FromIterator;
-
- for &(str, bs) in [("", &[] as &[u8]),
- ("A", &[0x41u8] as &[u8]),
- ("�", &[0xC1u8, 0x81u8] as &[u8]),
- ("♥", &[0xE2u8, 0x99u8, 0xA5u8]),
- ("♥A", &[0xE2u8, 0x99u8, 0xA5u8, 0x41u8] as &[u8]),
- ("�", &[0xE2u8, 0x99u8] as &[u8]),
- ("�A", &[0xE2u8, 0x99u8, 0x41u8] as &[u8]),
- ("�", &[0xC0u8] as &[u8]),
- ("�A", &[0xC0u8, 0x41u8] as &[u8]),
- ("�", &[0x80u8] as &[u8]),
- ("�A", &[0x80u8, 0x41u8] as &[u8]),
- ("�", &[0xFEu8] as &[u8]),
- ("�A", &[0xFEu8, 0x41u8] as &[u8]),
- ("�", &[0xFFu8] as &[u8]),
- ("�A", &[0xFFu8, 0x41u8] as &[u8])].into_iter() {
- assert!(Iterator::eq(str.chars(),
- decode_utf8(bs.into_iter().map(|&b|b))
- .map(|r_b| r_b.unwrap_or('\u{FFFD}'))),
- "chars = {}, bytes = {:?}, decoded = {:?}", str, bs,
- Vec::from_iter(decode_utf8(bs.into_iter().map(|&b|b))
- .map(|r_b| r_b.unwrap_or('\u{FFFD}'))));
+ macro_rules! assert_decode_utf8 {
+ ($input_bytes: expr, $expected_str: expr) => {
+ let input_bytes: &[u8] = &$input_bytes;
+ let s = char::decode_utf8(input_bytes.iter().cloned())
+ .map(|r_b| r_b.unwrap_or('\u{FFFD}'))
+ .collect::<String>();
+ assert_eq!(s, $expected_str,
+ "input bytes: {:?}, expected str: {:?}, result: {:?}",
+ input_bytes, $expected_str, s);
+ assert_eq!(String::from_utf8_lossy(&$input_bytes), $expected_str);
+ }
}
+
+ assert_decode_utf8!([], "");
+ assert_decode_utf8!([0x41], "A");
+ assert_decode_utf8!([0xC1, 0x81], "��");
+ assert_decode_utf8!([0xE2, 0x99, 0xA5], "♥");
+ assert_decode_utf8!([0xE2, 0x99, 0xA5, 0x41], "♥A");
+ assert_decode_utf8!([0xE2, 0x99], "�");
+ assert_decode_utf8!([0xE2, 0x99, 0x41], "�A");
+ assert_decode_utf8!([0xC0], "�");
+ assert_decode_utf8!([0xC0, 0x41], "�A");
+ assert_decode_utf8!([0x80], "�");
+ assert_decode_utf8!([0x80, 0x41], "�A");
+ assert_decode_utf8!([0xFE], "�");
+ assert_decode_utf8!([0xFE, 0x41], "�A");
+ assert_decode_utf8!([0xFF], "�");
+ assert_decode_utf8!([0xFF, 0x41], "�A");
+ assert_decode_utf8!([0xC0, 0x80], "��");
+
+ // Surrogates
+ assert_decode_utf8!([0xED, 0x9F, 0xBF], "\u{D7FF}");
+ assert_decode_utf8!([0xED, 0xA0, 0x80], "���");
+ assert_decode_utf8!([0xED, 0xBF, 0x80], "���");
+ assert_decode_utf8!([0xEE, 0x80, 0x80], "\u{E000}");
+
+ // char::MAX
+ assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0xBF], "\u{10FFFF}");
+ assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0x41], "�A");
+ assert_decode_utf8!([0xF4, 0x90, 0x80, 0x80], "����");
+
+    // 5- and 6-byte sequences
+ // Part of the original design of UTF-8,
+ // but invalid now that UTF-8 is artificially restricted to match the range of UTF-16.
+ assert_decode_utf8!([0xF8, 0x80, 0x80, 0x80, 0x80], "�����");
+ assert_decode_utf8!([0xFC, 0x80, 0x80, 0x80, 0x80, 0x80], "������");
}
eh::find_eh_action(lsda, &eh_context)
}
-// *** Delete after a new snapshot ***
-#[cfg(all(stage0, any(target_os = "ios", not(target_arch = "arm"))))]
-#[lang = "eh_personality_catch"]
-#[no_mangle]
-pub unsafe extern "C" fn rust_eh_personality_catch(version: c_int,
- actions: uw::_Unwind_Action,
- exception_class: uw::_Unwind_Exception_Class,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- rust_eh_personality(version, actions, exception_class, ue_header, context)
-}
-
-// *** Delete after a new snapshot ***
-#[cfg(all(stage0, target_arch = "arm", not(target_os = "ios")))]
-#[lang = "eh_personality_catch"]
-#[no_mangle]
-pub unsafe extern "C" fn rust_eh_personality_catch(state: uw::_Unwind_State,
- ue_header: *mut uw::_Unwind_Exception,
- context: *mut uw::_Unwind_Context)
- -> uw::_Unwind_Reason_Code {
- rust_eh_personality(state, ue_header, context)
-}
-
// See docs in the `unwind` module.
#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
#[lang = "eh_unwind_resume"]
// This is considered acceptable, because the behavior of throwing exceptions
// through a C ABI boundary is undefined.
-// *** Delete after a new snapshot ***
-#[cfg(stage0)]
-#[lang = "eh_personality_catch"]
-#[cfg(not(test))]
-unsafe extern "C" fn rust_eh_personality_catch(exceptionRecord: *mut c::EXCEPTION_RECORD,
- establisherFrame: c::LPVOID,
- contextRecord: *mut c::CONTEXT,
- dispatcherContext: *mut c::DISPATCHER_CONTEXT)
- -> c::EXCEPTION_DISPOSITION {
- rust_eh_personality(exceptionRecord,
- establisherFrame,
- contextRecord,
- dispatcherContext)
-}
-
#[lang = "eh_personality"]
#[cfg(not(test))]
unsafe extern "C" fn rust_eh_personality(exceptionRecord: *mut c::EXCEPTION_RECORD,
self.start == self.end
}
- pub fn as_str_slice(&self) -> &'doc str {
+ pub fn as_str(&self) -> &'doc str {
str::from_utf8(&self.data[self.start..self.end]).unwrap()
}
- pub fn as_str(&self) -> String {
- self.as_str_slice().to_string()
+ pub fn to_string(&self) -> String {
+ self.as_str().to_string()
}
}
Ok(char::from_u32(doc_as_u32(self.next_doc(EsChar)?)).unwrap())
}
fn read_str(&mut self) -> DecodeResult<String> {
- Ok(self.next_doc(EsStr)?.as_str())
+ Ok(self.next_doc(EsStr)?.to_string())
}
// Compound types:
}
}
+ if label == "Krate" {
+ // special case
+ return Ok(DepNode::Krate);
+ }
+
check! {
CollectItem,
BorrowCheck,
```
"##,
+E0478: r##"
+A lifetime bound was not satisfied.
+
+Erroneous code example:
+
+```compile_fail,E0478
+// Check that the explicit lifetime bound (`'SnowWhite`, in this example) must
+// outlive all the superbounds from the trait (`'kiss`, in this example).
+
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite> {
+ child: Box<Wedding<'kiss> + 'SnowWhite>,
+ // error: lifetime bound not satisfied
+}
+```
+
+In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss`
+lifetime but the declaration of the `Prince` struct doesn't enforce it. To fix
+this issue, you need to specify it:
+
+```
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'SnowWhite
+                                          // must live longer than 'kiss.
+ child: Box<Wedding<'kiss> + 'SnowWhite>, // And now it's all good!
+}
+```
+"##,
+
E0496: r##"
A lifetime name is shadowing another lifetime name. Erroneous code example:
```
"##,
+E0525: r##"
+A closure was used, but it doesn't implement the trait expected by the
+calling context.
+
+Erroneous code example:
+
+```compile_fail,E0525
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+ let x = X;
+ let closure = |_| foo(x); // error: expected a closure that implements
+ // the `Fn` trait, but this closure only
+ // implements `FnOnce`
+ bar(closure);
+}
+```
+
+In the example above, `closure` is an `FnOnce` closure whereas the `bar`
+function expected an `Fn` closure. In this case, the issue is simple to fix:
+implement the `Copy` and `Clone` traits on `struct X`, and the code will
+compile:
+
+```
+#[derive(Clone, Copy)] // We implement `Clone` and `Copy` traits.
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+ let x = X;
+ let closure = |_| foo(x);
+ bar(closure); // ok!
+}
+```
+
+To understand better how closures work in Rust, read:
+https://doc.rust-lang.org/book/closures.html
+"##,
+
}
E0475, // index of slice outside its lifetime
E0476, // lifetime of the source pointer does not outlive lifetime bound...
E0477, // the type `..` does not fulfill the required lifetime...
- E0478, // lifetime bound not satisfied
E0479, // the type `..` (provided as the value of a type parameter) is...
E0480, // lifetime of method receiver does not outlive the method call
E0481, // lifetime of function argument does not outlive the function call
E0490, // a value of type `..` is borrowed for too long
E0491, // in type `..`, reference has a longer lifetime than the data it...
E0495, // cannot infer an appropriate lifetime due to conflicting requirements
- E0525 // expected a closure that implements `..` but this closure only implements `..`
}
let inplace_finalize = ["ops", "InPlace", "finalize"];
let make_call = |this: &mut LoweringContext, p, args| {
- let path = this.core_path(e.span, p);
+ let path = this.std_path(e.span, p);
let path = this.expr_path(path, ThinVec::new());
this.expr_call(e.span, path, args)
};
ast_expr: &Expr,
path: &[&str],
fields: &[(&str, &P<Expr>)]) -> P<hir::Expr> {
- let strs = this.std_path(&iter::once(&"ops")
- .chain(path)
- .map(|s| *s)
- .collect::<Vec<_>>());
-
- let structpath = this.path_global(ast_expr.span, strs);
+ let struct_path = this.std_path(ast_expr.span,
+ &iter::once(&"ops").chain(path)
+ .map(|s| *s)
+ .collect::<Vec<_>>());
let hir_expr = if fields.len() == 0 {
- this.expr_path(structpath, ast_expr.attrs.clone())
+ this.expr_path(struct_path, ast_expr.attrs.clone())
} else {
let fields = fields.into_iter().map(|&(s, e)| {
let expr = this.lower_expr(&e);
}).collect();
let attrs = ast_expr.attrs.clone();
- this.expr_struct(ast_expr.span, structpath, fields, None, attrs)
+ this.expr_struct(ast_expr.span, struct_path, fields, None, attrs)
};
this.signal_block_expr(hir_vec![],
// `match ::std::iter::Iterator::next(&mut iter) { ... }`
let match_expr = {
- let next_path = {
- let strs = self.std_path(&["iter", "Iterator", "next"]);
-
- self.path_global(e.span, strs)
- };
+ let next_path = self.std_path(e.span, &["iter", "Iterator", "next"]);
let iter = self.expr_ident(e.span, iter, iter_pat.id);
let ref_mut_iter = self.expr_mut_addr_of(e.span, iter);
let next_path = self.expr_path(next_path, ThinVec::new());
// `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
let into_iter_expr = {
- let into_iter_path = {
- let strs = self.std_path(&["iter", "IntoIterator", "into_iter"]);
-
- self.path_global(e.span, strs)
- };
+ let into_iter_path = self.std_path(e.span,
+ &["iter", "IntoIterator", "into_iter"]);
let into_iter = self.expr_path(into_iter_path, ThinVec::new());
self.expr_call(e.span, into_iter, hir_vec![head])
// to:
//
// {
- // match <expr> {
+ // match { Carrier::translate( { <expr> } ) } {
// Ok(val) => val,
- // Err(err) => {
- // return Err(From::from(err))
- // }
+ // Err(err) => { return Carrier::from_error(From::from(err)); }
// }
// }
- // expand <expr>
- let sub_expr = self.lower_expr(sub_expr);
+ // { Carrier::translate( { <expr> } ) }
+ let discr = {
+ // expand <expr>
+ let sub_expr = self.lower_expr(sub_expr);
+ let sub_expr = self.signal_block_expr(hir_vec![],
+ sub_expr,
+ e.span,
+ hir::PopUnstableBlock,
+ ThinVec::new());
+
+ let path = self.std_path(e.span, &["ops", "Carrier", "translate"]);
+ let path = self.expr_path(path, ThinVec::new());
+ let call = self.expr_call(e.span, path, hir_vec![sub_expr]);
+
+ self.signal_block_expr(hir_vec![],
+ call,
+ e.span,
+ hir::PushUnstableBlock,
+ ThinVec::new())
+ };
// Ok(val) => val
let ok_arm = {
self.arm(hir_vec![ok_pat], val_expr)
};
- // Err(err) => return Err(From::from(err))
+ // Err(err) => { return Carrier::from_error(From::from(err)); }
let err_arm = {
let err_ident = self.str_to_ident("err");
let err_local = self.pat_ident(e.span, err_ident);
let from_expr = {
- let path = self.std_path(&["convert", "From", "from"]);
- let path = self.path_global(e.span, path);
+ let path = self.std_path(e.span, &["convert", "From", "from"]);
let from = self.expr_path(path, ThinVec::new());
let err_expr = self.expr_ident(e.span, err_ident, err_local.id);
self.expr_call(e.span, from, hir_vec![err_expr])
};
- let err_expr = {
- let path = self.std_path(&["result", "Result", "Err"]);
- let path = self.path_global(e.span, path);
- let err_ctor = self.expr_path(path, ThinVec::new());
- self.expr_call(e.span, err_ctor, hir_vec![from_expr])
+ let from_err_expr = {
+ let path = self.std_path(e.span, &["ops", "Carrier", "from_error"]);
+ let from_err = self.expr_path(path, ThinVec::new());
+ self.expr_call(e.span, from_err, hir_vec![from_expr])
};
- let err_pat = self.pat_err(e.span, err_local);
+
let ret_expr = self.expr(e.span,
- hir::Expr_::ExprRet(Some(err_expr)),
- ThinVec::new());
- self.arm(hir_vec![err_pat], ret_expr)
+ hir::Expr_::ExprRet(Some(from_err_expr)),
+ ThinVec::new());
+ let ret_stmt = self.stmt_expr(ret_expr);
+ let block = self.signal_block_stmt(ret_stmt, e.span,
+ hir::PushUnstableBlock, ThinVec::new());
+
+ let err_pat = self.pat_err(e.span, err_local);
+ self.arm(hir_vec![err_pat], block)
};
- return self.expr_match(e.span, sub_expr, hir_vec![err_arm, ok_arm],
+ return self.expr_match(e.span, discr, hir_vec![err_arm, ok_arm],
hir::MatchSource::TryDesugar);
}
(respan(sp, hir::StmtDecl(P(decl), self.next_id())), pat_id)
}
+ // Turns `<expr>` into `<expr>;`, note that this produces a StmtSemi, not a
+ // StmtExpr.
+ fn stmt_expr(&self, expr: P<hir::Expr>) -> hir::Stmt {
+ hir::Stmt {
+ span: expr.span,
+ node: hir::StmtSemi(expr, self.next_id()),
+ }
+ }
+
fn block_expr(&mut self, expr: P<hir::Expr>) -> P<hir::Block> {
self.block_all(expr.span, hir::HirVec::new(), Some(expr))
}
}
fn pat_ok(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
- let ok = self.std_path(&["result", "Result", "Ok"]);
- let path = self.path_global(span, ok);
+ let path = self.std_path(span, &["result", "Result", "Ok"]);
self.pat_enum(span, path, hir_vec![pat])
}
fn pat_err(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
- let err = self.std_path(&["result", "Result", "Err"]);
- let path = self.path_global(span, err);
+ let path = self.std_path(span, &["result", "Result", "Err"]);
self.pat_enum(span, path, hir_vec![pat])
}
fn pat_some(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
- let some = self.std_path(&["option", "Option", "Some"]);
- let path = self.path_global(span, some);
+ let path = self.std_path(span, &["option", "Option", "Some"]);
self.pat_enum(span, path, hir_vec![pat])
}
fn pat_none(&mut self, span: Span) -> P<hir::Pat> {
- let none = self.std_path(&["option", "Option", "None"]);
- let path = self.path_global(span, none);
+ let path = self.std_path(span, &["option", "Option", "None"]);
self.pat_enum(span, path, hir_vec![])
}
}
}
- fn std_path(&mut self, components: &[&str]) -> Vec<Name> {
+ fn std_path_components(&mut self, components: &[&str]) -> Vec<Name> {
let mut v = Vec::new();
if let Some(s) = self.crate_root {
v.push(token::intern(s));
// Given suffix ["b","c","d"], returns path `::std::b::c::d` when
// `fld.cx.use_std`, and `::core::b::c::d` otherwise.
- fn core_path(&mut self, span: Span, components: &[&str]) -> hir::Path {
- let idents = self.std_path(components);
+ fn std_path(&mut self, span: Span, components: &[&str]) -> hir::Path {
+ let idents = self.std_path_components(components);
self.path_global(span, idents)
}
});
self.expr_block(block, attrs)
}
+
+ fn signal_block_stmt(&mut self,
+ stmt: hir::Stmt,
+ span: Span,
+ rule: hir::BlockCheckMode,
+ attrs: ThinVec<Attribute>)
+ -> P<hir::Expr> {
+ let id = self.next_id();
+ let block = P(hir::Block {
+ rules: rule,
+ span: span,
+ id: id,
+ stmts: hir_vec![stmt],
+ expr: None,
+ });
+ self.expr_block(block, attrs)
+ }
}
RootInlinedParent(parent) => match *parent {
InlinedItem::Item(def_id, _) |
InlinedItem::TraitItem(def_id, _) |
- InlinedItem::ImplItem(def_id, _) |
- InlinedItem::Foreign(def_id, _) =>
+ InlinedItem::ImplItem(def_id, _) =>
return DepNode::MetaData(def_id)
},
II::ImplItem(fld.fold_ops.new_def_id(d),
ii.map(|ii| fld.fold_impl_item(ii)))
}
- II::Foreign(d, i) => II::Foreign(fld.fold_ops.new_def_id(d),
- i.map(|i| fld.fold_foreign_item(i)))
};
let ii = map.forest.inlined_items.alloc(ii);
pub type CaptureModeMap = NodeMap<CaptureClause>;
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct TraitCandidate {
pub def_id: DefId,
pub import_id: Option<NodeId>,
}
}
- fn regions(&mut self, a: ty::Region, _: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, _: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
Ok(a)
}
}
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
- match r {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+ match *r {
// Never make variables for regions bound within the type itself,
// nor for erased regions.
ty::ReLateBound(..) |
}
}
- fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
debug!("{}.regions({:?}, {:?})",
self.tag(),
a,
pub fn note_and_explain_region(self,
err: &mut DiagnosticBuilder,
prefix: &str,
- region: ty::Region,
+ region: &'tcx ty::Region,
suffix: &str) {
fn item_scope_tag(item: &hir::Item) -> &'static str {
match item.node {
Some(span))
}
- let (description, span) = match region {
+ let (description, span) = match *region {
ty::ReScope(scope) => {
let new_string;
let unknown_scope = || {
}
fn free_regions_from_same_fn<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- sub: Region,
- sup: Region)
+ sub: &'tcx Region,
+ sup: &'tcx Region)
-> Option<FreeRegionsFromSameFn> {
debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup);
let (scope_id, fr1, fr2) = match (sub, sup) {
- (ReFree(fr1), ReFree(fr2)) => {
+ (&ReFree(fr1), &ReFree(fr2)) => {
if fr1.scope != fr2.scope {
return None
}
fn report_generic_bound_failure(&self,
origin: SubregionOrigin<'tcx>,
bound_kind: GenericKind<'tcx>,
- sub: Region)
+ sub: &'tcx Region)
{
// FIXME: it would be better to report the first error message
// with the span of the parameter itself, rather than the span
format!("the associated type `{}`", p),
};
- let mut err = match sub {
+ let mut err = match *sub {
ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => {
// Does the required lifetime have a nice name we can print?
let mut err = struct_span_err!(self.tcx.sess,
fn report_concrete_failure(&self,
origin: SubregionOrigin<'tcx>,
- sub: Region,
- sup: Region)
+ sub: &'tcx Region,
+ sup: &'tcx Region)
-> DiagnosticBuilder<'tcx> {
match origin {
infer::Subtype(trace) => {
fn report_sub_sup_conflict(&self,
var_origin: RegionVariableOrigin,
sub_origin: SubregionOrigin<'tcx>,
- sub_region: Region,
+ sub_region: &'tcx Region,
sup_origin: SubregionOrigin<'tcx>,
- sup_region: Region) {
+ sup_region: &'tcx Region) {
let mut err = self.report_inference_failure(var_origin);
self.tcx.note_and_explain_region(&mut err,
use ty::{self, Ty, TyCtxt, TypeFoldable};
use ty::fold::TypeFolder;
-use std::collections::hash_map::{self, Entry};
+use util::nodemap::FnvHashMap;
+use std::collections::hash_map::Entry;
use super::InferCtxt;
use super::unify_key::ToType;
pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
freshen_count: u32,
- freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
+ freshen_map: FnvHashMap<ty::InferTy, Ty<'tcx>>,
}
impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
TypeFreshener {
infcx: infcx,
freshen_count: 0,
- freshen_map: hash_map::HashMap::new(),
+ freshen_map: FnvHashMap(),
}
}
self.infcx.tcx
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
- match r {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+ match *r {
ty::ReEarlyBound(..) |
ty::ReLateBound(..) => {
// leave bound regions alone
ty::ReEmpty |
ty::ReErased => {
// replace all free regions with 'erased
- ty::ReErased
+ self.tcx().mk_region(ty::ReErased)
}
}
}
lattice::super_lattice_tys(self, a, b)
}
- fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
debug!("{}.regions({:?}, {:?})",
self.tag(),
a,
.map(|(&skol, &(br, ref regions))| {
let representative =
regions.iter()
- .filter(|r| !skol_resolution_map.contains_key(r))
+ .filter(|&&r| !skol_resolution_map.contains_key(r))
.cloned()
.next()
.unwrap_or_else(|| { // [1]
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
- a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
- r0: ty::Region)
- -> ty::Region {
+ a_map: &FnvHashMap<ty::BoundRegion, &'tcx ty::Region>,
+ r0: &'tcx ty::Region)
+ -> &'tcx ty::Region {
// Regions that pre-dated the LUB computation stay as they are.
if !is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
debug!("generalize_region(r0={:?}): \
replacing with {:?}, tainted={:?}",
r0, *a_br, tainted);
- return ty::ReLateBound(debruijn, *a_br);
+ return infcx.tcx.mk_region(ty::ReLateBound(debruijn, *a_br));
}
}
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
- a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
+ a_map: &FnvHashMap<ty::BoundRegion,
+ &'tcx ty::Region>,
a_vars: &[ty::RegionVid],
b_vars: &[ty::RegionVid],
- r0: ty::Region) -> ty::Region {
+ r0: &'tcx ty::Region)
+ -> &'tcx ty::Region {
if !is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
return r0;
if a_r.is_some() && b_r.is_some() && only_new_vars {
// Related to exactly one bound variable from each fn:
- return rev_lookup(span, a_map, a_r.unwrap());
+ return rev_lookup(infcx, span, a_map, a_r.unwrap());
} else if a_r.is_none() && b_r.is_none() {
// Not related to bound variables from either fn:
assert!(!r0.is_bound());
}
}
- fn rev_lookup(span: Span,
- a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
- r: ty::Region) -> ty::Region
+ fn rev_lookup<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ span: Span,
+ a_map: &FnvHashMap<ty::BoundRegion, &'tcx ty::Region>,
+ r: &'tcx ty::Region) -> &'tcx ty::Region
{
for (a_br, a_r) in a_map {
if *a_r == r {
- return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
+ return infcx.tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br));
}
}
span_bug!(
r);
}
- fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
+ fn fresh_bound_variable<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
+ debruijn: ty::DebruijnIndex)
+ -> &'tcx ty::Region {
infcx.region_vars.new_bound(debruijn)
}
}
}
fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>,
- map: &FnvHashMap<ty::BoundRegion, ty::Region>)
+ map: &FnvHashMap<ty::BoundRegion, &'tcx ty::Region>)
-> Vec<ty::RegionVid> {
map.iter()
- .map(|(_, r)| match *r {
+ .map(|(_, &r)| match *r {
ty::ReVar(r) => { r }
- r => {
+ _ => {
span_bug!(
fields.trace.origin.span(),
"found non-region-vid: {:?}",
.collect()
}
-fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
- match r {
+fn is_var_in_set(new_vars: &[ty::RegionVid], r: &ty::Region) -> bool {
+ match *r {
ty::ReVar(ref v) => new_vars.iter().any(|x| x == v),
_ => false
}
mut fldr: F)
-> T
where T: TypeFoldable<'tcx>,
- F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
+ F: FnMut(&'tcx ty::Region, ty::DebruijnIndex) -> &'tcx ty::Region,
{
tcx.fold_regions(unbound_value, &mut false, |region, current_depth| {
// we should only be encountering "escaping" late-bound regions here,
// because the ones at the current level should have been replaced
// with fresh variables
- assert!(match region {
+ assert!(match *region {
ty::ReLateBound(..) => false,
_ => true
});
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
fn tainted_regions(&self,
snapshot: &CombinedSnapshot,
- r: ty::Region,
+ r: &'tcx ty::Region,
directions: TaintDirections)
- -> FnvHashSet<ty::Region> {
+ -> FnvHashSet<&'tcx ty::Region> {
self.region_vars.tainted(&snapshot.region_vars_snapshot, r, directions)
}
pub fn skolemize_late_bound_regions<T>(&self,
binder: &ty::Binder<T>,
snapshot: &CombinedSnapshot)
- -> (T, SkolemizationMap)
+ -> (T, SkolemizationMap<'tcx>)
where T : TypeFoldable<'tcx>
{
let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| {
pub fn leak_check(&self,
overly_polymorphic: bool,
span: Span,
- skol_map: &SkolemizationMap,
+ skol_map: &SkolemizationMap<'tcx>,
snapshot: &CombinedSnapshot)
-> RelateResult<'tcx, ()>
{
for &tainted_region in &incoming_taints {
// Each skolemized should only be relatable to itself
// or new variables:
- match tainted_region {
+ match *tainted_region {
ty::ReVar(vid) => {
if new_vars.contains(&vid) {
warnings.extend(
/// to the depth of the predicate, in this case 1, so that the final
/// predicate is `for<'a> &'a int : Clone`.
pub fn plug_leaks<T>(&self,
- skol_map: SkolemizationMap,
+ skol_map: SkolemizationMap<'tcx>,
snapshot: &CombinedSnapshot,
value: &T) -> T
where T : TypeFoldable<'tcx>
// region back to the `ty::BoundRegion` that it originally
// represented. Because `leak_check` passed, we know that
// these taint sets are mutually disjoint.
- let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
+ let inv_skol_map: FnvHashMap<&'tcx ty::Region, ty::BoundRegion> =
skol_map
.iter()
.flat_map(|(&skol_br, &skol)| {
// (which ought not to escape the snapshot, but we
// don't check that) or itself
assert!(
- match r {
+ match *r {
ty::ReVar(_) => true,
ty::ReSkolemized(_, ref br1) => br == br1,
_ => false,
"leak-check would have us replace {:?} with {:?}",
r, br);
- ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone())
+ self.tcx.mk_region(ty::ReLateBound(
+ ty::DebruijnIndex::new(current_depth - 1), br.clone()))
}
}
});
///
/// Note: popping also occurs implicitly as part of `leak_check`.
pub fn pop_skolemized(&self,
- skol_map: SkolemizationMap,
+ skol_map: SkolemizationMap<'tcx>,
snapshot: &CombinedSnapshot)
{
debug!("pop_skolemized({:?})", skol_map);
lattice::super_lattice_tys(self, a, b)
}
- fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
debug!("{}.regions({:?}, {:?})",
self.tag(),
a,
/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized
/// region that each late-bound region was replaced with.
-pub type SkolemizationMap = FnvHashMap<ty::BoundRegion, ty::Region>;
+pub type SkolemizationMap<'tcx> = FnvHashMap<ty::BoundRegion, &'tcx ty::Region>;
/// Why did we require that the two types be related?
///
pub fn sub_regions(&self,
origin: SubregionOrigin<'tcx>,
- a: ty::Region,
- b: ty::Region) {
+ a: &'tcx ty::Region,
+ b: &'tcx ty::Region) {
debug!("sub_regions({:?} <: {:?})", a, b);
self.region_vars.make_subregion(origin, a, b);
}
pub fn region_outlives_predicate(&self,
span: Span,
- predicate: &ty::PolyRegionOutlivesPredicate)
+ predicate: &ty::PolyRegionOutlivesPredicate<'tcx>)
-> UnitResult<'tcx>
{
self.commit_if_ok(|snapshot| {
.new_key(None)
}
- pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region {
- ty::ReVar(self.region_vars.new_region_var(origin))
+ pub fn next_region_var(&self, origin: RegionVariableOrigin)
+ -> &'tcx ty::Region {
+ self.tcx.mk_region(ty::ReVar(self.region_vars.new_region_var(origin)))
}
/// Create a region inference variable for the given
pub fn region_var_for_def(&self,
span: Span,
def: &ty::RegionParameterDef)
- -> ty::Region {
+ -> &'tcx ty::Region {
self.next_region_var(EarlyBoundRegion(span, def.name))
}
})
}
- pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region {
+ pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> &'tcx ty::Region {
self.region_vars.new_bound(debruijn)
}
span: Span,
lbrct: LateBoundRegionConversionTime,
value: &ty::Binder<T>)
- -> (T, FnvHashMap<ty::BoundRegion,ty::Region>)
+ -> (T, FnvHashMap<ty::BoundRegion, &'tcx ty::Region>)
where T : TypeFoldable<'tcx>
{
self.tcx.replace_late_bound_regions(
pub fn verify_generic_bound(&self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
- a: ty::Region,
- bound: VerifyBound) {
+ a: &'tcx ty::Region,
+ bound: VerifyBound<'tcx>) {
debug!("verify_generic_bound({:?}, {:?} <: {:?})",
kind,
a,
self.tcx.region_maps.temporary_scope(rvalue_id)
}
- pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+ pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture<'tcx>> {
self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned()
}
struct ConstraintGraph<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
graph_name: String,
- map: &'a FnvHashMap<Constraint, SubregionOrigin<'tcx>>,
+ map: &'a FnvHashMap<Constraint<'tcx>, SubregionOrigin<'tcx>>,
node_ids: FnvHashMap<Node, usize>,
}
// type Edge = Constraint;
#[derive(Clone, PartialEq, Eq, Debug, Copy)]
-enum Edge {
- Constraint(Constraint),
+enum Edge<'tcx> {
+ Constraint(Constraint<'tcx>),
EnclScope(CodeExtent, CodeExtent),
}
impl<'a, 'gcx, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'gcx, 'tcx> {
type Node = Node;
- type Edge = Edge;
+ type Edge = Edge<'tcx>;
fn graph_id(&self) -> dot::Id {
dot::Id::new(&*self.graph_name).unwrap()
}
Constraint::ConstrainVarSubVar(rv_1, rv_2) =>
(Node::RegionVid(rv_1), Node::RegionVid(rv_2)),
Constraint::ConstrainRegSubVar(r_1, rv_2) =>
- (Node::Region(r_1), Node::RegionVid(rv_2)),
+ (Node::Region(*r_1), Node::RegionVid(rv_2)),
Constraint::ConstrainVarSubReg(rv_1, r_2) =>
- (Node::RegionVid(rv_1), Node::Region(r_2)),
+ (Node::RegionVid(rv_1), Node::Region(*r_2)),
Constraint::ConstrainRegSubReg(r_1, r_2) =>
- (Node::Region(r_1), Node::Region(r_2)),
+ (Node::Region(*r_1), Node::Region(*r_2)),
}
}
impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> {
type Node = Node;
- type Edge = Edge;
+ type Edge = Edge<'tcx>;
fn nodes(&self) -> dot::Nodes<Node> {
let mut set = FnvHashSet();
for node in self.node_ids.keys() {
debug!("constraint graph has {} nodes", set.len());
set.into_iter().collect()
}
- fn edges(&self) -> dot::Edges<Edge> {
+ fn edges(&self) -> dot::Edges<Edge<'tcx>> {
debug!("constraint graph has {} edges", self.map.len());
let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect();
self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup)));
debug!("region graph has {} edges", v.len());
Cow::Owned(v)
}
- fn source(&self, edge: &Edge) -> Node {
+ fn source(&self, edge: &Edge<'tcx>) -> Node {
let (n1, _) = edge_to_nodes(edge);
debug!("edge {:?} has source {:?}", edge, n1);
n1
}
- fn target(&self, edge: &Edge) -> Node {
+ fn target(&self, edge: &Edge<'tcx>) -> Node {
let (_, n2) = edge_to_nodes(edge);
debug!("edge {:?} has target {:?}", edge, n2);
n2
}
}
-pub type ConstraintMap<'tcx> = FnvHashMap<Constraint, SubregionOrigin<'tcx>>;
+pub type ConstraintMap<'tcx> = FnvHashMap<Constraint<'tcx>, SubregionOrigin<'tcx>>;
fn dump_region_constraints_to<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
map: &ConstraintMap<'tcx>,
// A constraint that influences the inference process.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-pub enum Constraint {
+pub enum Constraint<'tcx> {
// One region variable is subregion of another
ConstrainVarSubVar(RegionVid, RegionVid),
// Concrete region is subregion of region variable
- ConstrainRegSubVar(Region, RegionVid),
+ ConstrainRegSubVar(&'tcx Region, RegionVid),
// Region variable is subregion of concrete region. This does not
// directly affect inference, but instead is checked after
// inference is complete.
- ConstrainVarSubReg(RegionVid, Region),
+ ConstrainVarSubReg(RegionVid, &'tcx Region),
// A constraint where neither side is a variable. This does not
// directly affect inference, but instead is checked after
// inference is complete.
- ConstrainRegSubReg(Region, Region),
+ ConstrainRegSubReg(&'tcx Region, &'tcx Region),
}
// VerifyGenericBound(T, _, R, RS): The parameter type `T` (or
pub struct Verify<'tcx> {
kind: GenericKind<'tcx>,
origin: SubregionOrigin<'tcx>,
- region: Region,
- bound: VerifyBound,
+ region: &'tcx Region,
+ bound: VerifyBound<'tcx>,
}
#[derive(Copy, Clone, PartialEq, Eq)]
// particular region (let's call it `'min`) meets some bound.
// The bound is described the by the following grammar:
#[derive(Debug)]
-pub enum VerifyBound {
+pub enum VerifyBound<'tcx> {
// B = exists {R} --> some 'r in {R} must outlive 'min
//
// Put another way, the subject value is known to outlive all
// regions in {R}, so if any of those outlives 'min, then the
// bound is met.
- AnyRegion(Vec<Region>),
+ AnyRegion(Vec<&'tcx Region>),
// B = forall {R} --> all 'r in {R} must outlive 'min
//
// Put another way, the subject value is known to outlive some
// region in {R}, so if all of those outlives 'min, then the bound
// is met.
- AllRegions(Vec<Region>),
+ AllRegions(Vec<&'tcx Region>),
// B = exists {B} --> 'min must meet some bound b in {B}
- AnyBound(Vec<VerifyBound>),
+ AnyBound(Vec<VerifyBound<'tcx>>),
// B = forall {B} --> 'min must meet all bounds b in {B}
- AllBounds(Vec<VerifyBound>),
+ AllBounds(Vec<VerifyBound<'tcx>>),
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-pub struct TwoRegions {
- a: Region,
- b: Region,
+pub struct TwoRegions<'tcx> {
+ a: &'tcx Region,
+ b: &'tcx Region,
}
#[derive(Copy, Clone, PartialEq)]
-pub enum UndoLogEntry {
+pub enum UndoLogEntry<'tcx> {
/// Pushed when we start a snapshot.
OpenSnapshot,
AddVar(RegionVid),
/// We added the given `constraint`
- AddConstraint(Constraint),
+ AddConstraint(Constraint<'tcx>),
/// We added the given `verify`
AddVerify(usize),
AddGiven(ty::FreeRegion, ty::RegionVid),
/// We added a GLB/LUB "combinaton variable"
- AddCombination(CombineMapType, TwoRegions),
+ AddCombination(CombineMapType, TwoRegions<'tcx>),
/// During skolemization, we sometimes purge entries from the undo
/// log in a kind of minisnapshot (unlike other snapshots, this
/// `ConcreteFailure(o, a, b)`:
///
/// `o` requires that `a <= b`, but this does not hold
- ConcreteFailure(SubregionOrigin<'tcx>, Region, Region),
+ ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region),
/// `GenericBoundFailure(p, s, a)
///
/// The parameter/associated-type `p` must be known to outlive the lifetime
/// `a` (but none of the known bounds are sufficient).
- GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region),
+ GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, &'tcx Region),
/// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`:
///
/// `sub_r <= sup_r` does not hold.
SubSupConflict(RegionVariableOrigin,
SubregionOrigin<'tcx>,
- Region,
+ &'tcx Region,
SubregionOrigin<'tcx>,
- Region),
+ &'tcx Region),
/// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive
/// more specific errors message by suggesting to the user where they
#[derive(Clone, Debug)]
pub enum ProcessedErrorOrigin<'tcx> {
- ConcreteFailure(SubregionOrigin<'tcx>, Region, Region),
+ ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region),
VariableFailure(RegionVariableOrigin),
}
}
}
-pub type CombineMap = FnvHashMap<TwoRegions, RegionVid>;
+pub type CombineMap<'tcx> = FnvHashMap<TwoRegions<'tcx>, RegionVid>;
pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
// Constraints of the form `A <= B` introduced by the region
// checker. Here at least one of `A` and `B` must be a region
// variable.
- constraints: RefCell<FnvHashMap<Constraint, SubregionOrigin<'tcx>>>,
+ constraints: RefCell<FnvHashMap<Constraint<'tcx>, SubregionOrigin<'tcx>>>,
// A "verify" is something that we need to verify after inference is
// done, but which does not directly affect inference in any way.
// a bit of a hack but seems to work.
givens: RefCell<FnvHashSet<(ty::FreeRegion, ty::RegionVid)>>,
- lubs: RefCell<CombineMap>,
- glbs: RefCell<CombineMap>,
+ lubs: RefCell<CombineMap<'tcx>>,
+ glbs: RefCell<CombineMap<'tcx>>,
skolemization_count: Cell<u32>,
bound_count: Cell<u32>,
// otherwise we end up adding entries for things like the lower
// bound on a variable and so forth, which can never be rolled
// back.
- undo_log: RefCell<Vec<UndoLogEntry>>,
+ undo_log: RefCell<Vec<UndoLogEntry<'tcx>>>,
unification_table: RefCell<UnificationTable<ty::RegionVid>>,
// This contains the results of inference. It begins as an empty
// option and only acquires a value after inference is complete.
- values: RefCell<Option<Vec<VarValue>>>,
+ values: RefCell<Option<Vec<VarValue<'tcx>>>>,
}
pub struct RegionSnapshot {
}
}
-struct TaintSet {
+struct TaintSet<'tcx> {
directions: TaintDirections,
- regions: FnvHashSet<ty::Region>
+ regions: FnvHashSet<&'tcx ty::Region>
}
-impl TaintSet {
+impl<'a, 'gcx, 'tcx> TaintSet<'tcx> {
fn new(directions: TaintDirections,
- initial_region: ty::Region)
+ initial_region: &'tcx ty::Region)
-> Self {
let mut regions = FnvHashSet();
regions.insert(initial_region);
}
fn fixed_point(&mut self,
- undo_log: &[UndoLogEntry],
- verifys: &[Verify]) {
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ undo_log: &[UndoLogEntry<'tcx>],
+ verifys: &[Verify<'tcx>]) {
let mut prev_len = 0;
while prev_len < self.len() {
debug!("tainted: prev_len = {:?} new_len = {:?}",
for undo_entry in undo_log {
match undo_entry {
&AddConstraint(ConstrainVarSubVar(a, b)) => {
- self.add_edge(ReVar(a), ReVar(b));
+ self.add_edge(tcx.mk_region(ReVar(a)),
+ tcx.mk_region(ReVar(b)));
}
&AddConstraint(ConstrainRegSubVar(a, b)) => {
- self.add_edge(a, ReVar(b));
+ self.add_edge(a, tcx.mk_region(ReVar(b)));
}
&AddConstraint(ConstrainVarSubReg(a, b)) => {
- self.add_edge(ReVar(a), b);
+ self.add_edge(tcx.mk_region(ReVar(a)), b);
}
&AddConstraint(ConstrainRegSubReg(a, b)) => {
self.add_edge(a, b);
}
&AddGiven(a, b) => {
- self.add_edge(ReFree(a), ReVar(b));
+ self.add_edge(tcx.mk_region(ReFree(a)),
+ tcx.mk_region(ReVar(b)));
}
&AddVerify(i) => {
verifys[i].bound.for_each_region(&mut |b| {
}
}
- fn into_set(self) -> FnvHashSet<ty::Region> {
+ fn into_set(self) -> FnvHashSet<&'tcx ty::Region> {
self.regions
}
}
fn add_edge(&mut self,
- source: ty::Region,
- target: ty::Region) {
+ source: &'tcx ty::Region,
+ target: &'tcx ty::Region) {
if self.directions.incoming {
if self.regions.contains(&target) {
self.regions.insert(source);
.rollback_to(snapshot.region_snapshot);
}
- pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry) {
+ pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry<'tcx>) {
match undo_entry {
OpenSnapshot => {
panic!("Failure to observe stack discipline");
/// The `snapshot` argument to this function is not really used;
/// it's just there to make it explicit which snapshot bounds the
/// skolemized region that results. It should always be the top-most snapshot.
- pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region {
+ pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot)
+ -> &'tcx Region {
assert!(self.in_snapshot());
assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot);
let sc = self.skolemization_count.get();
self.skolemization_count.set(sc + 1);
- ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)
+ self.tcx.mk_region(ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br))
}
/// Removes all the edges to/from the skolemized regions that are
/// completes to remove all trace of the skolemized regions
/// created in that time.
pub fn pop_skolemized(&self,
- skols: &FnvHashSet<ty::Region>,
+ skols: &FnvHashSet<&'tcx ty::Region>,
snapshot: &RegionSnapshot) {
debug!("pop_skolemized_regions(skols={:?})", skols);
skols.len());
debug_assert! {
skols.iter()
- .all(|k| match *k {
+ .all(|&k| match *k {
ty::ReSkolemized(index, _) =>
index.index >= first_to_pop &&
index.index < last_to_pop,
self.skolemization_count.set(snapshot.skolemization_count);
return;
- fn kill_constraint(skols: &FnvHashSet<ty::Region>,
- undo_entry: &UndoLogEntry)
- -> bool {
+ fn kill_constraint<'tcx>(skols: &FnvHashSet<&'tcx ty::Region>,
+ undo_entry: &UndoLogEntry<'tcx>)
+ -> bool {
match undo_entry {
&AddConstraint(ConstrainVarSubVar(_, _)) =>
false,
}
- pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region {
+ pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> &'tcx Region {
// Creates a fresh bound variable for use in GLB computations.
// See discussion of GLB computation in the large comment at
// the top of this file for more details.
bug!("rollover in RegionInference new_bound()");
}
- ReLateBound(debruijn, BrFresh(sc))
+ self.tcx.mk_region(ReLateBound(debruijn, BrFresh(sc)))
}
fn values_are_none(&self) -> bool {
self.values.borrow().is_none()
}
- fn add_constraint(&self, constraint: Constraint, origin: SubregionOrigin<'tcx>) {
+ fn add_constraint(&self, constraint: Constraint<'tcx>, origin: SubregionOrigin<'tcx>) {
// cannot add constraints once regions are resolved
assert!(self.values_are_none());
}
}
- pub fn make_eqregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
+ pub fn make_eqregion(&self,
+ origin: SubregionOrigin<'tcx>,
+ sub: &'tcx Region,
+ sup: &'tcx Region) {
if sub != sup {
// Eventually, it would be nice to add direct support for
// equating regions.
self.make_subregion(origin.clone(), sub, sup);
self.make_subregion(origin, sup, sub);
- if let (ty::ReVar(sub), ty::ReVar(sup)) = (sub, sup) {
+ if let (ty::ReVar(sub), ty::ReVar(sup)) = (*sub, *sup) {
self.unification_table.borrow_mut().union(sub, sup);
}
}
}
- pub fn make_subregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) {
+ pub fn make_subregion(&self,
+ origin: SubregionOrigin<'tcx>,
+ sub: &'tcx Region,
+ sup: &'tcx Region) {
// cannot add constraints once regions are resolved
assert!(self.values_are_none());
origin);
match (sub, sup) {
- (ReEarlyBound(..), _) |
- (ReLateBound(..), _) |
- (_, ReEarlyBound(..)) |
- (_, ReLateBound(..)) => {
+ (&ReEarlyBound(..), _) |
+ (&ReLateBound(..), _) |
+ (_, &ReEarlyBound(..)) |
+ (_, &ReLateBound(..)) => {
span_bug!(origin.span(),
"cannot relate bound region: {:?} <= {:?}",
sub,
sup);
}
- (_, ReStatic) => {
+ (_, &ReStatic) => {
// all regions are subregions of static, so we can ignore this
}
- (ReVar(sub_id), ReVar(sup_id)) => {
+ (&ReVar(sub_id), &ReVar(sup_id)) => {
self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin);
}
- (r, ReVar(sup_id)) => {
- self.add_constraint(ConstrainRegSubVar(r, sup_id), origin);
+ (_, &ReVar(sup_id)) => {
+ self.add_constraint(ConstrainRegSubVar(sub, sup_id), origin);
}
- (ReVar(sub_id), r) => {
- self.add_constraint(ConstrainVarSubReg(sub_id, r), origin);
+ (&ReVar(sub_id), _) => {
+ self.add_constraint(ConstrainVarSubReg(sub_id, sup), origin);
}
_ => {
self.add_constraint(ConstrainRegSubReg(sub, sup), origin);
pub fn verify_generic_bound(&self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
- sub: Region,
- bound: VerifyBound) {
+ sub: &'tcx Region,
+ bound: VerifyBound<'tcx>) {
self.add_verify(Verify {
kind: kind,
origin: origin,
});
}
- pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
+ pub fn lub_regions(&self,
+ origin: SubregionOrigin<'tcx>,
+ a: &'tcx Region,
+ b: &'tcx Region)
+ -> &'tcx Region {
// cannot add constraints once regions are resolved
assert!(self.values_are_none());
debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b);
- if a == ty::ReStatic || b == ty::ReStatic {
- ReStatic // nothing lives longer than static
- } else if a == b {
- a // LUB(a,a) = a
- } else {
- self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| {
- this.make_subregion(origin.clone(), old_r, new_r)
- })
+ match (a, b) {
+ (r @ &ReStatic, _) | (_, r @ &ReStatic) => {
+ r // nothing lives longer than static
+ }
+
+ _ if a == b => {
+ a // LUB(a,a) = a
+ }
+
+ _ => {
+ self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| {
+ this.make_subregion(origin.clone(), old_r, new_r)
+ })
+ }
}
}
- pub fn glb_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region {
+ pub fn glb_regions(&self,
+ origin: SubregionOrigin<'tcx>,
+ a: &'tcx Region,
+ b: &'tcx Region)
+ -> &'tcx Region {
// cannot add constraints once regions are resolved
assert!(self.values_are_none());
debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b);
match (a, b) {
- (ReStatic, r) | (r, ReStatic) => {
+ (&ReStatic, r) | (r, &ReStatic) => {
r // static lives longer than everything else
}
}
}
- pub fn resolve_var(&self, rid: RegionVid) -> ty::Region {
+ pub fn resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region {
match *self.values.borrow() {
None => {
span_bug!((*self.var_origins.borrow())[rid.index as usize].span(),
been computed!")
}
Some(ref values) => {
- let r = lookup(values, rid);
+ let r = lookup(self.tcx, values, rid);
debug!("resolve_var({:?}) = {:?}", rid, r);
r
}
}
}
- pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> ty::Region {
- ty::ReVar(self.unification_table.borrow_mut().find_value(rid).min_vid)
+ pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region {
+ let vid = self.unification_table.borrow_mut().find_value(rid).min_vid;
+ self.tcx.mk_region(ty::ReVar(vid))
}
- fn combine_map(&self, t: CombineMapType) -> &RefCell<CombineMap> {
+ fn combine_map(&self, t: CombineMapType) -> &RefCell<CombineMap<'tcx>> {
match t {
Glb => &self.glbs,
Lub => &self.lubs,
pub fn combine_vars<F>(&self,
t: CombineMapType,
- a: Region,
- b: Region,
+ a: &'tcx Region,
+ b: &'tcx Region,
origin: SubregionOrigin<'tcx>,
mut relate: F)
- -> Region
- where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, Region, Region)
+ -> &'tcx Region
+ where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, &'tcx Region, &'tcx Region)
{
let vars = TwoRegions { a: a, b: b };
if let Some(&c) = self.combine_map(t).borrow().get(&vars) {
- return ReVar(c);
+ return self.tcx.mk_region(ReVar(c));
}
let c = self.new_region_var(MiscVariable(origin.span()));
self.combine_map(t).borrow_mut().insert(vars, c);
if self.in_snapshot() {
self.undo_log.borrow_mut().push(AddCombination(t, vars));
}
- relate(self, a, ReVar(c));
- relate(self, b, ReVar(c));
+ relate(self, a, self.tcx.mk_region(ReVar(c)));
+ relate(self, b, self.tcx.mk_region(ReVar(c)));
debug!("combine_vars() c={:?}", c);
- ReVar(c)
+ self.tcx.mk_region(ReVar(c))
}
pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec<RegionVid> {
/// related to other regions.
pub fn tainted(&self,
mark: &RegionSnapshot,
- r0: Region,
+ r0: &'tcx Region,
directions: TaintDirections)
- -> FnvHashSet<ty::Region> {
+ -> FnvHashSet<&'tcx ty::Region> {
debug!("tainted(mark={:?}, r0={:?}, directions={:?})",
mark, r0, directions);
// edges and add any new regions we find to result_set. This
// is not a terribly efficient implementation.
let mut taint_set = TaintSet::new(directions, r0);
- taint_set.fixed_point(&self.undo_log.borrow()[mark.length..],
+ taint_set.fixed_point(self.tcx,
+ &self.undo_log.borrow()[mark.length..],
&self.verifys.borrow());
debug!("tainted: result={:?}", taint_set.regions);
return taint_set.into_set();
errors
}
- fn lub_concrete_regions(&self, free_regions: &FreeRegionMap, a: Region, b: Region) -> Region {
+ fn lub_concrete_regions(&self,
+ free_regions: &FreeRegionMap,
+ a: &'tcx Region,
+ b: &'tcx Region)
+ -> &'tcx Region {
match (a, b) {
- (ReLateBound(..), _) |
- (_, ReLateBound(..)) |
- (ReEarlyBound(..), _) |
- (_, ReEarlyBound(..)) |
- (ReErased, _) |
- (_, ReErased) => {
+ (&ReLateBound(..), _) |
+ (_, &ReLateBound(..)) |
+ (&ReEarlyBound(..), _) |
+ (_, &ReEarlyBound(..)) |
+ (&ReErased, _) |
+ (_, &ReErased) => {
bug!("cannot relate region: LUB({:?}, {:?})", a, b);
}
- (ReStatic, _) | (_, ReStatic) => {
- ReStatic // nothing lives longer than static
+ (r @ &ReStatic, _) | (_, r @ &ReStatic) => {
+ r // nothing lives longer than static
}
- (ReEmpty, r) | (r, ReEmpty) => {
+ (&ReEmpty, r) | (r, &ReEmpty) => {
r // everything lives longer than empty
}
- (ReVar(v_id), _) | (_, ReVar(v_id)) => {
+ (&ReVar(v_id), _) | (_, &ReVar(v_id)) => {
span_bug!((*self.var_origins.borrow())[v_id.index as usize].span(),
"lub_concrete_regions invoked with non-concrete \
regions: {:?}, {:?}",
b);
}
- (ReFree(ref fr), ReScope(s_id)) |
- (ReScope(s_id), ReFree(ref fr)) => {
- let f = ReFree(*fr);
+ (&ReFree(fr), &ReScope(s_id)) |
+ (&ReScope(s_id), &ReFree(fr)) => {
// A "free" region can be interpreted as "some region
// at least as big as the block fr.scope_id". So, we can
// reasonably compare free regions and scopes:
// if the free region's scope `fr.scope_id` is bigger than
// the scope region `s_id`, then the LUB is the free
// region itself:
- f
+ self.tcx.mk_region(ReFree(fr))
} else {
// otherwise, we don't know what the free region is,
// so we must conservatively say the LUB is static:
- ReStatic
+ self.tcx.mk_region(ReStatic)
}
}
- (ReScope(a_id), ReScope(b_id)) => {
+ (&ReScope(a_id), &ReScope(b_id)) => {
// The region corresponding to an outer block is a
// subtype of the region corresponding to an inner
// block.
- ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))
+ self.tcx.mk_region(ReScope(
+ self.tcx.region_maps.nearest_common_ancestor(a_id, b_id)))
}
- (ReFree(a_fr), ReFree(b_fr)) => {
- free_regions.lub_free_regions(a_fr, b_fr)
+ (&ReFree(a_fr), &ReFree(b_fr)) => {
+ self.tcx.mk_region(free_regions.lub_free_regions(a_fr, b_fr))
}
// For these types, we cannot define any additional
// relationship:
- (ReSkolemized(..), _) |
- (_, ReSkolemized(..)) => {
+ (&ReSkolemized(..), _) |
+ (_, &ReSkolemized(..)) => {
if a == b {
a
} else {
- ReStatic
+ self.tcx.mk_region(ReStatic)
}
}
}
// ______________________________________________________________________
#[derive(Copy, Clone, Debug)]
-pub enum VarValue {
- Value(Region),
+pub enum VarValue<'tcx> {
+ Value(&'tcx Region),
ErrorValue,
}
struct RegionAndOrigin<'tcx> {
- region: Region,
+ region: &'tcx Region,
origin: SubregionOrigin<'tcx>,
}
-type RegionGraph = graph::Graph<(), Constraint>;
+type RegionGraph<'tcx> = graph::Graph<(), Constraint<'tcx>>;
impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> {
fn infer_variable_values(&self,
free_regions: &FreeRegionMap,
errors: &mut Vec<RegionResolutionError<'tcx>>,
subject: ast::NodeId)
- -> Vec<VarValue> {
+ -> Vec<VarValue<'tcx>> {
let mut var_data = self.construct_var_data();
// Dorky hack to cause `dump_constraints` to only get called
var_data
}
- fn construct_var_data(&self) -> Vec<VarValue> {
+ fn construct_var_data(&self) -> Vec<VarValue<'tcx>> {
(0..self.num_vars() as usize)
- .map(|_| Value(ty::ReEmpty))
+ .map(|_| Value(self.tcx.mk_region(ty::ReEmpty)))
.collect()
}
}
}
- fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue]) {
+ fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue<'tcx>]) {
self.iterate_until_fixed_point("Expansion", |constraint, origin| {
debug!("expansion: constraint={:?} origin={:?}",
constraint, origin);
fn expand_node(&self,
free_regions: &FreeRegionMap,
- a_region: Region,
+ a_region: &'tcx Region,
b_vid: RegionVid,
- b_data: &mut VarValue)
+ b_data: &mut VarValue<'tcx>)
-> bool {
debug!("expand_node({:?}, {:?} == {:?})",
a_region,
b_data);
// Check if this relationship is implied by a given.
- match a_region {
+ match *a_region {
ty::ReFree(fr) => {
if self.givens.borrow().contains(&(fr, b_vid)) {
debug!("given");
/// and check that they are satisfied.
fn collect_errors(&self,
free_regions: &FreeRegionMap,
- var_data: &mut Vec<VarValue>,
+ var_data: &mut Vec<VarValue<'tcx>>,
errors: &mut Vec<RegionResolutionError<'tcx>>) {
let constraints = self.constraints.borrow();
for (constraint, origin) in constraints.iter() {
for verify in self.verifys.borrow().iter() {
debug!("collect_errors: verify={:?}", verify);
- let sub = normalize(var_data, verify.region);
+ let sub = normalize(self.tcx, var_data, verify.region);
if verify.bound.is_met(self.tcx, free_regions, var_data, sub) {
continue;
}
/// and create a `RegionResolutionError` for each of them.
fn collect_var_errors(&self,
free_regions: &FreeRegionMap,
- var_data: &[VarValue],
- graph: &RegionGraph,
+ var_data: &[VarValue<'tcx>],
+ graph: &RegionGraph<'tcx>,
errors: &mut Vec<RegionResolutionError<'tcx>>) {
debug!("collect_var_errors");
}
}
- fn construct_graph(&self) -> RegionGraph {
+ fn construct_graph(&self) -> RegionGraph<'tcx> {
let num_vars = self.num_vars();
let constraints = self.constraints.borrow();
fn collect_error_for_expanding_node(&self,
free_regions: &FreeRegionMap,
- graph: &RegionGraph,
+ graph: &RegionGraph<'tcx>,
dup_vec: &mut [u32],
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>) {
// the user will more likely get a specific suggestion.
fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering {
match (a.region, b.region) {
- (ReFree(..), ReFree(..)) => Equal,
- (ReFree(..), _) => Less,
- (_, ReFree(..)) => Greater,
+ (&ReFree(..), &ReFree(..)) => Equal,
+ (&ReFree(..), _) => Less,
+ (_, &ReFree(..)) => Greater,
(_, _) => Equal,
}
}
}
fn collect_concrete_regions(&self,
- graph: &RegionGraph,
+ graph: &RegionGraph<'tcx>,
orig_node_idx: RegionVid,
dir: Direction,
dup_vec: &mut [u32])
fn process_edges<'a, 'gcx, 'tcx>(this: &RegionVarBindings<'a, 'gcx, 'tcx>,
state: &mut WalkState<'tcx>,
- graph: &RegionGraph,
+ graph: &RegionGraph<'tcx>,
source_vid: RegionVid,
dir: Direction) {
debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
}
fn iterate_until_fixed_point<F>(&self, tag: &str, mut body: F)
- where F: FnMut(&Constraint, &SubregionOrigin<'tcx>) -> bool
+ where F: FnMut(&Constraint<'tcx>, &SubregionOrigin<'tcx>) -> bool
{
let mut iteration = 0;
let mut changed = true;
}
-fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
- match r {
- ty::ReVar(rid) => lookup(values, rid),
+fn normalize<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ values: &Vec<VarValue<'tcx>>,
+ r: &'tcx ty::Region)
+ -> &'tcx ty::Region {
+ match *r {
+ ty::ReVar(rid) => lookup(tcx, values, rid),
_ => r,
}
}
-fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
+fn lookup<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ values: &Vec<VarValue<'tcx>>,
+ rid: ty::RegionVid)
+ -> &'tcx ty::Region {
match values[rid.index as usize] {
Value(r) => r,
- ErrorValue => ReStatic, // Previously reported error.
+ ErrorValue => tcx.mk_region(ReStatic), // Previously reported error.
}
}
}
}
-impl<'a, 'gcx, 'tcx> VerifyBound {
- fn for_each_region(&self, f: &mut FnMut(ty::Region)) {
+impl<'a, 'gcx, 'tcx> VerifyBound<'tcx> {
+ fn for_each_region(&self, f: &mut FnMut(&'tcx ty::Region)) {
match self {
&VerifyBound::AnyRegion(ref rs) |
&VerifyBound::AllRegions(ref rs) => for &r in rs {
pub fn must_hold(&self) -> bool {
match self {
- &VerifyBound::AnyRegion(ref bs) => bs.contains(&ty::ReStatic),
+ &VerifyBound::AnyRegion(ref bs) => bs.contains(&&ty::ReStatic),
&VerifyBound::AllRegions(ref bs) => bs.is_empty(),
&VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()),
&VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()),
pub fn cannot_hold(&self) -> bool {
match self {
&VerifyBound::AnyRegion(ref bs) => bs.is_empty(),
- &VerifyBound::AllRegions(ref bs) => bs.contains(&ty::ReEmpty),
+ &VerifyBound::AllRegions(ref bs) => bs.contains(&&ty::ReEmpty),
&VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()),
&VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()),
}
}
- pub fn or(self, vb: VerifyBound) -> VerifyBound {
+ pub fn or(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> {
if self.must_hold() || vb.cannot_hold() {
self
} else if self.cannot_hold() || vb.must_hold() {
}
}
- pub fn and(self, vb: VerifyBound) -> VerifyBound {
+ pub fn and(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> {
if self.must_hold() && vb.must_hold() {
self
} else if self.cannot_hold() && vb.cannot_hold() {
fn is_met(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>,
free_regions: &FreeRegionMap,
- var_values: &Vec<VarValue>,
- min: ty::Region)
+ var_values: &Vec<VarValue<'tcx>>,
+ min: &'tcx ty::Region)
-> bool {
match self {
&VerifyBound::AnyRegion(ref rs) =>
rs.iter()
- .map(|&r| normalize(var_values, r))
+ .map(|&r| normalize(tcx, var_values, r))
.any(|r| free_regions.is_subregion_of(tcx, min, r)),
&VerifyBound::AllRegions(ref rs) =>
rs.iter()
- .map(|&r| normalize(var_values, r))
+ .map(|&r| normalize(tcx, var_values, r))
.all(|r| free_regions.is_subregion_of(tcx, min, r)),
&VerifyBound::AnyBound(ref bs) =>
}
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
- match r {
- ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid),
- _ => r,
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+ match *r {
+ ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid),
+ _ => r,
}
}
}
}
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
- match r {
- ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid),
- _ => r,
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+ match *r {
+ ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid),
+ _ => r,
}
}
}
}
}
- fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
debug!("{}.regions({:?}, {:?}) self.cause={:?}",
self.tag(), a, b, self.fields.cause);
// FIXME -- we have more fine-grained information available
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(collections)]
+#![feature(conservative_impl_trait)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(enumset)]
declare_lint! {
pub TRANSMUTE_FROM_FN_ITEM_TYPES,
- Warn,
+ Deny,
"transmute from function item type to pointer-sized type erroneously allowed"
}
for (lint_id, level, span) in v {
let (now, now_source) = self.lints().get_level_source(lint_id);
if now == Forbid && level != Forbid {
- let lint_name = lint_id.as_str();
+ let lint_name = lint_id.to_string();
let mut diag_builder = struct_span_err!(self.sess(), span, E0453,
"{}({}) overruled by outer forbid({})",
level.as_str(), lint_name,
lint_name);
+ diag_builder.span_label(span, &format!("overruled by previous forbid"));
match now_source {
LintSource::Default => &mut diag_builder,
LintSource::Node(forbid_source_span) => {
- diag_builder.span_note(forbid_source_span,
- "`forbid` lint level set here")
+ diag_builder.span_label(forbid_source_span,
+ &format!("`forbid` level set here"))
},
LintSource::CommandLine => {
diag_builder.note("`forbid` lint level was set on command line")
for &(lint, span, ref msg) in v {
span_bug!(span,
"unprocessed lint {} at {}: {}",
- lint.as_str(), tcx.map.node_to_string(*id), *msg)
+ lint.to_string(), tcx.map.node_to_string(*id), *msg)
}
}
// in the iteration code.
for (_, v) in sess.lints.borrow().iter() {
for &(lint, span, ref msg) in v {
- span_bug!(span, "unprocessed lint {}: {}", lint.as_str(), *msg)
+ span_bug!(span, "unprocessed lint {}: {}", lint.to_string(), *msg)
}
}
}
}
/// Get the name of the lint.
- pub fn as_str(&self) -> String {
+ pub fn to_string(&self) -> String {
self.lint.name_lower()
}
}
pub enum InlinedItem {
Item(DefId /* def-id in source crate */, P<hir::Item>),
TraitItem(DefId /* impl id */, P<hir::TraitItem>),
- ImplItem(DefId /* impl id */, P<hir::ImplItem>),
- Foreign(DefId /* extern item */, P<hir::ForeignItem>),
+ ImplItem(DefId /* impl id */, P<hir::ImplItem>)
}
/// A borrowed version of `hir::InlinedItem`.
pub enum InlinedItemRef<'a> {
Item(DefId, &'a hir::Item),
TraitItem(DefId, &'a hir::TraitItem),
- ImplItem(DefId, &'a hir::ImplItem),
- Foreign(DefId, &'a hir::ForeignItem)
+ ImplItem(DefId, &'a hir::ImplItem)
}
/// Item definitions in the currently-compiled crate would have the CrateNum
fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind;
fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
-> ty::ClosureTy<'tcx>;
- fn item_variances(&self, def: DefId) -> ty::ItemVariances;
+ fn item_variances(&self, def: DefId) -> Vec<ty::Variance>;
fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr>;
fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> Ty<'tcx>;
fn is_default_impl(&self, impl_did: DefId) -> bool;
fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool;
fn is_foreign_item(&self, did: DefId) -> bool;
- fn is_static_method(&self, did: DefId) -> bool;
fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool;
fn is_typedef(&self, did: DefId) -> bool;
{
match *self {
InlinedItem::Item(_, ref i) => visitor.visit_item(&i),
- InlinedItem::Foreign(_, ref i) => visitor.visit_foreign_item(&i),
InlinedItem::TraitItem(_, ref ti) => visitor.visit_trait_item(ti),
InlinedItem::ImplItem(_, ref ii) => visitor.visit_impl_item(ii),
}
fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { bug!("closure_kind") }
fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
-> ty::ClosureTy<'tcx> { bug!("closure_ty") }
- fn item_variances(&self, def: DefId) -> ty::ItemVariances { bug!("item_variances") }
+ fn item_variances(&self, def: DefId) -> Vec<ty::Variance> { bug!("item_variances") }
fn repr_attrs(&self, def: DefId) -> Vec<attr::ReprAttr> { bug!("repr_attrs") }
fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> Ty<'tcx> { bug!("item_type") }
fn is_extern_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, did: DefId) -> bool
{ bug!("is_extern_item") }
fn is_foreign_item(&self, did: DefId) -> bool { bug!("is_foreign_item") }
- fn is_static_method(&self, did: DefId) -> bool { bug!("is_static_method") }
fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool { false }
fn is_typedef(&self, did: DefId) -> bool { bug!("is_typedef") }
use hir::def::Def;
use hir::def_id::{DefId};
use lint;
+use util::nodemap::FnvHashSet;
-use std::collections::HashSet;
use syntax::{ast, codemap};
use syntax::attr;
use syntax_pos;
struct MarkSymbolVisitor<'a, 'tcx: 'a> {
worklist: Vec<ast::NodeId>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- live_symbols: Box<HashSet<ast::NodeId>>,
+ live_symbols: Box<FnvHashSet<ast::NodeId>>,
struct_has_extern_repr: bool,
ignore_non_const_paths: bool,
inherited_pub_visibility: bool,
MarkSymbolVisitor {
worklist: worklist,
tcx: tcx,
- live_symbols: box HashSet::new(),
+ live_symbols: box FnvHashSet(),
struct_has_extern_repr: false,
ignore_non_const_paths: false,
inherited_pub_visibility: false,
Def::AssociatedTy(..) | Def::Method(_) | Def::AssociatedConst(_)
if self.tcx.trait_of_item(def.def_id()).is_some() => {
if let Some(substs) = self.tcx.tables.borrow().item_substs.get(&id) {
- match substs.substs.types[0].sty {
+ match substs.substs.type_at(0).sty {
TyEnum(tyid, _) | TyStruct(tyid, _) => {
self.check_def_id(tyid.did)
}
}
fn mark_live_symbols(&mut self) {
- let mut scanned = HashSet::new();
+ let mut scanned = FnvHashSet();
while !self.worklist.is_empty() {
let id = self.worklist.pop().unwrap();
if scanned.contains(&id) {
fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
access_levels: &privacy::AccessLevels,
krate: &hir::Crate)
- -> Box<HashSet<ast::NodeId>> {
+ -> Box<FnvHashSet<ast::NodeId>> {
let worklist = create_and_seed_worklist(tcx, access_levels, krate);
let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist);
symbol_visitor.mark_live_symbols();
struct DeadVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- live_symbols: Box<HashSet<ast::NodeId>>,
+ live_symbols: Box<FnvHashSet<ast::NodeId>>,
}
impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
borrow_id: ast::NodeId,
borrow_span: Span,
cmt: mc::cmt<'tcx>,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
bk: ty::BorrowKind,
loan_cause: LoanCause);
for arg in &decl.inputs {
let arg_ty = return_if_err!(self.mc.infcx.node_ty(arg.pat.id));
- let fn_body_scope = self.tcx().region_maps.node_extent(body.id);
+ let fn_body_scope_r = self.tcx().node_scope_region(body.id);
let arg_cmt = self.mc.cat_rvalue(
arg.id,
arg.pat.span,
- ty::ReScope(fn_body_scope), // Args live only as long as the fn body.
+ fn_body_scope_r, // Args live only as long as the fn body.
arg_ty);
self.walk_irrefutable_pat(arg_cmt, &arg.pat);
fn borrow_expr(&mut self,
expr: &hir::Expr,
- r: ty::Region,
+ r: &'tcx ty::Region,
bk: ty::BorrowKind,
cause: LoanCause) {
debug!("borrow_expr(expr={:?}, r={:?}, bk={:?})",
hir::ExprMatch(ref discr, ref arms, _) => {
let discr_cmt = return_if_err!(self.mc.cat_expr(&discr));
- self.borrow_expr(&discr, ty::ReEmpty, ty::ImmBorrow, MatchDiscriminant);
+ let r = self.tcx().mk_region(ty::ReEmpty);
+ self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant);
// treatment of the discriminant is handled while walking the arms.
for arm in arms {
// make sure that the thing we are pointing out stays valid
// for the lifetime `scope_r` of the resulting ptr:
let expr_ty = return_if_err!(self.mc.infcx.node_ty(expr.id));
- if let ty::TyRef(&r, _) = expr_ty.sty {
+ if let ty::TyRef(r, _) = expr_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
self.borrow_expr(&base, r, bk, AddrOf);
}
let callee_ty = return_if_err!(self.mc.infcx.expr_ty_adjusted(callee));
debug!("walk_callee: callee={:?} callee_ty={:?}",
callee, callee_ty);
- let call_scope = self.tcx().region_maps.node_extent(call.id);
match callee_ty.sty {
ty::TyFnDef(..) | ty::TyFnPtr(_) => {
self.consume_expr(callee);
};
match overloaded_call_type {
FnMutOverloadedCall => {
+ let call_scope_r = self.tcx().node_scope_region(call.id);
self.borrow_expr(callee,
- ty::ReScope(call_scope),
+ call_scope_r,
ty::MutBorrow,
ClosureInvocation);
}
FnOverloadedCall => {
+ let call_scope_r = self.tcx().node_scope_region(call.id);
self.borrow_expr(callee,
- ty::ReScope(call_scope),
+ call_scope_r,
ty::ImmBorrow,
ClosureInvocation);
}
};
let bk = ty::BorrowKind::from_mutbl(m);
self.delegate.borrow(expr.id, expr.span, cmt,
- *r, bk, AutoRef);
+ r, bk, AutoRef);
}
}
}
self.delegate.borrow(expr.id,
expr.span,
cmt_base,
- *r,
+ r,
ty::BorrowKind::from_mutbl(m),
AutoRef);
}
// Converting from a &T to *T (or &mut T to *mut T) is
// treated as borrowing it for the enclosing temporary
// scope.
- let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id));
+ let r = self.tcx().node_scope_region(expr.id);
self.delegate.borrow(expr.id,
expr.span,
// methods are implicitly autoref'd which sadly does not use
// adjustments, so we must hardcode the borrow here.
- let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id));
+ let r = self.tcx().node_scope_region(expr.id);
let bk = ty::ImmBorrow;
for &arg in &rhs {
// It is also a borrow or copy/move of the value being matched.
match bmode {
hir::BindByRef(m) => {
- if let ty::TyRef(&r, _) = pat_ty.sty {
+ if let ty::TyRef(r, _) = pat_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding);
}
for implied_bound in implied_bounds {
debug!("implied bound: {:?}", implied_bound);
match *implied_bound {
- ImpliedBound::RegionSubRegion(ty::ReFree(free_a), ty::ReFree(free_b)) => {
+ ImpliedBound::RegionSubRegion(&ty::ReFree(free_a), &ty::ReFree(free_b)) => {
self.relate_free_regions(free_a, free_b);
}
ImpliedBound::RegionSubRegion(..) |
}
ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(r_a, r_b))) => {
match (r_a, r_b) {
- (ty::ReStatic, ty::ReFree(_)) => {},
- (ty::ReFree(fr_a), ty::ReStatic) => self.relate_to_static(fr_a),
- (ty::ReFree(fr_a), ty::ReFree(fr_b)) => {
+ (&ty::ReStatic, &ty::ReFree(_)) => {},
+ (&ty::ReFree(fr_a), &ty::ReStatic) => self.relate_to_static(fr_a),
+ (&ty::ReFree(fr_a), &ty::ReFree(fr_b)) => {
// Record that `'a:'b`. Or, put another way, `'b <= 'a`.
self.relate_free_regions(fr_b, fr_a);
}
/// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
pub fn is_subregion_of(&self,
tcx: TyCtxt,
- sub_region: ty::Region,
- super_region: ty::Region)
+ sub_region: &ty::Region,
+ super_region: &ty::Region)
-> bool {
let result = sub_region == super_region || {
match (sub_region, super_region) {
- (ty::ReEmpty, _) |
- (_, ty::ReStatic) =>
+ (&ty::ReEmpty, _) |
+ (_, &ty::ReStatic) =>
true,
- (ty::ReScope(sub_scope), ty::ReScope(super_scope)) =>
+ (&ty::ReScope(sub_scope), &ty::ReScope(super_scope)) =>
tcx.region_maps.is_subscope_of(sub_scope, super_scope),
- (ty::ReScope(sub_scope), ty::ReFree(fr)) =>
+ (&ty::ReScope(sub_scope), &ty::ReFree(fr)) =>
tcx.region_maps.is_subscope_of(sub_scope, fr.scope) ||
self.is_static(fr),
- (ty::ReFree(sub_fr), ty::ReFree(super_fr)) =>
+ (&ty::ReFree(sub_fr), &ty::ReFree(super_fr)) =>
self.sub_free_region(sub_fr, super_fr),
- (ty::ReStatic, ty::ReFree(sup_fr)) =>
+ (&ty::ReStatic, &ty::ReFree(sup_fr)) =>
self.is_static(sup_fr),
_ =>
self.ir.tcx.region_maps.call_site_extent(id, body.id),
&self.fn_ret(id));
- if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() {
+ if fn_ret.is_never() {
+ // FIXME(durka) this rejects code like `fn foo(x: !) -> ! { x }`
+ if self.live_on_entry(entry_ln, self.s.clean_exit_var).is_some() {
+ span_err!(self.ir.tcx.sess, sp, E0270,
+ "computation may converge in a function marked as diverging");
+ }
+ } else if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() {
let param_env = ParameterEnvironment::for_item(self.ir.tcx, id);
let t_ret_subst = fn_ret.subst(self.ir.tcx, ¶m_env.free_substs);
let is_nil = self.ir.tcx.infer_ctxt(None, Some(param_env),
#[derive(Clone, PartialEq)]
pub enum Categorization<'tcx> {
- Rvalue(ty::Region), // temporary val, argument is its scope
+ Rvalue(&'tcx ty::Region), // temporary val, argument is its scope
StaticItem,
Upvar(Upvar), // upvar referenced by closure env
Local(ast::NodeId), // local variable
- Deref(cmt<'tcx>, usize, PointerKind), // deref of a ptr
+ Deref(cmt<'tcx>, usize, PointerKind<'tcx>), // deref of a ptr
Interior(cmt<'tcx>, InteriorKind), // something interior: field, tuple, etc
Downcast(cmt<'tcx>, DefId), // selects a particular enum variant (*1)
// different kinds of pointers:
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub enum PointerKind {
+pub enum PointerKind<'tcx> {
/// `Box<T>`
Unique,
/// `&T`
- BorrowedPtr(ty::BorrowKind, ty::Region),
+ BorrowedPtr(ty::BorrowKind, &'tcx ty::Region),
/// `*T`
UnsafePtr(hir::Mutability),
/// Implicit deref of the `&T` that results from an overloaded index `[]`.
- Implicit(ty::BorrowKind, ty::Region),
+ Implicit(ty::BorrowKind, &'tcx ty::Region),
}
// We use the term "interior" to mean "something reachable from the
// We pun on *T to mean both actual deref of a ptr as well
// as accessing of components:
#[derive(Copy, Clone)]
-pub enum deref_kind {
- deref_ptr(PointerKind),
+pub enum deref_kind<'tcx> {
+ deref_ptr(PointerKind<'tcx>),
deref_interior(InteriorKind),
}
ty::TyRef(r, mt) => {
let kind = ty::BorrowKind::from_mutbl(mt.mutbl);
- Ok(deref_ptr(BorrowedPtr(kind, *r)))
+ Ok(deref_ptr(BorrowedPtr(kind, r)))
}
ty::TyRawPtr(ref mt) => {
};
// Region of environment pointer
- let env_region = ty::ReFree(ty::FreeRegion {
+ let env_region = self.tcx().mk_region(ty::ReFree(ty::FreeRegion {
// The environment of a closure is guaranteed to
// outlive any bindings introduced in the body of the
// closure itself.
scope: self.tcx().region_maps.item_extent(fn_body_id),
bound_region: ty::BrEnv
- });
+ }));
let env_ptr = BorrowedPtr(env_borrow_kind, env_region);
/// Returns the lifetime of a temporary created by expr with id `id`.
/// This could be `'static` if `id` is part of a constant expression.
- pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region {
- match self.infcx.temporary_scope(id) {
+ pub fn temporary_scope(&self, id: ast::NodeId) -> &'tcx ty::Region {
+ self.tcx().mk_region(match self.infcx.temporary_scope(id) {
Some(scope) => ty::ReScope(scope),
None => ty::ReStatic
- }
+ })
}
pub fn cat_rvalue_node(&self,
let re = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
self.temporary_scope(id)
} else {
- ty::ReStatic
+ self.tcx().mk_region(ty::ReStatic)
};
let ret = self.cat_rvalue(id, span, re, expr_ty);
debug!("cat_rvalue_node ret {:?}", ret);
pub fn cat_rvalue(&self,
cmt_id: ast::NodeId,
span: Span,
- temp_scope: ty::Region,
+ temp_scope: &'tcx ty::Region,
expr_ty: Ty<'tcx>) -> cmt<'tcx> {
let ret = Rc::new(cmt_ {
id:cmt_id,
}
}
-impl fmt::Debug for PointerKind {
+impl<'tcx> fmt::Debug for PointerKind<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Unique => write!(f, "Box"),
pub enum AccessLevel {
// Exported items + items participating in various kinds of public interfaces,
// but not directly nameable. For example, if function `fn f() -> T {...}` is
- // public, then type `T` is exported. Its values can be obtained by other crates
- // even if the type itseld is not nameable.
- // FIXME: Mostly unimplemented. Only `type` aliases export items currently.
+ // public, then type `T` is reachable. Its values can be obtained by other crates
+ // even if the type itself is not nameable.
Reachable,
// Public items + items accessible to other crates with help of `pub use` reexports
Exported,
use ty::{self, TyCtxt};
use middle::privacy;
use session::config;
-use util::nodemap::NodeSet;
+use util::nodemap::{NodeSet, FnvHashSet};
-use std::collections::HashSet;
use syntax::abi::Abi;
use syntax::ast;
use syntax::attr;
// Step 2: Mark all symbols that the symbols on the worklist touch.
fn propagate(&mut self) {
- let mut scanned = HashSet::new();
+ let mut scanned = FnvHashSet();
loop {
let search_item = match self.worklist.pop() {
Some(item) => item,
// (This is the special case aluded to in the
// doc-comment for this method)
let stmt_span = blk.stmts[r.first_statement_index as usize].span;
- Some(Span { lo: stmt_span.hi, ..blk.span })
+ Some(Span { lo: stmt_span.hi, hi: blk.span.hi, expn_id: stmt_span.expn_id })
}
}
}
terminating(r.id);
}
- hir::ExprIf(_, ref then, Some(ref otherwise)) => {
+ hir::ExprIf(ref expr, ref then, Some(ref otherwise)) => {
+ terminating(expr.id);
terminating(then.id);
terminating(otherwise.id);
}
#[derive(PartialEq, Debug)]
enum ScopeChain<'a> {
- /// EarlyScope(['a, 'b, ...], s) extends s with early-bound
- /// lifetimes.
- EarlyScope(&'a [hir::LifetimeDef], Scope<'a>),
+ /// EarlyScope(['a, 'b, ...], start, s) extends s with early-bound
+ /// lifetimes, with consecutive parameter indices from `start`.
+ /// That is, 'a has index `start`, 'b has index `start + 1`, etc.
+ /// Indices before `start` correspond to other generic parameters
+ /// of a parent item (trait/impl of a method), or `Self` in traits.
+ EarlyScope(&'a [hir::LifetimeDef], u32, Scope<'a>),
/// LateScope(['a, 'b, ...], s) extends s with late-bound
/// lifetimes introduced by the declaration binder_id.
LateScope(&'a [hir::LifetimeDef], Scope<'a>),
hir::ItemImpl(_, _, ref generics, _, _, _) => {
// These kinds of items have only early bound lifetime parameters.
let lifetimes = &generics.lifetimes;
- this.with(EarlyScope(lifetimes, &ROOT_SCOPE), |old_scope, this| {
+ let start = if let hir::ItemTrait(..) = item.node {
+ 1 // Self comes before lifetimes
+ } else {
+ 0
+ };
+ this.with(EarlyScope(lifetimes, start, &ROOT_SCOPE), |old_scope, this| {
this.check_lifetime_defs(old_scope, lifetimes);
intravisit::walk_item(this, item);
});
FnScope { s, .. } => { scope = s; }
RootScope => { return; }
- EarlyScope(lifetimes, s) |
+ EarlyScope(lifetimes, _, s) |
LateScope(lifetimes, s) => {
for lifetime_def in lifetimes {
// FIXME (#24278): non-hygienic comparison
.cloned()
.partition(|l| self.map.late_bound.contains_key(&l.lifetime.id));
+ // Find the start of nested early scopes, e.g. in methods.
+ let mut start = 0;
+ if let EarlyScope(..) = *self.scope {
+ let parent = self.hir_map.expect_item(self.hir_map.get_parent(fn_id));
+ if let hir::ItemTrait(..) = parent.node {
+ start += 1; // Self comes first.
+ }
+ match parent.node {
+ hir::ItemTrait(_, ref generics, _, _) |
+ hir::ItemImpl(_, _, ref generics, _, _, _) => {
+ start += generics.lifetimes.len() + generics.ty_params.len();
+ }
+ _ => {}
+ }
+ }
+
let this = self;
- this.with(EarlyScope(&early, this.scope), move |old_scope, this| {
+ this.with(EarlyScope(&early, start as u32, this.scope), move |old_scope, this| {
this.with(LateScope(&late, this.scope), move |_, this| {
this.check_lifetime_defs(old_scope, &generics.lifetimes);
walk(this);
break;
}
- EarlyScope(lifetimes, s) => {
+ EarlyScope(lifetimes, start, s) => {
match search_lifetimes(lifetimes, lifetime_ref) {
- Some((mut index, lifetime_def)) => {
- // Adjust for nested early scopes, e.g. in methods.
- let mut parent = s;
- while let EarlyScope(lifetimes, s) = *parent {
- index += lifetimes.len() as u32;
- parent = s;
- }
- assert_eq!(*parent, RootScope);
-
+ Some((index, lifetime_def)) => {
let decl_id = lifetime_def.id;
- let def = DefEarlyBoundRegion(index, decl_id);
+ let def = DefEarlyBoundRegion(start + index, decl_id);
self.insert_lifetime(lifetime_ref, def);
return;
}
break;
}
- EarlyScope(lifetimes, s) |
+ EarlyScope(lifetimes, _, s) |
LateScope(lifetimes, s) => {
search_result = search_lifetimes(lifetimes, lifetime_ref);
if search_result.is_some() {
return;
}
- EarlyScope(lifetimes, s) |
+ EarlyScope(lifetimes, _, s) |
LateScope(lifetimes, s) => {
if let Some((_, lifetime_def)) = search_lifetimes(lifetimes, lifetime) {
signal_shadowing_problem(
Repeat(Operand<'tcx>, TypedConstVal<'tcx>),
/// &x or &mut x
- Ref(Region, BorrowKind, Lvalue<'tcx>),
+ Ref(&'tcx Region, BorrowKind, Lvalue<'tcx>),
/// length of a [X] or [X;n] value
Len(Lvalue<'tcx>),
type Item = BasicBlock;
type Iter = IntoIter<BasicBlock>;
}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, Ord, PartialOrd)]
+pub struct Location {
+ /// the location is within this block
+ pub block: BasicBlock,
+
+    /// the location is the start of this statement; or, if `statement_index`
+ /// == num-statements, then the start of the terminator.
+ pub statement_index: usize,
+}
+
}
&Rvalue::Ref(reg, bk, ref lv) => {
let lv_ty = lv.ty(mir, tcx).to_ty(tcx);
- Some(tcx.mk_ref(
- tcx.mk_region(reg),
+ Some(tcx.mk_ref(reg,
ty::TypeAndMut {
ty: lv_ty,
mutbl: bk.to_mutbl_lossy()
fn visit_statement(&mut self,
block: BasicBlock,
- statement: & $($mutability)* Statement<'tcx>) {
- self.super_statement(block, statement);
+ statement: & $($mutability)* Statement<'tcx>,
+ location: Location) {
+ self.super_statement(block, statement, location);
}
fn visit_assign(&mut self,
block: BasicBlock,
lvalue: & $($mutability)* Lvalue<'tcx>,
- rvalue: & $($mutability)* Rvalue<'tcx>) {
- self.super_assign(block, lvalue, rvalue);
+ rvalue: & $($mutability)* Rvalue<'tcx>,
+ location: Location) {
+ self.super_assign(block, lvalue, rvalue, location);
}
fn visit_terminator(&mut self,
block: BasicBlock,
- terminator: & $($mutability)* Terminator<'tcx>) {
- self.super_terminator(block, terminator);
+ terminator: & $($mutability)* Terminator<'tcx>,
+ location: Location) {
+ self.super_terminator(block, terminator, location);
}
fn visit_terminator_kind(&mut self,
block: BasicBlock,
- kind: & $($mutability)* TerminatorKind<'tcx>) {
- self.super_terminator_kind(block, kind);
+ kind: & $($mutability)* TerminatorKind<'tcx>,
+ location: Location) {
+ self.super_terminator_kind(block, kind, location);
}
fn visit_assert_message(&mut self,
- msg: & $($mutability)* AssertMessage<'tcx>) {
- self.super_assert_message(msg);
+ msg: & $($mutability)* AssertMessage<'tcx>,
+ location: Location) {
+ self.super_assert_message(msg, location);
}
fn visit_rvalue(&mut self,
- rvalue: & $($mutability)* Rvalue<'tcx>) {
- self.super_rvalue(rvalue);
+ rvalue: & $($mutability)* Rvalue<'tcx>,
+ location: Location) {
+ self.super_rvalue(rvalue, location);
}
fn visit_operand(&mut self,
- operand: & $($mutability)* Operand<'tcx>) {
- self.super_operand(operand);
+ operand: & $($mutability)* Operand<'tcx>,
+ location: Location) {
+ self.super_operand(operand, location);
}
fn visit_lvalue(&mut self,
lvalue: & $($mutability)* Lvalue<'tcx>,
- context: LvalueContext) {
- self.super_lvalue(lvalue, context);
+ context: LvalueContext,
+ location: Location) {
+ self.super_lvalue(lvalue, context, location);
}
fn visit_projection(&mut self,
lvalue: & $($mutability)* LvalueProjection<'tcx>,
- context: LvalueContext) {
- self.super_projection(lvalue, context);
+ context: LvalueContext,
+ location: Location) {
+ self.super_projection(lvalue, context, location);
}
fn visit_projection_elem(&mut self,
lvalue: & $($mutability)* LvalueElem<'tcx>,
- context: LvalueContext) {
- self.super_projection_elem(lvalue, context);
+ context: LvalueContext,
+ location: Location) {
+ self.super_projection_elem(lvalue, context, location);
}
fn visit_branch(&mut self,
}
fn visit_constant(&mut self,
- constant: & $($mutability)* Constant<'tcx>) {
- self.super_constant(constant);
+ constant: & $($mutability)* Constant<'tcx>,
+ location: Location) {
+ self.super_constant(constant, location);
}
fn visit_literal(&mut self,
- literal: & $($mutability)* Literal<'tcx>) {
- self.super_literal(literal);
+ literal: & $($mutability)* Literal<'tcx>,
+ location: Location) {
+ self.super_literal(literal, location);
}
fn visit_def_id(&mut self,
- def_id: & $($mutability)* DefId) {
+ def_id: & $($mutability)* DefId,
+ _: Location) {
self.super_def_id(def_id);
}
}
fn visit_const_val(&mut self,
- const_val: & $($mutability)* ConstVal) {
+ const_val: & $($mutability)* ConstVal,
+ _: Location) {
self.super_const_val(const_val);
}
fn visit_const_usize(&mut self,
- const_usize: & $($mutability)* ConstUsize) {
+ const_usize: & $($mutability)* ConstUsize,
+ _: Location) {
self.super_const_usize(const_usize);
}
fn visit_typed_const_val(&mut self,
- val: & $($mutability)* TypedConstVal<'tcx>) {
- self.super_typed_const_val(val);
+ val: & $($mutability)* TypedConstVal<'tcx>,
+ location: Location) {
+ self.super_typed_const_val(val, location);
}
fn visit_var_decl(&mut self,
is_cleanup: _
} = *data;
+ let mut index = 0;
for statement in statements {
- self.visit_statement(block, statement);
+ let location = Location { block: block, statement_index: index };
+ self.visit_statement(block, statement, location);
+ index += 1;
}
if let Some(ref $($mutability)* terminator) = *terminator {
- self.visit_terminator(block, terminator);
+ let location = Location { block: block, statement_index: index };
+ self.visit_terminator(block, terminator, location);
}
}
fn super_statement(&mut self,
block: BasicBlock,
- statement: & $($mutability)* Statement<'tcx>) {
+ statement: & $($mutability)* Statement<'tcx>,
+ location: Location) {
let Statement {
ref $($mutability)* source_info,
ref $($mutability)* kind,
match *kind {
StatementKind::Assign(ref $($mutability)* lvalue,
ref $($mutability)* rvalue) => {
- self.visit_assign(block, lvalue, rvalue);
+ self.visit_assign(block, lvalue, rvalue, location);
}
StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => {
- self.visit_lvalue(lvalue, LvalueContext::Store);
+ self.visit_lvalue(lvalue, LvalueContext::Store, location);
}
StatementKind::StorageLive(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, LvalueContext::StorageLive);
+ self.visit_lvalue(lvalue, LvalueContext::StorageLive, location);
}
StatementKind::StorageDead(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, LvalueContext::StorageDead);
+ self.visit_lvalue(lvalue, LvalueContext::StorageDead, location);
}
}
}
fn super_assign(&mut self,
_block: BasicBlock,
lvalue: &$($mutability)* Lvalue<'tcx>,
- rvalue: &$($mutability)* Rvalue<'tcx>) {
- self.visit_lvalue(lvalue, LvalueContext::Store);
- self.visit_rvalue(rvalue);
+ rvalue: &$($mutability)* Rvalue<'tcx>,
+ location: Location) {
+ self.visit_lvalue(lvalue, LvalueContext::Store, location);
+ self.visit_rvalue(rvalue, location);
}
fn super_terminator(&mut self,
block: BasicBlock,
- terminator: &$($mutability)* Terminator<'tcx>) {
+ terminator: &$($mutability)* Terminator<'tcx>,
+ location: Location) {
let Terminator {
ref $($mutability)* source_info,
ref $($mutability)* kind,
} = *terminator;
self.visit_source_info(source_info);
- self.visit_terminator_kind(block, kind);
+ self.visit_terminator_kind(block, kind, location);
}
fn super_terminator_kind(&mut self,
block: BasicBlock,
- kind: & $($mutability)* TerminatorKind<'tcx>) {
+ kind: & $($mutability)* TerminatorKind<'tcx>,
+ source_location: Location) {
match *kind {
TerminatorKind::Goto { target } => {
self.visit_branch(block, target);
TerminatorKind::If { ref $($mutability)* cond,
ref $($mutability)* targets } => {
- self.visit_operand(cond);
+ self.visit_operand(cond, source_location);
for &target in targets.as_slice() {
self.visit_branch(block, target);
}
TerminatorKind::Switch { ref $($mutability)* discr,
adt_def: _,
ref targets } => {
- self.visit_lvalue(discr, LvalueContext::Inspect);
+ self.visit_lvalue(discr, LvalueContext::Inspect, source_location);
for &target in targets {
self.visit_branch(block, target);
}
ref $($mutability)* switch_ty,
ref $($mutability)* values,
ref targets } => {
- self.visit_lvalue(discr, LvalueContext::Inspect);
+ self.visit_lvalue(discr, LvalueContext::Inspect, source_location);
self.visit_ty(switch_ty);
for value in values {
- self.visit_const_val(value);
+ self.visit_const_val(value, source_location);
}
for &target in targets {
self.visit_branch(block, target);
TerminatorKind::Drop { ref $($mutability)* location,
target,
unwind } => {
- self.visit_lvalue(location, LvalueContext::Drop);
+ self.visit_lvalue(location, LvalueContext::Drop, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
ref $($mutability)* value,
target,
unwind } => {
- self.visit_lvalue(location, LvalueContext::Drop);
- self.visit_operand(value);
+ self.visit_lvalue(location, LvalueContext::Drop, source_location);
+ self.visit_operand(value, source_location);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
ref $($mutability)* args,
ref $($mutability)* destination,
cleanup } => {
- self.visit_operand(func);
+ self.visit_operand(func, source_location);
for arg in args {
- self.visit_operand(arg);
+ self.visit_operand(arg, source_location);
}
if let Some((ref $($mutability)* destination, target)) = *destination {
- self.visit_lvalue(destination, LvalueContext::Call);
+ self.visit_lvalue(destination, LvalueContext::Call, source_location);
self.visit_branch(block, target);
}
cleanup.map(|t| self.visit_branch(block, t));
ref $($mutability)* msg,
target,
cleanup } => {
- self.visit_operand(cond);
- self.visit_assert_message(msg);
+ self.visit_operand(cond, source_location);
+ self.visit_assert_message(msg, source_location);
self.visit_branch(block, target);
cleanup.map(|t| self.visit_branch(block, t));
}
}
fn super_assert_message(&mut self,
- msg: & $($mutability)* AssertMessage<'tcx>) {
+ msg: & $($mutability)* AssertMessage<'tcx>,
+ location: Location) {
match *msg {
AssertMessage::BoundsCheck {
ref $($mutability)* len,
ref $($mutability)* index
} => {
- self.visit_operand(len);
- self.visit_operand(index);
+ self.visit_operand(len, location);
+ self.visit_operand(index, location);
}
AssertMessage::Math(_) => {}
}
}
fn super_rvalue(&mut self,
- rvalue: & $($mutability)* Rvalue<'tcx>) {
+ rvalue: & $($mutability)* Rvalue<'tcx>,
+ location: Location) {
match *rvalue {
Rvalue::Use(ref $($mutability)* operand) => {
- self.visit_operand(operand);
+ self.visit_operand(operand, location);
}
Rvalue::Repeat(ref $($mutability)* value,
ref $($mutability)* typed_const_val) => {
- self.visit_operand(value);
- self.visit_typed_const_val(typed_const_val);
+ self.visit_operand(value, location);
+ self.visit_typed_const_val(typed_const_val, location);
}
Rvalue::Ref(r, bk, ref $($mutability)* path) => {
self.visit_lvalue(path, LvalueContext::Borrow {
region: r,
kind: bk
- });
+ }, location);
}
Rvalue::Len(ref $($mutability)* path) => {
- self.visit_lvalue(path, LvalueContext::Inspect);
+ self.visit_lvalue(path, LvalueContext::Inspect, location);
}
Rvalue::Cast(_cast_kind,
ref $($mutability)* operand,
ref $($mutability)* ty) => {
- self.visit_operand(operand);
+ self.visit_operand(operand, location);
self.visit_ty(ty);
}
Rvalue::CheckedBinaryOp(_bin_op,
ref $($mutability)* lhs,
ref $($mutability)* rhs) => {
- self.visit_operand(lhs);
- self.visit_operand(rhs);
+ self.visit_operand(lhs, location);
+ self.visit_operand(rhs, location);
}
Rvalue::UnaryOp(_un_op, ref $($mutability)* op) => {
- self.visit_operand(op);
+ self.visit_operand(op, location);
}
Rvalue::Box(ref $($mutability)* ty) => {
}
AggregateKind::Closure(ref $($mutability)* def_id,
ref $($mutability)* closure_substs) => {
- self.visit_def_id(def_id);
+ self.visit_def_id(def_id, location);
self.visit_closure_substs(closure_substs);
}
}
for operand in operands {
- self.visit_operand(operand);
+ self.visit_operand(operand, location);
}
}
ref $($mutability)* inputs,
asm: _ } => {
for output in & $($mutability)* outputs[..] {
- self.visit_lvalue(output, LvalueContext::Store);
+ self.visit_lvalue(output, LvalueContext::Store, location);
}
for input in & $($mutability)* inputs[..] {
- self.visit_operand(input);
+ self.visit_operand(input, location);
}
}
}
}
fn super_operand(&mut self,
- operand: & $($mutability)* Operand<'tcx>) {
+ operand: & $($mutability)* Operand<'tcx>,
+ location: Location) {
match *operand {
Operand::Consume(ref $($mutability)* lvalue) => {
- self.visit_lvalue(lvalue, LvalueContext::Consume);
+ self.visit_lvalue(lvalue, LvalueContext::Consume, location);
}
Operand::Constant(ref $($mutability)* constant) => {
- self.visit_constant(constant);
+ self.visit_constant(constant, location);
}
}
}
fn super_lvalue(&mut self,
lvalue: & $($mutability)* Lvalue<'tcx>,
- context: LvalueContext) {
+ context: LvalueContext,
+ location: Location) {
match *lvalue {
Lvalue::Var(_) |
Lvalue::Temp(_) |
Lvalue::ReturnPointer => {
}
Lvalue::Static(ref $($mutability)* def_id) => {
- self.visit_def_id(def_id);
+ self.visit_def_id(def_id, location);
}
Lvalue::Projection(ref $($mutability)* proj) => {
- self.visit_projection(proj, context);
+ self.visit_projection(proj, context, location);
}
}
}
fn super_projection(&mut self,
proj: & $($mutability)* LvalueProjection<'tcx>,
- context: LvalueContext) {
+ context: LvalueContext,
+ location: Location) {
let Projection {
ref $($mutability)* base,
ref $($mutability)* elem,
} = *proj;
- self.visit_lvalue(base, LvalueContext::Projection);
- self.visit_projection_elem(elem, context);
+ self.visit_lvalue(base, LvalueContext::Projection, location);
+ self.visit_projection_elem(elem, context, location);
}
fn super_projection_elem(&mut self,
proj: & $($mutability)* LvalueElem<'tcx>,
- _context: LvalueContext) {
+ _context: LvalueContext,
+ location: Location) {
match *proj {
ProjectionElem::Deref => {
}
self.visit_ty(ty);
}
ProjectionElem::Index(ref $($mutability)* operand) => {
- self.visit_operand(operand);
+ self.visit_operand(operand, location);
}
ProjectionElem::ConstantIndex { offset: _,
min_length: _,
}
fn super_constant(&mut self,
- constant: & $($mutability)* Constant<'tcx>) {
+ constant: & $($mutability)* Constant<'tcx>,
+ location: Location) {
let Constant {
ref $($mutability)* span,
ref $($mutability)* ty,
self.visit_span(span);
self.visit_ty(ty);
- self.visit_literal(literal);
+ self.visit_literal(literal, location);
}
fn super_typed_const_val(&mut self,
- constant: & $($mutability)* TypedConstVal<'tcx>) {
+ constant: & $($mutability)* TypedConstVal<'tcx>,
+ location: Location) {
let TypedConstVal {
ref $($mutability)* span,
ref $($mutability)* ty,
self.visit_span(span);
self.visit_ty(ty);
- self.visit_const_usize(value);
+ self.visit_const_usize(value, location);
}
fn super_literal(&mut self,
- literal: & $($mutability)* Literal<'tcx>) {
+ literal: & $($mutability)* Literal<'tcx>,
+ location: Location) {
match *literal {
Literal::Item { ref $($mutability)* def_id,
ref $($mutability)* substs } => {
- self.visit_def_id(def_id);
+ self.visit_def_id(def_id, location);
self.visit_substs(substs);
}
Literal::Value { ref $($mutability)* value } => {
- self.visit_const_val(value);
+ self.visit_const_val(value, location);
}
Literal::Promoted { index: _ } => {}
}
make_mir_visitor!(MutVisitor,mut);
#[derive(Copy, Clone, Debug)]
-pub enum LvalueContext {
+pub enum LvalueContext<'tcx> {
// Appears as LHS of an assignment
Store,
Inspect,
// Being borrowed
- Borrow { region: Region, kind: BorrowKind },
+ Borrow { region: &'tcx Region, kind: BorrowKind },
// Being sliced -- this should be same as being borrowed, probably
Slice { from_start: usize, from_end: usize },
pub const parse_bool: Option<&'static str> = None;
pub const parse_opt_bool: Option<&'static str> =
Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
- pub const parse_all_bool: Option<&'static str> =
- Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
pub const parse_string: Option<&'static str> = Some("a string");
pub const parse_opt_string: Option<&'static str> = Some("a string");
pub const parse_list: Option<&'static str> = Some("a space-separated list of strings");
}
}
- fn parse_all_bool(slot: &mut bool, v: Option<&str>) -> bool {
- match v {
- Some(s) => {
- match s {
- "n" | "no" | "off" => {
- *slot = false;
- }
- "y" | "yes" | "on" => {
- *slot = true;
- }
- _ => { return false; }
- }
-
- true
- },
- None => { *slot = true; true }
- }
- }
-
fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool {
match v {
Some(s) => { *slot = Some(s.to_string()); true },
"adds unstable command line options to rustc interface"),
force_overflow_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
"force overflow checks on or off"),
- force_dropflag_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
- "force drop flag checks on or off"),
trace_macros: bool = (false, parse_bool, [UNTRACKED],
"for every macro invocation, print its name and arguments"),
+ debug_macros: bool = (false, parse_bool, [TRACKED],
+ "emit line numbers debug info inside macros"),
enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED],
"force nonzeroing move optimization on"),
keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],
"dump MIR state at various points in translation"),
dump_mir_dir: Option<String> = (None, parse_opt_string, [UNTRACKED],
"the directory the MIR is dumped into"),
- orbit: bool = (true, parse_all_bool, [UNTRACKED],
- "get MIR where it belongs - everywhere; most importantly, in orbit"),
}
pub fn default_lib_output() -> CrateType {
})
});
- let mut debugging_opts = build_debugging_options(matches, error_format);
-
- // Incremental compilation only works reliably when translation is done via
- // MIR, so let's enable -Z orbit if necessary (see #34973).
- if debugging_opts.incremental.is_some() && !debugging_opts.orbit {
- early_warn(error_format, "Automatically enabling `-Z orbit` because \
- `-Z incremental` was specified");
- debugging_opts.orbit = true;
- }
+ let debugging_opts = build_debugging_options(matches, error_format);
let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1);
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
opts.debugging_opts.dump_mir_dir = Some(String::from("abc"));
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
- opts.debugging_opts.orbit = false;
- assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash());
// Make sure changing a [TRACKED] option changes the hash
opts = reference.clone();
opts.debugging_opts.force_overflow_checks = Some(true);
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
- opts = reference.clone();
- opts.debugging_opts.force_dropflag_checks = Some(true);
- assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
-
opts = reference.clone();
opts.debugging_opts.enable_nonzeroing_move_hints = true;
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
if let Ok(..) = self.can_equate(&trait_self_ty, &impl_self_ty) {
self_match_impls.push(def_id);
- if trait_ref.substs.types[1..].iter()
- .zip(&impl_trait_ref.substs.types[1..])
+ if trait_ref.substs.types().skip(1)
+ .zip(impl_trait_ref.substs.types().skip(1))
.all(|(u,v)| self.fuzzy_match_tys(u, v))
{
fuzzy_match_impls.push(def_id);
return;
}
- let mut err = struct_span_err!(
- self.tcx.sess, span, E0277,
+ let mut err = struct_span_err!(self.tcx.sess, span, E0277,
"the trait bound `{}` is not satisfied",
trait_ref.to_predicate());
+ err.span_label(span, &format!("trait `{}` not satisfied",
+ trait_ref.to_predicate()));
// Try to report a help message
ty::Predicate::Trait(ref data) => {
let trait_ref = data.to_poly_trait_ref();
let self_ty = trait_ref.self_ty();
- let all_types = &trait_ref.substs().types;
- if all_types.references_error() {
+ if predicate.references_error() {
} else {
// Typically, this ambiguity should only happen if
// there are unresolved type inference variables
#[derive(Clone)]
pub struct RegionObligation<'tcx> {
- pub sub_region: ty::Region,
+ pub sub_region: &'tcx ty::Region,
pub sup_type: Ty<'tcx>,
pub cause: ObligationCause<'tcx>,
}
// Auto trait obligations on `impl Trait`.
if tcx.trait_has_default_impl(predicate.def_id()) {
let substs = predicate.skip_binder().trait_ref.substs;
- if substs.types.len() == 1 && substs.regions.is_empty() {
+ if substs.types().count() == 1 && substs.regions().next().is_none() {
if let ty::TyAnon(..) = predicate.skip_binder().self_ty().sty {
return true;
}
let concrete_ty = ty_scheme.ty.subst(tcx, substs);
let predicate = ty::TraitRef {
def_id: self.predicate.def_id(),
- substs: Substs::new_trait(tcx, vec![], vec![], concrete_ty)
+ substs: Substs::new_trait(tcx, concrete_ty, &[])
}.to_predicate();
let original_obligation = Obligation::new(self.cause.clone(),
pub fn register_region_obligation(&mut self,
t_a: Ty<'tcx>,
- r_b: ty::Region,
+ r_b: &'tcx ty::Region,
cause: ObligationCause<'tcx>)
{
register_region_obligation(t_a, r_b, cause, &mut self.region_obligations);
{
t.skip_binder() // ok b/c this check doesn't care about regions
.input_types()
- .iter()
- .map(|t| selcx.infcx().resolve_type_vars_if_possible(t))
+ .map(|t| selcx.infcx().resolve_type_vars_if_possible(&t))
.filter(|t| t.has_infer_types())
.flat_map(|t| t.walk())
.filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
// Otherwise, we have something of the form
// `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
Some(t_a) => {
- register_region_obligation(t_a, ty::ReStatic,
+ let r_static = selcx.tcx().mk_region(ty::ReStatic);
+ register_region_obligation(t_a, r_static,
obligation.cause.clone(),
region_obligations);
Ok(Some(vec![]))
}
fn register_region_obligation<'tcx>(t_a: Ty<'tcx>,
- r_b: ty::Region,
+ r_b: &'tcx ty::Region,
cause: ObligationCause<'tcx>,
region_obligations: &mut NodeMap<Vec<RegionObligation<'tcx>>>)
{
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
- data.0.trait_ref.input_types()[1..].iter().any(|t| t.has_self_ty())
+ data.skip_binder().input_types().skip(1).any(|t| t.has_self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::WellFormed(..) |
use hir::def_id::DefId;
use infer;
use infer::{InferCtxt, InferOk, TypeFreshener, TypeOrigin};
-use ty::subst::{Subst, Substs};
+use ty::subst::{Kind, Subst, Substs};
use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
use traits;
use ty::fast_reject;
// This suffices to allow chains like `FnMut` implemented in
// terms of `Fn` etc, but we could probably make this more
// precise still.
- let input_types = stack.fresh_trait_ref.0.input_types();
- let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh());
+ let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh());
if unbound_input_types && self.intercrate {
debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
stack.fresh_trait_ref);
match *candidate {
Ok(Some(_)) | Err(_) => true,
- Ok(None) => {
- cache_fresh_trait_pred.0.trait_ref.substs.types.has_infer_types()
- }
+ Ok(None) => cache_fresh_trait_pred.has_infer_types()
}
}
obligation: &TraitObligation<'tcx>,
trait_bound: ty::PolyTraitRef<'tcx>,
skol_trait_ref: ty::TraitRef<'tcx>,
- skol_map: &infer::SkolemizationMap,
+ skol_map: &infer::SkolemizationMap<'tcx>,
snapshot: &infer::CombinedSnapshot)
-> bool
{
return;
}
};
- let target = obligation.predicate.skip_binder().input_types()[1];
+ let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})",
source, target);
// for `PhantomData<T>`, we pass `T`
ty::TyStruct(def, substs) if def.is_phantom_data() => {
- substs.types.to_vec()
+ substs.types().collect()
}
ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
trait_def_id,
recursion_depth,
normalized_ty,
- vec![]);
+ &[]);
obligations.push(skol_obligation);
this.infcx().plug_leaks(skol_map, snapshot, &obligations)
})
match self_ty.sty {
ty::TyTrait(ref data) => {
// OK to skip the binder, it is reintroduced below
- let input_types = data.principal.skip_binder().input_types();
+ let input_types = data.principal.input_types();
let assoc_types = data.projection_bounds.iter()
.map(|pb| pb.skip_binder().ty);
- let all_types: Vec<_> = input_types.iter().cloned()
- .chain(assoc_types)
- .collect();
+ let all_types: Vec<_> = input_types.chain(assoc_types)
+ .collect();
// reintroduce the two binding levels we skipped, then flatten into one
let all_types = ty::Binder(ty::Binder(all_types));
mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
- skol_map: infer::SkolemizationMap,
+ skol_map: infer::SkolemizationMap<'tcx>,
snapshot: &infer::CombinedSnapshot)
-> VtableImplData<'tcx, PredicateObligation<'tcx>>
{
// regions here. See the comment there for more details.
let source = self.infcx.shallow_resolve(
tcx.no_late_bound_regions(&obligation.self_ty()).unwrap());
- let target = obligation.predicate.skip_binder().input_types()[1];
+ let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
let target = self.infcx.shallow_resolve(target);
debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
} else {
return Err(Unimplemented);
};
- let mut ty_params = BitVector::new(substs_a.types.len());
+ let mut ty_params = BitVector::new(substs_a.types().count());
let mut found = false;
for ty in field.walk() {
if let ty::TyParam(p) = ty.sty {
// TyError and ensure they do not affect any other fields.
// This could be checked after type collection for any struct
// with a potentially unsized trailing field.
- let types = substs_a.types.iter().enumerate().map(|(i, ty)| {
+ let params = substs_a.params().iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
- tcx.types.err
+ Kind::from(tcx.types.err)
} else {
- ty
+ k
}
- }).collect();
- let substs = Substs::new(tcx, types, substs_a.regions.clone());
+ });
+ let substs = Substs::new(tcx, params);
for &ty in fields.split_last().unwrap().1 {
if ty.subst(tcx, substs).references_error() {
return Err(Unimplemented);
// Check that the source structure with the target's
// type parameters is a subtype of the target.
- let types = substs_a.types.iter().enumerate().map(|(i, ty)| {
+ let params = substs_a.params().iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
- substs_b.types[i]
+ Kind::from(substs_b.type_at(i))
} else {
- ty
+ k
}
- }).collect();
- let substs = Substs::new(tcx, types, substs_a.regions.clone());
- let new_struct = tcx.mk_struct(def, substs);
+ });
+ let new_struct = tcx.mk_struct(def, Substs::new(tcx, params));
let origin = TypeOrigin::Misc(obligation.cause.span);
let InferOk { obligations, .. } =
self.infcx.sub_types(false, origin, new_struct, target)
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
inner_source,
- vec![inner_target]));
+ &[inner_target]));
}
_ => bug!()
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot)
- -> (Normalized<'tcx, &'tcx Substs<'tcx>>, infer::SkolemizationMap)
+ -> (Normalized<'tcx, &'tcx Substs<'tcx>>,
+ infer::SkolemizationMap<'tcx>)
{
match self.match_impl(impl_def_id, obligation, snapshot) {
Ok((substs, skol_map)) => (substs, skol_map),
obligation: &TraitObligation<'tcx>,
snapshot: &infer::CombinedSnapshot)
-> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>,
- infer::SkolemizationMap), ()>
+ infer::SkolemizationMap<'tcx>), ()>
{
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// substitution if we find that any of the input types, when
// simplified, do not match.
- obligation.predicate.0.input_types().iter()
+ obligation.predicate.skip_binder().input_types()
.zip(impl_trait_ref.input_types())
- .any(|(&obligation_ty, &impl_ty)| {
+ .any(|(obligation_ty, impl_ty)| {
let simplified_obligation_ty =
fast_reject::simplify_type(self.tcx(), obligation_ty, true);
let simplified_impl_ty =
recursion_depth: usize,
def_id: DefId, // of impl or trait
substs: &Substs<'tcx>, // for impl or trait
- skol_map: infer::SkolemizationMap,
+ skol_map: infer::SkolemizationMap<'tcx>,
snapshot: &infer::CombinedSnapshot)
-> Vec<PredicateObligation<'tcx>>
{
Ok(def_id) => {
Ok(ty::TraitRef {
def_id: def_id,
- substs: Substs::new_trait(self, vec![], vec![], param_ty)
+ substs: Substs::new_trait(self, param_ty, &[])
})
}
Err(e) => {
trait_def_id: DefId,
recursion_depth: usize,
param_ty: Ty<'tcx>,
- ty_params: Vec<Ty<'tcx>>)
+ ty_params: &[Ty<'tcx>])
-> PredicateObligation<'tcx>
{
let trait_ref = ty::TraitRef {
def_id: trait_def_id,
- substs: Substs::new_trait(self, ty_params, vec![], param_ty)
+ substs: Substs::new_trait(self, param_ty, ty_params)
};
predicate_for_trait_ref(cause, trait_ref, recursion_depth)
}
};
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,
- substs: Substs::new_trait(self, vec![arguments_tuple], vec![], self_ty),
+ substs: Substs::new_trait(self, self_ty, &[arguments_tuple]),
};
ty::Binder((trait_ref, sig.0.output))
}
self.relate(a, b)
}
- fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region> {
debug!("{}.regions({:?}, {:?})",
self.tag(),
a,
pub method_map: ty::MethodMap<'tcx>,
/// Borrows
- pub upvar_capture_map: ty::UpvarCaptureMap,
+ pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
/// Records the type of each closure. The def ID is the ID of the
/// expression defining the closure.
impl_interners!('tcx,
type_list: mk_type_list(Vec<Ty<'tcx>>, keep_local) -> [Ty<'tcx>],
substs: mk_substs(Substs<'tcx>, |substs: &Substs| {
- keep_local(&substs.types) || keep_local(&substs.regions)
+ substs.params().iter().any(keep_local)
}) -> Substs<'tcx>,
bare_fn: mk_bare_fn(BareFnTy<'tcx>, |fty: &BareFnTy| {
keep_local(&fty.sig)
}) -> BareFnTy<'tcx>,
- region: mk_region(Region, keep_local) -> Region
+ region: mk_region(Region, |r| {
+ match r {
+ &ty::ReVar(_) | &ty::ReSkolemized(..) => true,
+ _ => false
+ }
+ }) -> Region
);
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
FixedArraySize(ExpectedFound<usize>),
TyParamSize(ExpectedFound<usize>),
ArgCount,
- RegionsDoesNotOutlive(Region, Region),
- RegionsNotSame(Region, Region),
- RegionsNoOverlap(Region, Region),
- RegionsInsufficientlyPolymorphic(BoundRegion, Region),
- RegionsOverlyPolymorphic(BoundRegion, Region),
+ RegionsDoesNotOutlive(&'tcx Region, &'tcx Region),
+ RegionsNotSame(&'tcx Region, &'tcx Region),
+ RegionsNoOverlap(&'tcx Region, &'tcx Region),
+ RegionsInsufficientlyPolymorphic(BoundRegion, &'tcx Region),
+ RegionsOverlyPolymorphic(BoundRegion, &'tcx Region),
Sorts(ExpectedFound<Ty<'tcx>>),
IntegerAsChar,
IntMismatch(ExpectedFound<ty::IntVarValue>),
self.note_and_explain_region(db, "concrete lifetime that was found is ",
conc_region, "");
}
- RegionsOverlyPolymorphic(_, ty::ReVar(_)) => {
+ RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => {
// don't bother to print out the message below for
// inference variables, it's not very illuminating.
}
}
&ty::TyRef(r, ref m) => {
- self.add_region(*r);
+ self.add_region(r);
self.add_ty(m.ty);
}
self.add_bound_computation(&computation);
}
- fn add_region(&mut self, r: ty::Region) {
- match r {
+ fn add_region(&mut self, r: &ty::Region) {
+ match *r {
ty::ReVar(..) => {
self.add_flags(TypeFlags::HAS_RE_INFER);
self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX);
}
fn add_substs(&mut self, substs: &Substs) {
- self.add_tys(&substs.types);
- for &r in &substs.regions {
+ for ty in substs.types() {
+ self.add_ty(ty);
+ }
+
+ for r in substs.regions() {
self.add_region(r);
}
}
fty.super_fold_with(self)
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
r.super_fold_with(self)
}
t.super_visit_with(self)
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
r.super_visit_with(self)
}
}
/// whether any late-bound regions were skipped
pub fn collect_regions<T>(self,
value: &T,
- region_set: &mut FnvHashSet<ty::Region>)
+ region_set: &mut FnvHashSet<&'tcx ty::Region>)
-> bool
where T : TypeFoldable<'tcx>
{
let mut have_bound_regions = false;
- self.fold_regions(value, &mut have_bound_regions,
- |r, d| { region_set.insert(r.from_depth(d)); r });
+ self.fold_regions(value, &mut have_bound_regions, |r, d| {
+ region_set.insert(self.mk_region(r.from_depth(d)));
+ r
+ });
have_bound_regions
}
skipped_regions: &mut bool,
mut f: F)
-> T
- where F : FnMut(ty::Region, u32) -> ty::Region,
+ where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region,
T : TypeFoldable<'tcx>,
{
value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f))
tcx: TyCtxt<'a, 'gcx, 'tcx>,
skipped_regions: &'a mut bool,
current_depth: u32,
- fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
+ fld_r: &'a mut (FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region + 'a),
}
impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> {
pub fn new<F>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
skipped_regions: &'a mut bool,
fld_r: &'a mut F) -> RegionFolder<'a, 'gcx, 'tcx>
- where F : FnMut(ty::Region, u32) -> ty::Region
+ where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region
{
RegionFolder {
tcx: tcx,
t
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
- match r {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+ match *r {
ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => {
debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})",
r, self.current_depth);
struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
current_depth: u32,
- fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a),
- map: FnvHashMap<ty::BoundRegion, ty::Region>
+ fld_r: &'a mut (FnMut(ty::BoundRegion) -> &'tcx ty::Region + 'a),
+ map: FnvHashMap<ty::BoundRegion, &'tcx ty::Region>
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn replace_late_bound_regions<T,F>(self,
value: &Binder<T>,
mut f: F)
- -> (T, FnvHashMap<ty::BoundRegion, ty::Region>)
- where F : FnMut(ty::BoundRegion) -> ty::Region,
+ -> (T, FnvHashMap<ty::BoundRegion, &'tcx ty::Region>)
+ where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region,
T : TypeFoldable<'tcx>,
{
let mut replacer = RegionReplacer::new(self, &mut f);
where T : TypeFoldable<'tcx>
{
self.replace_late_bound_regions(value, |br| {
- ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br})
+ self.mk_region(ty::ReFree(ty::FreeRegion {
+ scope: all_outlive_scope,
+ bound_region: br
+ }))
}).0
}
let bound0_value = bound2_value.skip_binder().skip_binder();
let value = self.fold_regions(bound0_value, &mut false,
|region, current_depth| {
- match region {
+ match *region {
ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => {
// should be true if no escaping regions from bound2_value
assert!(debruijn.depth - current_depth <= 1);
- ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)
+ self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br))
}
_ => {
region
pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- self.replace_late_bound_regions(value, |_| ty::ReErased).0
+ self.replace_late_bound_regions(value, |_| self.mk_region(ty::ReErased)).0
}
/// Rewrite any late-bound regions so that they are anonymous. Region numbers are
let mut counter = 0;
Binder(self.replace_late_bound_regions(sig, |_| {
counter += 1;
- ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))
+ self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter)))
}).0)
}
}
impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> {
fn new<F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F)
-> RegionReplacer<'a, 'gcx, 'tcx>
- where F : FnMut(ty::BoundRegion) -> ty::Region
+ where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region
{
RegionReplacer {
tcx: tcx,
t.super_fold_with(self)
}
-    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
-        match r {
+    fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
+        match *r {
             ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => {
                 let fld_r = &mut self.fld_r;
                 let region = *self.map.entry(br).or_insert_with(|| fld_r(br));
-                if let ty::ReLateBound(debruijn1, br) = region {
+                if let ty::ReLateBound(debruijn1, br) = *region {
                     // If the callback returns a late-bound region,
                     // that region should always use depth 1. Then we
                     // adjust it to the correct depth.
                     assert_eq!(debruijn1.depth, 1);
-                    ty::ReLateBound(debruijn, br)
+                    self.tcx.mk_region(ty::ReLateBound(debruijn, br))
                 } else {
                     region
                 }
             }
-            r => r
+            _ => r
         }
     }
}
u.super_fold_with(self)
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
// because late-bound regions affect subtyping, we can't
// erase the bound/free distinction, but we can replace
// all free regions with 'erased.
// type system never "sees" those, they get substituted
// away. In trans, they will always be erased to 'erased
// whenever a substitution occurs.
- match r {
+ match *r {
ty::ReLateBound(..) => r,
- _ => ty::ReErased
+ _ => self.tcx().mk_region(ty::ReErased)
}
}
}
value, amount);
value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
- shift_region(region, amount)
+ tcx.mk_region(shift_region(*region, amount))
}))
}
t.region_depth > self.depth
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
r.escapes_depth(self.depth)
}
}
t.flags.get().intersects(self.flags)
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
if self.flags.intersects(ty::TypeFlags::HAS_LOCAL_NAMES) {
// does this represent a region that cannot be named
// in a global way? used in fulfillment caching.
- match r {
+ match *r {
ty::ReStatic | ty::ReEmpty | ty::ReErased => {}
_ => return true,
}
}
- if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER) {
- match r {
+ if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER |
+ ty::TypeFlags::KEEP_IN_LOCAL_TCX) {
+ match *r {
ty::ReVar(_) | ty::ReSkolemized(..) => { return true }
_ => {}
}
t.super_visit_with(self)
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
- match r {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
+ match *r {
ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => {
self.regions.insert(br);
}
match self_ty.sty {
ty::TyStruct(adt_def, substs) |
ty::TyEnum(adt_def, substs) => {
- if substs.types.is_empty() { // ignore regions
+ if substs.types().next().is_none() { // ignore regions
self.push_item_path(buffer, adt_def.did);
} else {
buffer.push(&format!("<{}>", self_ty));
let mut st = Struct::new(dl, packed);
st.extend(dl, fields, ty)?;
- // FIXME(16758) don't add a drop flag to unsized structs, as it
- // won't actually be in the location we say it is because it'll be after
- // the unsized field. Several other pieces of code assume that the unsized
- // field is definitely the last one.
- if def.dtor_kind().has_drop_flag() &&
- ty.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
- st.extend(dl, Some(Ok(&Scalar {
- value: Int(I8),
- non_zero: false
- })).into_iter(), ty)?;
- }
Univariant {
variant: st,
non_zero: Some(def.did) == tcx.lang_items.non_zero()
let hint = *tcx.lookup_repr_hints(def.did).get(0)
.unwrap_or(&attr::ReprAny);
- let dtor = def.dtor_kind().has_drop_flag();
- let drop_flag = if dtor {
- Some(Scalar { value: Int(I8), non_zero: false })
- } else {
- None
- };
-
if def.variants.is_empty() {
// Uninhabitable; represent as unit
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
- let mut st = Struct::new(dl, false);
- st.extend(dl, drop_flag.iter().map(Ok), ty)?;
- return success(Univariant { variant: st, non_zero: false });
+ return success(Univariant {
+ variant: Struct::new(dl, false),
+ non_zero: false
+ });
}
- if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) {
+ if def.variants.iter().all(|v| v.fields.is_empty()) {
// All bodies empty -> intlike
let (mut min, mut max) = (i64::MAX, i64::MIN);
for v in &def.variants {
field.ty(tcx, substs).layout(infcx)
});
let mut st = Struct::new(dl, false);
- st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?;
+ st.extend(dl, fields, ty)?;
return success(Univariant { variant: st, non_zero: false });
}
v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
}).collect::<Vec<_>>();
- if !dtor && variants.len() == 2 && hint == attr::ReprAny {
+ if variants.len() == 2 && hint == attr::ReprAny {
// Nullable pointer optimization
for discr in 0..2 {
let other_fields = variants[1 - discr].iter().map(|ty| {
Ok(field)
});
let mut st = Struct::new(dl, false);
- st.extend(dl, discr.iter().map(Ok).chain(fields)
- .chain(drop_flag.iter().map(Ok)), ty)?;
+ st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?;
size = cmp::max(size, st.min_size());
align = align.max(st.align);
Ok(st)
return Err(err);
}
- // If there's a drop flag, it can't be just a pointer.
- if def.dtor_kind().has_drop_flag() {
- return Err(err);
- }
-
// Get a zero-sized variant or a pointer newtype.
let zero_or_ptr_variant = |i: usize| {
let fields = def.variants[i].fields.iter().map(|field| {
dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>> }
dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef<'tcx> }
dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> }
-dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc<ty::ItemVariances> }
+dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc<Vec<ty::Variance>> }
dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Rc<Vec<DefId>> }
dep_map_ty! { ImplItems: ImplItems(DefId) -> Vec<ty::ImplOrTraitItemId> }
dep_map_ty! { TraitItems: TraitItems(DefId) -> Rc<Vec<ty::ImplOrTraitItem<'tcx>>> }
#[derive(Copy, Clone)]
pub enum DtorKind {
NoDtor,
- TraitDtor(bool)
+ TraitDtor
}
impl DtorKind {
pub fn is_present(&self) -> bool {
match *self {
- TraitDtor(..) => true,
+ TraitDtor => true,
_ => false
}
}
-
- pub fn has_drop_flag(&self) -> bool {
- match self {
- &NoDtor => false,
- &TraitDtor(flag) => flag
- }
- }
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub generics: &'tcx Generics<'tcx>,
pub predicates: GenericPredicates<'tcx>,
pub fty: &'tcx BareFnTy<'tcx>,
- pub explicit_self: ExplicitSelfCategory,
+ pub explicit_self: ExplicitSelfCategory<'tcx>,
pub vis: Visibility,
pub defaultness: hir::Defaultness,
pub def_id: DefId,
generics: &'tcx ty::Generics<'tcx>,
predicates: GenericPredicates<'tcx>,
fty: &'tcx BareFnTy<'tcx>,
- explicit_self: ExplicitSelfCategory,
+ explicit_self: ExplicitSelfCategory<'tcx>,
vis: Visibility,
defaultness: hir::Defaultness,
def_id: DefId,
pub container: ImplOrTraitItemContainer,
}
-#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable)]
-pub struct ItemVariances {
- pub types: Vec<Variance>,
- pub regions: Vec<Variance>,
-}
-
-impl ItemVariances {
- pub fn empty() -> ItemVariances {
- ItemVariances {
- types: vec![],
- regions: vec![],
- }
- }
-}
-
#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)]
pub enum Variance {
Covariant, // T<A> <: T<B> iff A <: B -- e.g., function return type
/// Information describing the capture of an upvar. This is computed
/// during `typeck`, specifically by `regionck`.
#[derive(PartialEq, Clone, Debug, Copy)]
-pub enum UpvarCapture {
+pub enum UpvarCapture<'tcx> {
/// Upvar is captured by value. This is always true when the
/// closure is labeled `move`, but can also be true in other cases
/// depending on inference.
ByValue,
/// Upvar is captured by reference.
- ByRef(UpvarBorrow),
+ ByRef(UpvarBorrow<'tcx>),
}
#[derive(PartialEq, Clone, Copy)]
-pub struct UpvarBorrow {
+pub struct UpvarBorrow<'tcx> {
/// The kind of borrow: by-ref upvars have access to shared
/// immutable borrows, which are not part of the normal language
/// syntax.
pub kind: BorrowKind,
/// Region of the resulting reference.
- pub region: ty::Region,
+ pub region: &'tcx ty::Region,
}
-pub type UpvarCaptureMap = FnvHashMap<UpvarId, UpvarCapture>;
+pub type UpvarCaptureMap<'tcx> = FnvHashMap<UpvarId, UpvarCapture<'tcx>>;
#[derive(Copy, Clone)]
pub struct ClosureUpvar<'tcx> {
/// this is `None`, then the default is inherited from the
/// surrounding context. See RFC #599 for details.
#[derive(Copy, Clone)]
-pub enum ObjectLifetimeDefault {
+pub enum ObjectLifetimeDefault<'tcx> {
/// Require an explicit annotation. Occurs when multiple
/// `T:'a` constraints are found.
Ambiguous,
BaseDefault,
/// Use the given region as the default.
- Specific(Region),
+ Specific(&'tcx Region),
}
#[derive(Clone)]
pub index: u32,
pub default_def_id: DefId, // for use in error reporing about defaults
pub default: Option<Ty<'tcx>>,
- pub object_lifetime_default: ObjectLifetimeDefault,
+ pub object_lifetime_default: ObjectLifetimeDefault<'tcx>,
}
#[derive(Clone)]
-pub struct RegionParameterDef {
+pub struct RegionParameterDef<'tcx> {
pub name: Name,
pub def_id: DefId,
pub index: u32,
- pub bounds: Vec<ty::Region>,
+ pub bounds: Vec<&'tcx ty::Region>,
}
-impl RegionParameterDef {
+impl<'tcx> RegionParameterDef<'tcx> {
pub fn to_early_bound_region(&self) -> ty::Region {
ty::ReEarlyBound(ty::EarlyBoundRegion {
index: self.index,
pub parent: Option<DefId>,
pub parent_regions: u32,
pub parent_types: u32,
- pub regions: Vec<RegionParameterDef>,
+ pub regions: Vec<RegionParameterDef<'tcx>>,
pub types: Vec<TypeParameterDef<'tcx>>,
pub has_self: bool,
}
+impl<'tcx> Generics<'tcx> {
+ pub fn parent_count(&self) -> usize {
+ self.parent_regions as usize + self.parent_types as usize
+ }
+
+ pub fn own_count(&self) -> usize {
+ self.regions.len() + self.types.len()
+ }
+
+ pub fn count(&self) -> usize {
+ self.parent_count() + self.own_count()
+ }
+}
+
/// Bounds on generics.
#[derive(Clone)]
pub struct GenericPredicates<'tcx> {
Equate(PolyEquatePredicate<'tcx>),
/// where 'a : 'b
- RegionOutlives(PolyRegionOutlivesPredicate),
+ RegionOutlives(PolyRegionOutlivesPredicate<'tcx>),
/// where T : 'a
TypeOutlives(PolyTypeOutlivesPredicate<'tcx>),
// leads to more recompilation.
let def_ids: Vec<_> =
self.input_types()
- .iter()
.flat_map(|t| t.walk())
.filter_map(|t| match t.sty {
ty::TyStruct(adt_def, _) |
DepNode::TraitSelect(self.def_id(), def_ids)
}
- pub fn input_types(&self) -> &[Ty<'tcx>] {
- &self.trait_ref.substs.types
+ pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
+ self.trait_ref.input_types()
}
pub fn self_ty(&self) -> Ty<'tcx> {
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct OutlivesPredicate<A,B>(pub A, pub B); // `A : B`
pub type PolyOutlivesPredicate<A,B> = ty::Binder<OutlivesPredicate<A,B>>;
-pub type PolyRegionOutlivesPredicate = PolyOutlivesPredicate<ty::Region, ty::Region>;
-pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate<Ty<'tcx>, ty::Region>;
+pub type PolyRegionOutlivesPredicate<'tcx> = PolyOutlivesPredicate<&'tcx ty::Region,
+ &'tcx ty::Region>;
+pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate<Ty<'tcx>, &'tcx ty::Region>;
/// This kind of predicate has no *direct* correspondent in the
/// syntax, but it roughly corresponds to the syntactic forms:
}
}
-impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate {
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
fn to_predicate(&self) -> Predicate<'tcx> {
Predicate::RegionOutlives(self.clone())
}
pub fn walk_tys(&self) -> IntoIter<Ty<'tcx>> {
let vec: Vec<_> = match *self {
ty::Predicate::Trait(ref data) => {
- data.0.trait_ref.input_types().to_vec()
+ data.skip_binder().input_types().collect()
}
ty::Predicate::Rfc1592(ref data) => {
return data.walk_tys()
}
ty::Predicate::Projection(ref data) => {
let trait_inputs = data.0.projection_ty.trait_ref.input_types();
- trait_inputs.iter()
- .cloned()
- .chain(Some(data.0.ty))
- .collect()
+ trait_inputs.chain(Some(data.0.ty)).collect()
}
ty::Predicate::WellFormed(data) => {
vec![data]
}
pub fn self_ty(&self) -> Ty<'tcx> {
- self.substs.types[0]
+ self.substs.type_at(0)
}
- pub fn input_types(&self) -> &[Ty<'tcx>] {
+ pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
// Select only the "input types" from a trait-reference. For
// now this is all the types that appear in the
// trait-reference, but it should eventually exclude
// associated types.
- &self.substs.types
+ self.substs.types()
}
}
/// indicates it must outlive at least the function body (the user
/// may specify stronger requirements). This field indicates the
/// region of the callee.
- pub implicit_region_bound: ty::Region,
+ pub implicit_region_bound: &'tcx ty::Region,
/// Obligations that the caller must satisfy. This is basically
/// the set of bounds on the in-scope type parameters, translated
const IS_PHANTOM_DATA = 1 << 3,
const IS_SIMD = 1 << 4,
const IS_FUNDAMENTAL = 1 << 5,
- const IS_NO_DROP_FLAG = 1 << 6,
}
}
if attr::contains_name(&attrs, "fundamental") {
flags = flags | AdtFlags::IS_FUNDAMENTAL;
}
- if attr::contains_name(&attrs, "unsafe_no_drop_flag") {
- flags = flags | AdtFlags::IS_NO_DROP_FLAG;
- }
if tcx.lookup_simd(did) {
flags = flags | AdtFlags::IS_SIMD;
}
/// Returns whether this type has a destructor.
pub fn has_dtor(&self) -> bool {
- match self.dtor_kind() {
- NoDtor => false,
- TraitDtor(..) => true
- }
+ self.dtor_kind().is_present()
}
/// Asserts this is a struct and returns the struct's unique
pub fn dtor_kind(&self) -> DtorKind {
match self.destructor.get() {
- Some(_) => {
- TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG))
- }
+ Some(_) => TraitDtor,
None => NoDtor,
}
}
};
let sized_predicate = Binder(TraitRef {
def_id: sized_trait,
- substs: Substs::new_trait(tcx, vec![], vec![], ty)
+ substs: Substs::new_trait(tcx, ty, &[])
}).to_predicate();
let predicates = tcx.lookup_predicates(self.did).predicates;
if predicates.into_iter().any(|p| p == sized_predicate) {
|| self.lookup_repr_hints(did).contains(&attr::ReprSimd)
}
- pub fn item_variances(self, item_id: DefId) -> Rc<ItemVariances> {
+ pub fn item_variances(self, item_id: DefId) -> Rc<Vec<ty::Variance>> {
lookup_locally_or_in_crate_store(
"item_variance_map", item_id, &self.item_variance_map,
|| Rc::new(self.sess.cstore.item_variances(item_id)))
ty::ParameterEnvironment {
free_substs: Substs::empty(self),
caller_bounds: Vec::new(),
- implicit_region_bound: ty::ReEmpty,
+ implicit_region_bound: self.mk_region(ty::ReEmpty),
free_id_outlive: free_id_outlive
}
}
let substs = Substs::for_item(self.global_tcx(), def_id, |def, _| {
// map bound 'a => free 'a
- ReFree(FreeRegion { scope: free_id_outlive,
- bound_region: def.to_bound_region() })
+ self.global_tcx().mk_region(ReFree(FreeRegion {
+ scope: free_id_outlive,
+ bound_region: def.to_bound_region()
+ }))
}, |def, _| {
// map T => T
self.global_tcx().mk_param_from_def(def)
let unnormalized_env = ty::ParameterEnvironment {
free_substs: free_substs,
- implicit_region_bound: ty::ReScope(free_id_outlive),
+ implicit_region_bound: tcx.mk_region(ty::ReScope(free_id_outlive)),
caller_bounds: predicates,
free_id_outlive: free_id_outlive,
};
traits::normalize_param_env_or_error(tcx, unnormalized_env, cause)
}
+ pub fn node_scope_region(self, id: NodeId) -> &'tcx Region {
+ self.mk_region(ty::ReScope(self.region_maps.node_extent(id)))
+ }
+
pub fn is_method_call(self, expr_id: NodeId) -> bool {
self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id))
}
autoderefs))
}
- pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture> {
+ pub fn upvar_capture(self, upvar_id: ty::UpvarId) -> Option<ty::UpvarCapture<'tcx>> {
Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone())
}
/// The category of explicit self.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub enum ExplicitSelfCategory {
+pub enum ExplicitSelfCategory<'tcx> {
Static,
ByValue,
- ByReference(Region, hir::Mutability),
+ ByReference(&'tcx Region, hir::Mutability),
ByBox,
}
#[derive(Debug)]
pub enum Component<'tcx> {
- Region(ty::Region),
+ Region(&'tcx ty::Region),
Param(ty::ParamTy),
UnresolvedInferenceVariable(ty::InferTy),
}
}
-fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region>) {
+fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<&'tcx ty::Region>) {
for r in regions {
if !r.is_bound() {
out.push(Component::Region(r));
//! type equality, etc.
use hir::def_id::DefId;
-use ty::subst::Substs;
+use ty::subst::{Kind, Substs};
use ty::{self, Ty, TyCtxt, TypeFoldable};
use ty::error::{ExpectedFound, TypeError};
use std::rc::Rc;
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>;
- fn regions(&mut self, a: ty::Region, b: ty::Region)
- -> RelateResult<'tcx, ty::Region>;
+ fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region>;
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
}
pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R,
- variances: Option<&ty::ItemVariances>,
+ variances: Option<&Vec<ty::Variance>>,
a_subst: &'tcx Substs<'tcx>,
b_subst: &'tcx Substs<'tcx>)
-> RelateResult<'tcx, &'tcx Substs<'tcx>>
{
let tcx = relation.tcx();
- let types = a_subst.types.iter().enumerate().map(|(i, a_ty)| {
- let b_ty = &b_subst.types[i];
- let variance = variances.map_or(ty::Invariant, |v| v.types[i]);
- relation.relate_with_variance(variance, a_ty, b_ty)
- }).collect()?;
-
- let regions = a_subst.regions.iter().enumerate().map(|(i, a_r)| {
- let b_r = &b_subst.regions[i];
- let variance = variances.map_or(ty::Invariant, |v| v.regions[i]);
- relation.relate_with_variance(variance, a_r, b_r)
- }).collect()?;
+ let params = a_subst.params().iter().zip(b_subst.params()).enumerate().map(|(i, (a, b))| {
+ let variance = variances.map_or(ty::Invariant, |v| v[i]);
+ if let (Some(a_ty), Some(b_ty)) = (a.as_type(), b.as_type()) {
+ Ok(Kind::from(relation.relate_with_variance(variance, &a_ty, &b_ty)?))
+ } else if let (Some(a_r), Some(b_r)) = (a.as_region(), b.as_region()) {
+ Ok(Kind::from(relation.relate_with_variance(variance, &a_r, &b_r)?))
+ } else {
+ bug!()
+ }
+ });
- Ok(Substs::new(tcx, types, regions))
+ Substs::maybe_new(tcx, params)
}
impl<'tcx> Relate<'tcx> for &'tcx ty::BareFnTy<'tcx> {
(&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) =>
{
- let r = relation.relate_with_variance(ty::Contravariant, a_r, b_r)?;
+ let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?;
let mt = relation.relate(a_mt, b_mt)?;
- Ok(tcx.mk_ref(tcx.mk_region(r), mt))
+ Ok(tcx.mk_ref(r, mt))
}
(&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) =>
}
}
-impl<'tcx> Relate<'tcx> for ty::Region {
+impl<'tcx> Relate<'tcx> for &'tcx ty::Region {
fn relate<'a, 'gcx, R>(relation: &mut R,
- a: &ty::Region,
- b: &ty::Region)
- -> RelateResult<'tcx, ty::Region>
+ a: &&'tcx ty::Region,
+ b: &&'tcx ty::Region)
+ -> RelateResult<'tcx, &'tcx ty::Region>
where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
relation.regions(*a, *b)
// except according to those terms.
use infer::type_variable;
-use ty::subst::Substs;
use ty::{self, Lift, Ty, TyCtxt};
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
}
}
-impl<'tcx> Lift<'tcx> for ty::Region {
- type Lifted = Self;
- fn lift_to_tcx(&self, _: TyCtxt) -> Option<ty::Region> {
- Some(*self)
- }
-}
-
impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
type Lifted = ty::TraitRef<'tcx>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
FixedArraySize(x) => FixedArraySize(x),
TyParamSize(x) => TyParamSize(x),
ArgCount => ArgCount,
- RegionsDoesNotOutlive(a, b) => RegionsDoesNotOutlive(a, b),
- RegionsNotSame(a, b) => RegionsNotSame(a, b),
- RegionsNoOverlap(a, b) => RegionsNoOverlap(a, b),
+ RegionsDoesNotOutlive(a, b) => {
+ return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b))
+ }
+ RegionsNotSame(a, b) => {
+ return tcx.lift(&(a, b)).map(|(a, b)| RegionsNotSame(a, b))
+ }
+ RegionsNoOverlap(a, b) => {
+ return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b))
+ }
RegionsInsufficientlyPolymorphic(a, b) => {
- RegionsInsufficientlyPolymorphic(a, b)
+ return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b))
+ }
+ RegionsOverlyPolymorphic(a, b) => {
+ return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b))
}
- RegionsOverlyPolymorphic(a, b) => RegionsOverlyPolymorphic(a, b),
IntegerAsChar => IntegerAsChar,
IntMismatch(x) => IntMismatch(x),
FloatMismatch(x) => FloatMismatch(x),
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::Region {
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self {
*self
}
}
}
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region {
- fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self {
- *self
- }
-
- fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
- let region = folder.fold_region(**self);
- folder.tcx().mk_region(region)
- }
-
- fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool {
- false
- }
-
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
- visitor.visit_region(**self)
- }
-}
-
-impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> {
- fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
- let types = self.types.fold_with(folder);
- let regions = self.regions.fold_with(folder);
- Substs::new(folder.tcx(), types, regions)
- }
-
- fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
- folder.fold_substs(self)
- }
-
- fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
- self.types.visit_with(visitor) || self.regions.visit_with(visitor)
- }
-}
-
impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::ClosureSubsts {
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault {
+impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
match *self {
ty::ObjectLifetimeDefault::Ambiguous =>
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef {
+impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ty::RegionParameterDef {
name: self.name,
use collections::enum_set::{self, EnumSet, CLike};
use std::fmt;
-use std::ops;
use std::mem;
+use std::ops;
use syntax::abi;
use syntax::ast::{self, Name};
use syntax::parse::token::keywords;
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct TraitObject<'tcx> {
pub principal: PolyExistentialTraitRef<'tcx>,
- pub region_bound: ty::Region,
+ pub region_bound: &'tcx ty::Region,
pub builtin_bounds: BuiltinBounds,
pub projection_bounds: Vec<PolyExistentialProjection<'tcx>>,
}
self.0.substs
}
- pub fn input_types(&self) -> &[Ty<'tcx>] {
+ pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
// FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
self.0.input_types()
}
}
impl<'tcx> ExistentialTraitRef<'tcx> {
- pub fn input_types(&self) -> &[Ty<'tcx>] {
+ pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
// Select only the "input types" from a trait-reference. For
// now this is all the types that appear in the
// trait-reference, but it should eventually exclude
// associated types.
- &self.substs.types
+ self.substs.types()
}
}
self.0.def_id
}
- pub fn input_types(&self) -> &[Ty<'tcx>] {
+ pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
// FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<>
self.0.input_types()
}
ReErased,
}
+impl<'tcx> Decodable for &'tcx Region {
+ fn decode<D: Decoder>(d: &mut D) -> Result<&'tcx Region, D::Error> {
+ let r = Decodable::decode(d)?;
+ cstore::tls::with_decoding_context(d, |dcx, _| {
+ Ok(dcx.tcx().mk_region(r))
+ })
+ }
+}
+
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
pub struct EarlyBoundRegion {
pub index: u32,
/// Returns the regions directly referenced from this type (but
/// not types reachable from this type via `walk_tys`). This
/// ignores late-bound regions binders.
- pub fn regions(&self) -> Vec<ty::Region> {
+ pub fn regions(&self) -> Vec<&'tcx ty::Region> {
match self.sty {
TyRef(region, _) => {
- vec![*region]
+ vec![region]
}
TyTrait(ref obj) => {
let mut v = vec![obj.region_bound];
- v.extend_from_slice(&obj.principal.skip_binder().substs.regions);
+ v.extend(obj.principal.skip_binder().substs.regions());
v
}
TyEnum(_, substs) |
TyStruct(_, substs) |
TyAnon(_, substs) => {
- substs.regions.to_vec()
+ substs.regions().collect()
}
TyClosure(_, ref substs) => {
- substs.func_substs.regions.to_vec()
+ substs.func_substs.regions().collect()
}
TyProjection(ref data) => {
- data.trait_ref.substs.regions.to_vec()
+ data.trait_ref.substs.regions().collect()
}
TyFnDef(..) |
TyFnPtr(_) |
use middle::cstore;
use hir::def_id::DefId;
use ty::{self, Ty, TyCtxt};
-use ty::fold::{TypeFoldable, TypeFolder};
+use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use serialize::{Encodable, Encoder, Decodable, Decoder};
use syntax_pos::{Span, DUMMY_SP};
-///////////////////////////////////////////////////////////////////////////
+use core::nonzero::NonZero;
+use std::fmt;
+use std::iter;
+use std::marker::PhantomData;
+use std::mem;
+
+/// An entity in the Rust typesystem, which can be one of
+/// several kinds (only types and lifetimes for now).
+/// To reduce memory usage, a `Kind` is an interned pointer,
+/// with the lowest 2 bits being reserved for a tag to
+/// indicate the type (`Ty` or `Region`) it points to.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct Kind<'tcx> {
+    ptr: NonZero<usize>,
+    marker: PhantomData<(Ty<'tcx>, &'tcx ty::Region)>
+}
+
+const TAG_MASK: usize = 0b11;
+const TYPE_TAG: usize = 0b00;
+const REGION_TAG: usize = 0b01;
+
+impl<'tcx> From<Ty<'tcx>> for Kind<'tcx> {
+ fn from(ty: Ty<'tcx>) -> Kind<'tcx> {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0);
+
+ let ptr = ty as *const _ as usize;
+ Kind {
+ ptr: unsafe {
+ NonZero::new(ptr | TYPE_TAG)
+ },
+ marker: PhantomData
+ }
+ }
+}
+
+impl<'tcx> From<&'tcx ty::Region> for Kind<'tcx> {
+ fn from(r: &'tcx ty::Region) -> Kind<'tcx> {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(r) & TAG_MASK, 0);
+
+ let ptr = r as *const _ as usize;
+ Kind {
+ ptr: unsafe {
+ NonZero::new(ptr | REGION_TAG)
+ },
+ marker: PhantomData
+ }
+ }
+}
+
+impl<'tcx> Kind<'tcx> {
+ #[inline]
+ unsafe fn downcast<T>(self, tag: usize) -> Option<&'tcx T> {
+ let ptr = *self.ptr;
+ if ptr & TAG_MASK == tag {
+ Some(&*((ptr & !TAG_MASK) as *const _))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn as_type(self) -> Option<Ty<'tcx>> {
+ unsafe {
+ self.downcast(TYPE_TAG)
+ }
+ }
+
+ #[inline]
+ pub fn as_region(self) -> Option<&'tcx ty::Region> {
+ unsafe {
+ self.downcast(REGION_TAG)
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for Kind<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        if let Some(ty) = self.as_type() {
+            write!(f, "{:?}", ty)
+        } else if let Some(r) = self.as_region() {
+            write!(f, "{:?}", r)
+        } else {
+            // Tag bits matched neither TYPE_TAG nor REGION_TAG; print the raw pointer.
+            write!(f, "<unknown @ {:p}>", *self.ptr as *const ())
+        }
+    }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ if let Some(ty) = self.as_type() {
+ Kind::from(ty.fold_with(folder))
+ } else if let Some(r) = self.as_region() {
+ Kind::from(r.fold_with(folder))
+ } else {
+ bug!()
+ }
+ }
+
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ if let Some(ty) = self.as_type() {
+ ty.visit_with(visitor)
+ } else if let Some(r) = self.as_region() {
+ r.visit_with(visitor)
+ } else {
+ bug!()
+ }
+ }
+}
/// A substitution mapping type/region parameters to new values.
-#[derive(Clone, PartialEq, Eq, Hash)]
+#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Substs<'tcx> {
- pub types: Vec<Ty<'tcx>>,
- pub regions: Vec<ty::Region>,
+ params: Vec<Kind<'tcx>>
}
impl<'a, 'gcx, 'tcx> Substs<'tcx> {
- pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- t: Vec<Ty<'tcx>>,
- r: Vec<ty::Region>)
- -> &'tcx Substs<'tcx>
- {
- tcx.mk_substs(Substs { types: t, regions: r })
+ pub fn new<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I)
+ -> &'tcx Substs<'tcx>
+ where I: IntoIterator<Item=Kind<'tcx>> {
+ tcx.mk_substs(Substs {
+ params: params.into_iter().collect()
+ })
+ }
+
+ pub fn maybe_new<I, E>(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I)
+ -> Result<&'tcx Substs<'tcx>, E>
+ where I: IntoIterator<Item=Result<Kind<'tcx>, E>> {
+ Ok(tcx.mk_substs(Substs {
+ params: params.into_iter().collect::<Result<_, _>>()?
+ }))
}
pub fn new_trait(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- mut t: Vec<Ty<'tcx>>,
- r: Vec<ty::Region>,
- s: Ty<'tcx>)
+ s: Ty<'tcx>,
+ t: &[Ty<'tcx>])
-> &'tcx Substs<'tcx>
{
- t.insert(0, s);
- Substs::new(tcx, t, r)
+ let t = iter::once(s).chain(t.iter().cloned());
+ Substs::new(tcx, t.map(Kind::from))
}
pub fn empty(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx Substs<'tcx> {
- Substs::new(tcx, vec![], vec![])
+ Substs::new(tcx, vec![])
}
/// Creates a Substs for generic parameter definitions,
mut mk_region: FR,
mut mk_type: FT)
-> &'tcx Substs<'tcx>
- where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> ty::Region,
+ where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> &'tcx ty::Region,
FT: FnMut(&ty::TypeParameterDef<'tcx>, &Substs<'tcx>) -> Ty<'tcx> {
let defs = tcx.lookup_generics(def_id);
- let num_regions = defs.parent_regions as usize + defs.regions.len();
- let num_types = defs.parent_types as usize + defs.types.len();
let mut substs = Substs {
- regions: Vec::with_capacity(num_regions),
- types: Vec::with_capacity(num_types)
+ params: Vec::with_capacity(defs.count())
};
substs.fill_item(tcx, defs, &mut mk_region, &mut mk_type);
- Substs::new(tcx, substs.types, substs.regions)
+ tcx.mk_substs(substs)
}
fn fill_item<FR, FT>(&mut self,
defs: &ty::Generics<'tcx>,
mk_region: &mut FR,
mk_type: &mut FT)
- where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> ty::Region,
+ where FR: FnMut(&ty::RegionParameterDef, &Substs<'tcx>) -> &'tcx ty::Region,
FT: FnMut(&ty::TypeParameterDef<'tcx>, &Substs<'tcx>) -> Ty<'tcx> {
if let Some(def_id) = defs.parent {
let parent_defs = tcx.lookup_generics(def_id);
self.fill_item(tcx, parent_defs, mk_region, mk_type);
}
+ // Handle Self first, before all regions.
+ let mut types = defs.types.iter();
+ if defs.parent.is_none() && defs.has_self {
+ let def = types.next().unwrap();
+ let ty = mk_type(def, self);
+ assert_eq!(def.index as usize, self.params.len());
+ self.params.push(Kind::from(ty));
+ }
+
for def in &defs.regions {
let region = mk_region(def, self);
- assert_eq!(def.index as usize, self.regions.len());
- self.regions.push(region);
+ assert_eq!(def.index as usize, self.params.len());
+ self.params.push(Kind::from(region));
}
- for def in &defs.types {
+ for def in types {
let ty = mk_type(def, self);
- assert_eq!(def.index as usize, self.types.len());
- self.types.push(ty);
+ assert_eq!(def.index as usize, self.params.len());
+ self.params.push(Kind::from(ty));
}
}
pub fn is_noop(&self) -> bool {
- self.regions.is_empty() && self.types.is_empty()
+ self.params.is_empty()
+ }
+
+ #[inline]
+ pub fn params(&self) -> &[Kind<'tcx>] {
+ &self.params
+ }
+
+ #[inline]
+ pub fn types(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
+ self.params.iter().filter_map(|k| k.as_type())
+ }
+
+ #[inline]
+ pub fn regions(&'a self) -> impl DoubleEndedIterator<Item=&'tcx ty::Region> + 'a {
+ self.params.iter().filter_map(|k| k.as_region())
+ }
+
+ #[inline]
+ pub fn type_at(&self, i: usize) -> Ty<'tcx> {
+ self.params[i].as_type().unwrap_or_else(|| {
+ bug!("expected type for param #{} in {:?}", i, self.params);
+ })
}
+ #[inline]
+ pub fn region_at(&self, i: usize) -> &'tcx ty::Region {
+ self.params[i].as_region().unwrap_or_else(|| {
+ bug!("expected region for param #{} in {:?}", i, self.params);
+ })
+ }
+
+ #[inline]
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
- self.types[ty_param_def.index as usize]
+ self.type_at(ty_param_def.index as usize)
}
- pub fn region_for_def(&self, def: &ty::RegionParameterDef) -> ty::Region {
- self.regions[def.index as usize]
+ #[inline]
+ pub fn region_for_def(&self, def: &ty::RegionParameterDef) -> &'tcx ty::Region {
+ self.region_at(def.index as usize)
}
/// Transform from substitutions for a child of `source_ancestor`
target_substs: &Substs<'tcx>)
-> &'tcx Substs<'tcx> {
let defs = tcx.lookup_generics(source_ancestor);
- let regions = target_substs.regions.iter()
- .chain(&self.regions[defs.regions.len()..]).cloned().collect();
- let types = target_substs.types.iter()
- .chain(&self.types[defs.types.len()..]).cloned().collect();
- Substs::new(tcx, types, regions)
+ tcx.mk_substs(Substs {
+ params: target_substs.params.iter()
+ .chain(&self.params[defs.own_count()..]).cloned().collect()
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> {
+ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ let params = self.params.iter().map(|k| k.fold_with(folder)).collect();
+ folder.tcx().mk_substs(Substs {
+ params: params
+ })
+ }
+
+ fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
+ folder.fold_substs(self)
+ }
+
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
+ self.params.visit_with(visitor)
}
}
t
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
// Note: This routine only handles regions that are bound on
// type declarations and other outer declarations, not those
// bound in *fn types*. Region substitution of the bound
// regions that appear in a function signature is done using
// the specialized routine `ty::replace_late_regions()`.
- match r {
+ match *r {
ty::ReEarlyBound(data) => {
- match self.substs.regions.get(data.index as usize) {
- Some(&r) => {
+ let r = self.substs.params.get(data.index as usize)
+ .and_then(|k| k.as_region());
+ match r {
+ Some(r) => {
self.shift_region_through_binders(r)
}
None => {
impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
- let opt_ty = self.substs.types.get(p.idx as usize);
+ let opt_ty = self.substs.params.get(p.idx as usize)
+ .and_then(|k| k.as_type());
let ty = match opt_ty {
- Some(t) => *t,
+ Some(t) => t,
None => {
let span = self.span.unwrap_or(DUMMY_SP);
span_bug!(
source_ty,
p.idx,
self.root_ty,
- self.substs);
+ self.substs.params);
}
};
result
}
- fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region {
- ty::fold::shift_region(region, self.region_binders_passed)
+ fn shift_region_through_binders(&self, region: &'tcx ty::Region) -> &'tcx ty::Region {
+ self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed))
}
}
substs: &Substs<'tcx>)
-> ty::TraitRef<'tcx> {
let defs = tcx.lookup_generics(trait_id);
- let regions = substs.regions[..defs.regions.len()].to_vec();
- let types = substs.types[..defs.types.len()].to_vec();
+ let params = substs.params[..defs.own_count()].iter().cloned();
ty::TraitRef {
def_id: trait_id,
- substs: Substs::new(tcx, types, regions)
+ substs: Substs::new(tcx, params)
}
}
}
pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_ref: ty::TraitRef<'tcx>)
-> ty::ExistentialTraitRef<'tcx> {
- let Substs { mut types, regions } = trait_ref.substs.clone();
-
- types.remove(0);
+ // Assert there is a Self.
+ trait_ref.substs.type_at(0);
+ let params = trait_ref.substs.params[1..].iter().cloned();
ty::ExistentialTraitRef {
def_id: trait_ref.def_id,
- substs: Substs::new(tcx, types, regions)
+ substs: Substs::new(tcx, params)
}
}
}
assert!(!self_ty.has_escaping_regions());
self.map_bound(|trait_ref| {
- let Substs { mut types, regions } = trait_ref.substs.clone();
-
- types.insert(0, self_ty);
-
+ let params = trait_ref.substs.params.iter().cloned();
+ let params = iter::once(Kind::from(self_ty)).chain(params);
ty::TraitRef {
def_id: trait_ref.def_id,
- substs: Substs::new(tcx, types, regions)
+ substs: Substs::new(tcx, params)
}
})
}
pub fn required_region_bounds(self,
erased_self_ty: Ty<'tcx>,
predicates: Vec<ty::Predicate<'tcx>>)
- -> Vec<ty::Region> {
+ -> Vec<&'tcx ty::Region> {
debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})",
erased_self_ty,
predicates);
ty.super_visit_with(self)
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
- match r {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
+ match *r {
ty::ReStatic | ty::ReErased => {
self.hash::<u32>(0);
}
return false;
}
- let types_a = &substs_a.types;
- let types_b = &substs_b.types;
-
- types_a.iter().zip(types_b).all(|(&a, &b)| same_type(a, b))
+ substs_a.types().zip(substs_b.types()).all(|(a, b)| same_type(a, b))
}
_ => {
a == b
stack.into_iter()
}
+// We push types on the stack in reverse order so as to
+// maintain a pre-order traversal. As of the time of this
+// writing, the fact that the traversal is pre-order is not
+// known to be significant to any code, but it seems like the
+// natural order one would expect (basically, the order of the
+// types as they are written).
fn push_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, parent_ty: Ty<'tcx>) {
match parent_ty.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) |
stack.push(mt.ty);
}
ty::TyProjection(ref data) => {
- push_reversed(stack, &data.trait_ref.substs.types);
+ stack.extend(data.trait_ref.substs.types().rev());
}
ty::TyTrait(ref obj) => {
- push_reversed(stack, obj.principal.input_types());
- push_reversed(stack, &obj.projection_bounds.iter().map(|pred| {
+ stack.extend(obj.principal.input_types().rev());
+ stack.extend(obj.projection_bounds.iter().map(|pred| {
pred.0.ty
- }).collect::<Vec<_>>());
+ }).rev());
}
ty::TyEnum(_, ref substs) |
ty::TyStruct(_, ref substs) |
ty::TyAnon(_, ref substs) => {
- push_reversed(stack, &substs.types);
+ stack.extend(substs.types().rev());
}
ty::TyClosure(_, ref substs) => {
- push_reversed(stack, &substs.func_substs.types);
- push_reversed(stack, &substs.upvar_tys);
+ stack.extend(substs.func_substs.types().rev());
+ stack.extend(substs.upvar_tys.iter().cloned().rev());
}
- ty::TyTuple(ref ts) => {
- push_reversed(stack, ts);
+ ty::TyTuple(ts) => {
+ stack.extend(ts.iter().cloned().rev());
}
ty::TyFnDef(_, substs, ref ft) => {
- push_reversed(stack, &substs.types);
+ stack.extend(substs.types().rev());
push_sig_subtypes(stack, &ft.sig);
}
ty::TyFnPtr(ref ft) => {
fn push_sig_subtypes<'tcx>(stack: &mut Vec<Ty<'tcx>>, sig: &ty::PolyFnSig<'tcx>) {
stack.push(sig.0.output);
- push_reversed(stack, &sig.0.inputs);
-}
-
-fn push_reversed<'tcx>(stack: &mut Vec<Ty<'tcx>>, tys: &[Ty<'tcx>]) {
- // We push slices on the stack in reverse order so as to
- // maintain a pre-order traversal. As of the time of this
- // writing, the fact that the traversal is pre-order is not
- // known to be significant to any code, but it seems like the
- // natural order one would expect (basically, the order of the
- // types as they are written).
- for &ty in tys.iter().rev() {
- stack.push(ty);
- }
+ stack.extend(sig.0.inputs.iter().cloned().rev());
}
/// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
#[derive(Debug)]
pub enum ImpliedBound<'tcx> {
- RegionSubRegion(ty::Region, ty::Region),
- RegionSubParam(ty::Region, ty::ParamTy),
- RegionSubProjection(ty::Region, ty::ProjectionTy<'tcx>),
+ RegionSubRegion(&'tcx ty::Region, &'tcx ty::Region),
+ RegionSubParam(&'tcx ty::Region, ty::ParamTy),
+ RegionSubProjection(&'tcx ty::Region, ty::ProjectionTy<'tcx>),
}
/// Compute the implied bounds that a callee/impl can assume based on
/// this down to determine what relationships would have to hold for
/// `T: 'a` to hold. We get to assume that the caller has validated
/// those relationships.
-fn implied_bounds_from_components<'tcx>(sub_region: ty::Region,
+fn implied_bounds_from_components<'tcx>(sub_region: &'tcx ty::Region,
sup_components: Vec<Component<'tcx>>)
-> Vec<ImpliedBound<'tcx>>
{
let cause = self.cause(traits::MiscObligation);
self.out.extend(
- trait_ref.substs.types
- .iter()
+ trait_ref.substs.types()
.filter(|ty| !ty.has_escaping_regions())
.map(|ty| traits::Obligation::new(cause.clone(),
ty::Predicate::WellFormed(ty))));
cause,
ty::Predicate::TypeOutlives(
ty::Binder(
- ty::OutlivesPredicate(mt.ty, *r)))));
+ ty::OutlivesPredicate(mt.ty, r)))));
}
}
tcx: TyCtxt<'a, 'gcx, 'tcx>,
principal: ty::PolyExistentialTraitRef<'tcx>,
others: ty::BuiltinBounds)
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
// Since we don't actually *know* the self type for an object,
// this "open(err)" serves as a kind of dummy standin -- basically
use std::cell::Cell;
use std::fmt;
+use std::usize;
+
use syntax::abi::Abi;
use syntax::parse::token;
use syntax::ast::CRATE_NODE_ID;
verbose = tcx.sess.verbose();
has_self = generics.has_self;
+ let mut child_types = 0;
if let Some(def_id) = generics.parent {
// Methods.
assert_eq!(ns, Ns::Value);
+ child_types = generics.types.len();
generics = tcx.lookup_generics(def_id);
num_regions = generics.regions.len();
num_types = generics.types.len();
if has_self {
- write!(f, "<{} as ", substs.types[0])?;
+ write!(f, "<{} as ", substs.type_at(0))?;
}
item_name = Some(tcx.item_name(did));
if !verbose {
if generics.types.last().map_or(false, |def| def.default.is_some()) {
if let Some(substs) = tcx.lift(&substs) {
- let tps = &substs.types[..num_types];
- for (def, actual) in generics.types.iter().zip(tps).rev() {
+ let tps = substs.types().rev().skip(child_types);
+ for (def, actual) in generics.types.iter().rev().zip(tps) {
if def.default.subst(tcx, substs) != Some(actual) {
break;
}
if !verbose && fn_trait_kind.is_some() && projections.len() == 1 {
let projection_ty = projections[0].ty;
- if let TyTuple(ref args) = substs.types[1].sty {
+ if let TyTuple(ref args) = substs.type_at(1).sty {
return fn_sig(f, args, false, projection_ty);
}
}
}
};
- let print_regions = |f: &mut fmt::Formatter, start: &str, regions: &[ty::Region]| {
+ let print_regions = |f: &mut fmt::Formatter, start: &str, skip, count| {
// Don't print any regions if they're all erased.
- if regions.iter().all(|r| *r == ty::ReErased) {
+ let regions = || substs.regions().skip(skip).take(count);
+ if regions().all(|r: &ty::Region| *r == ty::ReErased) {
return Ok(());
}
- for region in regions {
+ for region in regions() {
+ let region: &ty::Region = region;
start_or_continue(f, start, ", ")?;
if verbose {
write!(f, "{:?}", region)?;
Ok(())
};
- print_regions(f, "<", &substs.regions[..num_regions])?;
+ print_regions(f, "<", 0, num_regions)?;
- let tps = &substs.types[..num_types];
+ let tps = substs.types().take(num_types - num_supplied_defaults)
+ .skip(has_self as usize);
- for &ty in &tps[has_self as usize..tps.len() - num_supplied_defaults] {
+ for ty in tps {
start_or_continue(f, "<", ", ")?;
write!(f, "{}", ty)?;
}
write!(f, "::{}", item_name)?;
}
- print_regions(f, "::<", &substs.regions[num_regions..])?;
+ print_regions(f, "::<", num_regions, usize::MAX)?;
// FIXME: consider being smart with defaults here too
- for ty in &substs.types[num_types..] {
+ for ty in substs.types().skip(num_types) {
start_or_continue(f, "::<", ", ")?;
write!(f, "{}", ty)?;
}
let new_value = tcx.replace_late_bound_regions(&value, |br| {
let _ = start_or_continue(f, "for<", ", ");
- ty::ReLateBound(ty::DebruijnIndex::new(1), match br {
+ let br = match br {
ty::BrNamed(_, name, _) => {
let _ = write!(f, "{}", name);
br
name,
ty::Issue32330::WontChange)
}
- })
+ };
+ tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), br))
}).0;
start_or_continue(f, "", "> ")?;
}
}
-impl fmt::Debug for ty::RegionParameterDef {
+impl<'tcx> fmt::Debug for ty::RegionParameterDef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegionParameterDef({}, {:?}, {}, {:?})",
self.name,
}
}
-impl<'tcx> fmt::Debug for Substs<'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "Substs[types={:?}, regions={:?}]",
- self.types, self.regions)
- }
-}
-
impl<'tcx> fmt::Debug for ty::ItemSubsts<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ItemSubsts({:?})", self.substs)
}
}
-impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault {
+impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ty::ObjectLifetimeDefault::Ambiguous => write!(f, "Ambiguous"),
}
}
-impl fmt::Debug for ty::ItemVariances {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "ItemVariances(types={:?}, regions={:?})",
- self.types, self.regions)
- }
-}
-
impl<'tcx> fmt::Debug for ty::GenericPredicates<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "GenericPredicates({:?})", self.predicates)
}
}
-impl<'tcx> fmt::Display for ty::Binder<ty::OutlivesPredicate<Ty<'tcx>, ty::Region>> {
+impl<'tcx> fmt::Display for ty::Binder<ty::OutlivesPredicate<Ty<'tcx>, &'tcx ty::Region>> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self)))
}
}
-impl fmt::Display for ty::Binder<ty::OutlivesPredicate<ty::Region, ty::Region>> {
+impl<'tcx> fmt::Display for ty::Binder<ty::OutlivesPredicate<&'tcx ty::Region,
+ &'tcx ty::Region>> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self)))
}
}
}
-impl fmt::Debug for ty::UpvarBorrow {
+impl<'tcx> fmt::Debug for ty::UpvarBorrow<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "UpvarBorrow({:?}, {:?})",
self.kind, self.region)
}
}
-impl fmt::Display for ty::ExplicitSelfCategory {
+impl<'tcx> fmt::Display for ty::ExplicitSelfCategory<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
ty::ExplicitSelfCategory::Static => "static",
target_vendor: "unknown".to_string(),
options: TargetOptions {
- features: "+v7,+vfp3,+neon".to_string(),
- cpu: "cortex-a8".to_string(),
+ // Info about features at https://wiki.debian.org/ArmHardFloatPort
+ features: "+v7,+vfp3,+d16,+thumb2".to_string(),
+ cpu: "generic".to_string(),
max_atomic_width: 64,
.. base
}
("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
+ ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+ let mut base = super::linux_base::opts();
+ // NOTE: the "zEC12" CPU matches the default CPU targeted by the C toolchain
+ base.cpu = "zEC12".to_string();
+ base.max_atomic_width = 64;
+
+ Ok(Target {
+ llvm_target: "s390x-unknown-linux-gnu".to_string(),
+ target_endian: "big".to_string(),
+ target_pointer_width: "64".to_string(),
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(),
+ arch: "s390x".to_string(),
+ target_os: "linux".to_string(),
+ target_env: "gnu".to_string(),
+ target_vendor: "unknown".to_string(),
+ options: base,
+ })
+}
borrow_id: ast::NodeId,
borrow_span: Span,
cmt: mc::cmt<'tcx>,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
bk: ty::BorrowKind,
loan_cause: euv::LoanCause)
{
struct_span_err!(self.bccx, span, E0503,
"cannot use `{}` because it was mutably borrowed",
&self.bccx.loan_path_to_string(copy_path))
- .span_note(loan_span,
+ .span_label(loan_span,
&format!("borrow of `{}` occurs here",
&self.bccx.loan_path_to_string(&loan_path))
)
+ .span_label(span,
+ &format!("use of borrowed `{}`",
+ &self.bccx.loan_path_to_string(&loan_path)))
.emit();
}
}
span: Span,
cause: euv::LoanCause,
cmt: mc::cmt<'tcx>,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
_: ty::BorrowKind)
-> Result<(),()> {
//! Reports error if `loan_region` is larger than S
span: Span,
cause: euv::LoanCause,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
cmt_original: mc::cmt<'tcx>
}
}
}
- fn check_scope(&self, max_scope: ty::Region) -> R {
+ fn check_scope(&self, max_scope: &'tcx ty::Region) -> R {
//! Reports an error if `loan_region` is larger than `max_scope`
if !self.bccx.is_subregion_of(self.loan_region, max_scope) {
}
}
- fn scope(&self, cmt: &mc::cmt) -> ty::Region {
+ fn scope(&self, cmt: &mc::cmt<'tcx>) -> &'tcx ty::Region {
//! Returns the maximal region scope for the which the
//! lvalue `cmt` is guaranteed to be valid without any
//! rooting etc, and presuming `cmt` is not mutated.
temp_scope
}
Categorization::Upvar(..) => {
- ty::ReScope(self.item_scope)
- }
- Categorization::StaticItem => {
- ty::ReStatic
+ self.bccx.tcx.mk_region(ty::ReScope(self.item_scope))
}
Categorization::Local(local_id) => {
- ty::ReScope(self.bccx.tcx.region_maps.var_scope(local_id))
+ self.bccx.tcx.mk_region(ty::ReScope(
+ self.bccx.tcx.region_maps.var_scope(local_id)))
}
+ Categorization::StaticItem |
Categorization::Deref(_, _, mc::UnsafePtr(..)) => {
- ty::ReStatic
+ self.bccx.tcx.mk_region(ty::ReStatic)
}
Categorization::Deref(_, _, mc::BorrowedPtr(_, r)) |
Categorization::Deref(_, _, mc::Implicit(_, r)) => {
}
}
- fn report_error(&self, code: bckerr_code) {
+ fn report_error(&self, code: bckerr_code<'tcx>) {
self.bccx.report(BckError { cmt: self.cmt_original.clone(),
span: self.span,
cause: BorrowViolation(self.cause),
borrow_id: ast::NodeId,
borrow_span: Span,
cmt: mc::cmt<'tcx>,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
bk: ty::BorrowKind,
loan_cause: euv::LoanCause)
{
borrow_span: Span,
cmt: mc::cmt<'tcx>,
req_kind: ty::BorrowKind,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
cause: euv::LoanCause) {
debug!("guarantee_valid(borrow_id={}, cmt={:?}, \
req_mutbl={:?}, loan_region={:?})",
// a loan for the empty region can never be dereferenced, so
// it is always safe
- if loan_region == ty::ReEmpty {
+ if *loan_region == ty::ReEmpty {
return;
}
}
RestrictionResult::SafeIf(loan_path, restricted_paths) => {
- let loan_scope = match loan_region {
+ let loan_scope = match *loan_region {
ty::ReScope(scope) => scope,
ty::ReFree(ref fr) => fr.scope,
span: Span,
cause: euv::LoanCause,
cmt: mc::cmt<'tcx>,
- loan_region: ty::Region)
+ loan_region: &'tcx ty::Region)
-> RestrictionResult<'tcx> {
let ctxt = RestrictionsContext {
bccx: bccx,
struct RestrictionsContext<'a, 'tcx: 'a> {
bccx: &'a BorrowckCtxt<'a, 'tcx>,
span: Span,
- loan_region: ty::Region,
+ loan_region: &'tcx ty::Region,
cause: euv::LoanCause,
}
fn extend(&self,
result: RestrictionResult<'tcx>,
cmt: &mc::cmt<'tcx>,
- elem: LoanPathElem) -> RestrictionResult<'tcx> {
+ elem: LoanPathElem<'tcx>) -> RestrictionResult<'tcx> {
match result {
RestrictionResult::Safe => RestrictionResult::Safe,
RestrictionResult::SafeIf(base_lp, mut base_vec) => {
// except according to those terms.
use rustc::ty::TyCtxt;
-use rustc::mir::repr::{self, Mir};
+use rustc::mir::repr::{self, Mir, Location};
use rustc_data_structures::indexed_vec::Idx;
-use super::super::gather_moves::{Location};
use super::super::gather_moves::{MoveOutIndex, MovePathIndex};
use super::super::MoveDataParamEnv;
use super::super::DropFlagState;
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: idx },
+ Location { block: bb, statement_index: idx },
|path, s| Self::update_bits(sets, path, s)
)
}
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: statements_len },
+ Location { block: bb, statement_index: statements_len },
|path, s| Self::update_bits(sets, path, s)
)
}
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: idx },
+ Location { block: bb, statement_index: idx },
|path, s| Self::update_bits(sets, path, s)
)
}
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: statements_len },
+ Location { block: bb, statement_index: statements_len },
|path, s| Self::update_bits(sets, path, s)
)
}
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: idx },
+ Location { block: bb, statement_index: idx },
|path, s| Self::update_bits(sets, path, s)
)
}
{
drop_flag_effects_for_location(
self.tcx, self.mir, ctxt,
- Location { block: bb, index: statements_len },
+ Location { block: bb, statement_index: statements_len },
|path, s| Self::update_bits(sets, path, s)
)
}
let path_map = &move_data.path_map;
let rev_lookup = &move_data.rev_lookup;
- let loc = Location { block: bb, index: idx };
+ let loc = Location { block: bb, statement_index: idx };
debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}",
stmt, loc, &loc_map[loc]);
for move_index in &loc_map[loc] {
let (mir, move_data) = (self.mir, &ctxt.move_data);
let term = mir[bb].terminator();
let loc_map = &move_data.loc_map;
- let loc = Location { block: bb, index: statements_len };
+ let loc = Location { block: bb, statement_index: statements_len };
debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}",
term, loc, &loc_map[loc]);
let bits_per_block = self.bits_per_block(ctxt);
// except according to those terms.
use indexed_set::IdxSetBuf;
-use super::gather_moves::{MoveData, MovePathIndex, MovePathContent, Location};
+use super::gather_moves::{MoveData, MovePathIndex, MovePathContent};
use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
use super::dataflow::{DataflowResults};
use super::{drop_flag_effects_for_location, on_all_children_bits};
use super::{DropFlagState, MoveDataParamEnv};
use super::patch::MirPatch;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::subst::{Subst, Substs};
+use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::mir::repr::*;
use rustc::mir::transform::{Pass, MirPass, MirSource};
use rustc::middle::const_val::ConstVal;
use syntax_pos::Span;
use std::fmt;
+use std::iter;
use std::u32;
pub struct ElaborateDrops;
dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index())
.to_owned(),
};
- for stmt in 0..loc.index {
+ for stmt in 0..loc.statement_index {
data.apply_location(self.tcx, self.mir, self.env,
- Location { block: loc.block, index: stmt });
+ Location { block: loc.block, statement_index: stmt });
}
data
}
let init_data = self.initialization_data_at(Location {
block: bb,
- index: data.statements.len()
+ statement_index: data.statements.len()
});
let path = self.move_data().rev_lookup.find(location);
fn elaborate_drops(&mut self)
{
for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
- let loc = Location { block: bb, index: data.statements.len() };
+ let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
let resume_block = self.patch.resume_block();
unwind: Some(unwind)
}, bb);
on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| {
- self.set_drop_flag(Location { block: target, index: 0 },
+ self.set_drop_flag(Location { block: target, statement_index: 0 },
child, DropFlagState::Present);
- self.set_drop_flag(Location { block: unwind, index: 0 },
+ self.set_drop_flag(Location { block: unwind, statement_index: 0 },
child, DropFlagState::Present);
});
}
let drop_block = self.drop_block(c);
if update_drop_flag {
self.set_drop_flag(
- Location { block: drop_block, index: 0 },
+ Location { block: drop_block, statement_index: 0 },
c.path,
DropFlagState::Absent
);
let unit_temp = Lvalue::Temp(self.patch.new_temp(tcx.mk_nil()));
let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
.unwrap_or_else(|e| tcx.sess.fatal(&e));
- let substs = Substs::new(tcx, vec![ty], vec![]);
+ let substs = Substs::new(tcx, iter::once(Kind::from(ty)));
let fty = tcx.lookup_item_type(free_func).ty.subst(tcx, substs);
self.patch.new_block(BasicBlockData {
}
fn drop_flags_on_init(&mut self) {
- let loc = Location { block: START_BLOCK, index: 0 };
+ let loc = Location { block: START_BLOCK, statement_index: 0 };
let span = self.patch.source_info_for_location(self.mir, loc).span;
let false_ = self.constant_bool(span, false);
for flag in self.drop_flags.values() {
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
- let loc = Location { block: tgt, index: 0 };
+ let loc = Location { block: tgt, statement_index: 0 };
let path = self.move_data().rev_lookup.find(lv);
on_all_children_bits(
self.tcx, self.mir, self.move_data(), path,
}
fn drop_flags_for_args(&mut self) {
- let loc = Location { block: START_BLOCK, index: 0 };
+ let loc = Location { block: START_BLOCK, statement_index: 0 };
super::drop_flag_effects_for_function_entry(
self.tcx, self.mir, self.env, |path, ds| {
self.set_drop_flag(loc, path, ds);
}
}
}
- let loc = Location { block: bb, index: i };
+ let loc = Location { block: bb, statement_index: i };
super::drop_flag_effects_for_location(
self.tcx, self.mir, self.env, loc, |path, ds| {
if ds == DropFlagState::Absent || allow_initializations {
} = data.terminator().kind {
assert!(!self.patch.is_patched(bb));
- let loc = Location { block: bb, index: data.statements.len() };
+ let loc = Location { block: bb, statement_index: data.statements.len() };
let path = self.move_data().rev_lookup.find(lv);
on_all_children_bits(
self.tcx, self.mir, self.move_data(), path,
type Output = [MoveOutIndex];
fn index(&self, index: Location) -> &Self::Output {
assert!(index.block.index() < self.map.len());
- assert!(index.index < self.map[index.block.index()].len());
- &self.map[index.block.index()][index.index]
+ assert!(index.statement_index < self.map[index.block.index()].len());
+ &self.map[index.block.index()][index.statement_index]
}
}
}
}
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
-pub struct Location {
- /// block where action is located
- pub block: BasicBlock,
- /// index within above block; statement when < statments.len) or
- /// the terminator (when = statements.len).
- pub index: usize,
-}
-
-impl fmt::Debug for Location {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- write!(fmt, "{:?}[{}]", self.block, self.index)
- }
-}
-
#[derive(Debug)]
pub struct MovePathData<'tcx> {
move_paths: Vec<MovePath<'tcx>>,
};
for (i, stmt) in bb_data.statements.iter().enumerate() {
- let source = Location { block: bb, index: i };
+ let source = Location { block: bb, statement_index: i };
match stmt.kind {
StatementKind::Assign(ref lval, ref rval) => {
bb_ctxt.builder.create_move_path(lval);
TerminatorKind::Return => {
let source = Location { block: bb,
- index: bb_data.statements.len() };
+ statement_index: bb_data.statements.len() };
debug!("gather_moves Return on_move_out_lval return {:?}", source);
bb_ctxt.on_move_out_lval(SK::Return, &Lvalue::ReturnPointer, source);
}
TerminatorKind::If { ref cond, targets: _ } => {
let source = Location { block: bb,
- index: bb_data.statements.len() };
+ statement_index: bb_data.statements.len() };
bb_ctxt.on_operand(SK::If, cond, source);
}
TerminatorKind::Drop { ref location, target: _, unwind: _ } => {
let source = Location { block: bb,
- index: bb_data.statements.len() };
+ statement_index: bb_data.statements.len() };
bb_ctxt.on_move_out_lval(SK::Drop, location, source);
}
TerminatorKind::DropAndReplace { ref location, ref value, .. } => {
bb_ctxt.path_map.fill_to(assigned_path.index());
let source = Location { block: bb,
- index: bb_data.statements.len() };
+ statement_index: bb_data.statements.len() };
bb_ctxt.on_operand(SK::Use, value, source);
}
TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => {
let source = Location { block: bb,
- index: bb_data.statements.len() };
+ statement_index: bb_data.statements.len() };
bb_ctxt.on_operand(SK::CallFn, func, source);
for arg in args {
debug!("gather_moves Call on_operand {:?} {:?}", arg, source);
stmt_kind: StmtKind,
lval: &Lvalue<'tcx>,
source: Location) {
- let i = source.index;
+ let i = source.statement_index;
let index = MoveOutIndex::new(self.moves.len());
let path = self.builder.move_path_for(lval);
use rustc::hir::intravisit::{FnKind};
use rustc::mir::repr;
-use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, Statement, Terminator};
+use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, Statement, Terminator, Location};
use rustc::session::Session;
use rustc::ty::{self, TyCtxt};
use self::dataflow::{Dataflow, DataflowAnalysis, DataflowResults};
use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
use self::dataflow::{DefinitelyInitializedLvals};
-use self::gather_moves::{MoveData, MovePathIndex, Location};
+use self::gather_moves::{MoveData, MovePathIndex};
use self::gather_moves::{MovePathContent, MovePathData};
fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option<P<MetaItem>> {
}
let block = &mir[loc.block];
- match block.statements.get(loc.index) {
+ match block.statements.get(loc.statement_index) {
Some(stmt) => match stmt.kind {
repr::StatementKind::SetDiscriminant{ .. } => {
span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck");
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::gather_moves::Location;
use rustc::ty::Ty;
use rustc::mir::repr::*;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
};
Location {
block: bb,
- index: offset
+ statement_index: offset
}
}
}
debug!("MirPatch: adding statement {:?} at loc {:?}+{}",
stmt, loc, delta);
- loc.index += delta;
+ loc.statement_index += delta;
let source_info = Self::source_info_for_index(
&mir[loc.block], loc
);
mir[loc.block].statements.insert(
- loc.index, Statement {
+ loc.statement_index, Statement {
source_info: source_info,
kind: stmt
});
}
pub fn source_info_for_index(data: &BasicBlockData, loc: Location) -> SourceInfo {
- match data.statements.get(loc.index) {
+ match data.statements.get(loc.statement_index) {
Some(stmt) => stmt.source_info,
None => data.terminator().source_info
}
use std::fmt;
use std::mem;
use std::rc::Rc;
+use std::hash::{Hash, Hasher};
use syntax::ast;
use syntax::attr::AttrMetaMethods;
use syntax_pos::{MultiSpan, Span};
}
}
-#[derive(Eq, Hash)]
+#[derive(Eq)]
pub struct LoanPath<'tcx> {
kind: LoanPathKind<'tcx>,
ty: ty::Ty<'tcx>,
impl<'tcx> PartialEq for LoanPath<'tcx> {
fn eq(&self, that: &LoanPath<'tcx>) -> bool {
- let r = self.kind == that.kind;
- debug_assert!(self.ty == that.ty || !r,
- "Somehow loan paths are equal though their tys are not.");
- r
+ self.kind == that.kind
+ }
+}
+
+impl<'tcx> Hash for LoanPath<'tcx> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.kind.hash(state);
}
}
LpVar(ast::NodeId), // `x` in README.md
LpUpvar(ty::UpvarId), // `x` captured by-value into closure
LpDowncast(Rc<LoanPath<'tcx>>, DefId), // `x` downcast to particular enum variant
- LpExtend(Rc<LoanPath<'tcx>>, mc::MutabilityCategory, LoanPathElem)
+ LpExtend(Rc<LoanPath<'tcx>>, mc::MutabilityCategory, LoanPathElem<'tcx>)
}
impl<'tcx> LoanPath<'tcx> {
// `enum E { X { foo: u32 }, Y { foo: u32 }}`
// each `foo` is qualified by the definitition id of the variant (`X` or `Y`).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum LoanPathElem {
- LpDeref(mc::PointerKind),
+pub enum LoanPathElem<'tcx> {
+ LpDeref(mc::PointerKind<'tcx>),
LpInterior(Option<DefId>, InteriorKind),
}
// Errors that can occur
#[derive(PartialEq)]
-pub enum bckerr_code {
+pub enum bckerr_code<'tcx> {
err_mutbl,
- err_out_of_scope(ty::Region, ty::Region, euv::LoanCause), // superscope, subscope, loan cause
- err_borrowed_pointer_too_short(ty::Region, ty::Region), // loan, ptr
+ /// superscope, subscope, loan cause
+ err_out_of_scope(&'tcx ty::Region, &'tcx ty::Region, euv::LoanCause),
+ err_borrowed_pointer_too_short(&'tcx ty::Region, &'tcx ty::Region), // loan, ptr
}
// Combination of an error code and the categorization of the expression
span: Span,
cause: AliasableViolationKind,
cmt: mc::cmt<'tcx>,
- code: bckerr_code
+ code: bckerr_code<'tcx>
}
#[derive(Copy, Clone, Debug, PartialEq)]
self.free_region_map = old_free_region_map;
}
- pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region)
+ pub fn is_subregion_of(&self, r_sub: &'tcx ty::Region, r_sup: &'tcx ty::Region)
-> bool
{
self.free_region_map.is_subregion_of(self.tcx, r_sub, r_sup)
pub fn report(&self, err: BckError<'tcx>) {
// Catch and handle some particular cases.
match (&err.code, &err.cause) {
- (&err_out_of_scope(ty::ReScope(_), ty::ReStatic, _),
+ (&err_out_of_scope(&ty::ReScope(_), &ty::ReStatic, _),
&BorrowViolation(euv::ClosureCapture(span))) |
- (&err_out_of_scope(ty::ReScope(_), ty::ReFree(..), _),
+ (&err_out_of_scope(&ty::ReScope(_), &ty::ReFree(..), _),
&BorrowViolation(euv::ClosureCapture(span))) => {
return self.report_out_of_scope_escaping_closure_capture(&err, span);
}
}
mc::AliasableStatic |
mc::AliasableStaticMut => {
- struct_span_err!(
+ let mut err = struct_span_err!(
self.tcx.sess, span, E0388,
- "{} in a static location", prefix)
+ "{} in a static location", prefix);
+ err.span_label(span, &format!("cannot write data in a static definition"));
+ err
}
mc::AliasableBorrowed => {
- struct_span_err!(
+ let mut e = struct_span_err!(
self.tcx.sess, span, E0389,
- "{} in a `&` reference", prefix)
+ "{} in a `&` reference", prefix);
+ e.span_label(span, &"assignment into an immutable reference");
+ e
}
};
.emit();
}
- fn region_end_span(&self, region: ty::Region) -> Option<Span> {
- match region {
+ fn region_end_span(&self, region: &'tcx ty::Region) -> Option<Span> {
+ match *region {
ty::ReScope(scope) => {
match scope.span(&self.tcx.region_maps, &self.tcx.map) {
Some(s) => {
}
}
-fn statement_scope_span(tcx: TyCtxt, region: ty::Region) -> Option<Span> {
- match region {
+fn statement_scope_span(tcx: TyCtxt, region: &ty::Region) -> Option<Span> {
+ match *region {
ty::ReScope(scope) => {
match tcx.map.find(scope.node_id(&tcx.region_maps)) {
Some(hir_map::NodeStmt(stmt)) => Some(stmt.span),
_: NodeId,
span: Span,
_: cmt,
- _: Region,
+ _: &'tcx Region,
kind: BorrowKind,
_: LoanCause) {
match kind {
use rustc::util::nodemap::NodeSet;
use rustc_back::sha2::{Sha256, Digest};
use rustc_borrowck as borrowck;
-use rustc_incremental;
+use rustc_incremental::{self, IncrementalHashesMap};
use rustc_resolve::{MakeGlobMap, Resolver};
use rustc_metadata::macro_import;
use rustc_metadata::creader::read_local_crates;
resolutions,
&arenas,
&crate_name,
- |tcx, mir_map, analysis, result| {
+ |tcx, mir_map, analysis, incremental_hashes_map, result| {
{
// Eventually, we will want to track plugins.
let _ignore = tcx.dep_graph.in_ignore();
}
let trans = phase_4_translate_to_llvm(tcx,
mir_map.unwrap(),
- analysis);
+ analysis,
+ &incremental_hashes_map);
if log_enabled!(::log::INFO) {
println!("Post-trans");
where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>,
Option<MirMap<'tcx>>,
ty::CrateAnalysis,
+ IncrementalHashesMap,
CompileResult) -> R
{
macro_rules! try_with_f {
- ($e: expr, ($t: expr, $m: expr, $a: expr)) => {
+ ($e: expr, ($t: expr, $m: expr, $a: expr, $h: expr)) => {
match $e {
Ok(x) => x,
Err(x) => {
- f($t, $m, $a, Err(x));
+ f($t, $m, $a, $h, Err(x));
return Err(x);
}
}
index,
name,
|tcx| {
+ let incremental_hashes_map =
+ time(time_passes,
+ "compute_incremental_hashes_map",
+ || rustc_incremental::compute_incremental_hashes_map(tcx));
time(time_passes,
"load_dep_graph",
- || rustc_incremental::load_dep_graph(tcx));
+ || rustc_incremental::load_dep_graph(tcx, &incremental_hashes_map));
// passes are timed inside typeck
- try_with_f!(typeck::check_crate(tcx), (tcx, None, analysis));
+ try_with_f!(typeck::check_crate(tcx), (tcx, None, analysis, incremental_hashes_map));
time(time_passes,
"const checking",
// lint warnings and so on -- kindck used to do this abort, but
// kindck is gone now). -nmatsakis
if sess.err_count() > 0 {
- return Ok(f(tcx, Some(mir_map), analysis, Err(sess.err_count())));
+ return Ok(f(tcx,
+ Some(mir_map),
+ analysis,
+ incremental_hashes_map,
+ Err(sess.err_count())));
}
analysis.reachable =
// The above three passes generate errors w/o aborting
if sess.err_count() > 0 {
- return Ok(f(tcx, Some(mir_map), analysis, Err(sess.err_count())));
+ return Ok(f(tcx,
+ Some(mir_map),
+ analysis,
+ incremental_hashes_map,
+ Err(sess.err_count())));
}
- Ok(f(tcx, Some(mir_map), analysis, Ok(())))
+ Ok(f(tcx, Some(mir_map), analysis, incremental_hashes_map, Ok(())))
})
}
/// Run the translation phase to LLVM, after which the AST and analysis can
pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
mut mir_map: MirMap<'tcx>,
- analysis: ty::CrateAnalysis)
+ analysis: ty::CrateAnalysis,
+ incremental_hashes_map: &IncrementalHashesMap)
-> trans::CrateTranslation {
let time_passes = tcx.sess.time_passes();
let translation =
time(time_passes,
"translation",
- move || trans::trans_crate(tcx, &mir_map, analysis));
+ move || trans::trans_crate(tcx, &mir_map, analysis, &incremental_hashes_map));
time(time_passes,
"assert dep graph",
time(time_passes,
"serialize dep graph",
- move || rustc_incremental::save_dep_graph(tcx));
+ move || rustc_incremental::save_dep_graph(tcx, &incremental_hashes_map));
translation
}
for (name, to) in lints {
let name = name.to_lowercase().replace("_", "-");
let desc = to.into_iter()
- .map(|x| x.as_str().replace("_", "-"))
+ .map(|x| x.to_string().replace("_", "-"))
.collect::<Vec<String>>()
.join(", ");
println!(" {} {}", padded(&name[..]), desc);
resolutions.clone(),
arenas,
id,
- |tcx, _, _, _| {
+ |tcx, _, _, _, _| {
let annotation = TypedAnnotation {
tcx: tcx,
};
resolutions.clone(),
arenas,
crate_name,
- |tcx, mir_map, _, _| {
+ |tcx, mir_map, _, _, _| {
match ppm {
PpmMir | PpmMirCFG => {
if let Some(mir_map) = mir_map {
use rustc::middle::region::CodeExtentData;
use rustc::middle::resolve_lifetime;
use rustc::middle::stability;
-use rustc::ty::subst::{Subst, Substs};
+use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::traits::Reveal;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::infer::{self, InferOk, InferResult, TypeOrigin};
use rustc_metadata::cstore::CStore;
use rustc::hir::map as hir_map;
use rustc::session::{self, config};
+use std::iter;
use std::rc::Rc;
use syntax::ast;
use syntax::abi::Abi;
pub fn re_early_bound(&self,
index: u32,
name: &'static str)
- -> ty::Region {
+ -> &'tcx ty::Region {
let name = token::intern(name);
- ty::ReEarlyBound(ty::EarlyBoundRegion {
+ self.infcx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
index: index,
name: name,
- })
+ }))
}
- pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) -> ty::Region {
- ty::ReLateBound(debruijn, ty::BrAnon(id))
+ pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex)
+ -> &'tcx ty::Region {
+ self.infcx.tcx.mk_region(ty::ReLateBound(debruijn, ty::BrAnon(id)))
}
- pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> {
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize)
+ pub fn t_rptr(&self, r: &'tcx ty::Region) -> Ty<'tcx> {
+ self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize)
}
pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> {
let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1));
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize)
}
pub fn t_rptr_late_bound_with_debruijn(&self,
debruijn: ty::DebruijnIndex)
-> Ty<'tcx> {
let r = self.re_late_bound_with_debruijn(id, debruijn);
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize)
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> {
self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize)
}
- pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region {
- ty::ReFree(ty::FreeRegion {
+ pub fn re_free(&self, nid: ast::NodeId, id: u32) -> &'tcx ty::Region {
+ self.infcx.tcx.mk_region(ty::ReFree(ty::FreeRegion {
scope: self.tcx().region_maps.item_extent(nid),
bound_region: ty::BrAnon(id),
- })
+ }))
}
pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> {
let r = self.re_free(nid, id);
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize)
+ self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize)
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
env.t_fn(&[t_param], env.t_nil())
};
- let substs = Substs::new(env.infcx.tcx, vec![t_rptr_bound1], vec![]);
+ let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(t_rptr_bound1)));
let t_substituted = t_source.subst(env.infcx.tcx, substs);
// t_expected = fn(&'a isize)
env.t_pair(t_param, env.t_fn(&[t_param], env.t_nil()))
};
- let substs = Substs::new(env.infcx.tcx, vec![t_rptr_bound1], vec![]);
+ let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(t_rptr_bound1)));
let t_substituted = t_source.subst(env.infcx.tcx, substs);
// t_expected = (&'a isize, fn(&'a isize))
env.t_fn(&[env.t_rptr(re_early)], env.t_nil())
};
- let substs = Substs::new(env.infcx.tcx, vec![], vec![re_bound1]);
+ let substs = Substs::new(env.infcx.tcx, iter::once(Kind::from(re_bound1)));
let t_substituted = t_source.subst(env.infcx.tcx, substs);
// t_expected = fn(&'a isize)
}
match write!(&mut self.dst, "\n") {
Err(e) => panic!("failed to emit error: {}", e),
- _ => ()
+ _ => match self.dst.flush() {
+ Err(e) => panic!("failed to emit error: {}", e),
+ _ => ()
+ }
}
}
}
fn emit_to_destination(rendered_buffer: &Vec<Vec<StyledString>>,
lvl: &Level,
dst: &mut Destination) -> io::Result<()> {
+ use lock;
+
+ // In order to prevent error message interleaving, where multiple error lines get intermixed
+ // when multiple compiler processes error simultaneously, we emit errors with additional
+ // steps.
+ //
+ // On Unix systems, we write into a buffered terminal rather than directly to a terminal. When
+ // the .flush() is called we take the buffer created from the buffered writes and write it at
+ // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling
+ // scheme, this buffered approach works and maintains the styling.
+ //
+ // On Windows, styling happens through calls to a terminal API. This prevents us from using the
+ // same buffering approach. Instead, we use a global Windows mutex, which we acquire long
+ // enough to output the full error message, then we release.
+ let _buffer_lock = lock::acquire_global_lock("rustc_errors");
for line in rendered_buffer {
for part in line {
dst.apply_style(lvl.clone(), part.style)?;
}
write!(dst, "\n")?;
}
+ dst.flush()?;
Ok(())
}
}
}
+pub type BufferedStderr = term::Terminal<Output = BufferedWriter> + Send;
+
pub enum Destination {
Terminal(Box<term::StderrTerminal>),
+ BufferedTerminal(Box<BufferedStderr>),
Raw(Box<Write + Send>),
}
+/// Buffered writer gives us a way on Unix to buffer up an entire error message before we output
+/// it. This helps to prevent interleaving of multiple error messages when multiple compiler
+/// processes error simultaneously
+pub struct BufferedWriter {
+ buffer: Vec<u8>,
+}
+
+impl BufferedWriter {
+    // note: we use `_new` because the conditional compilation at its use site may make
+    // this function unused on some platforms
+ fn _new() -> BufferedWriter {
+ BufferedWriter {
+ buffer: vec![]
+ }
+ }
+}
+
+impl Write for BufferedWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ for b in buf {
+ self.buffer.push(*b);
+ }
+ Ok(buf.len())
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ let mut stderr = io::stderr();
+ let result = (|| {
+ stderr.write_all(&self.buffer)?;
+ stderr.flush()
+ })();
+ self.buffer.clear();
+ result
+ }
+}
+
impl Destination {
+ #[cfg(not(windows))]
+ /// When not on Windows, prefer the buffered terminal so that we can buffer an entire error
+ /// to be emitted at one time.
+ fn from_stderr() -> Destination {
+ let stderr: Option<Box<BufferedStderr>> =
+ term::TerminfoTerminal::new(BufferedWriter::_new())
+ .map(|t| Box::new(t) as Box<BufferedStderr>);
+
+ match stderr {
+ Some(t) => BufferedTerminal(t),
+ None => Raw(Box::new(io::stderr())),
+ }
+ }
+
+ #[cfg(windows)]
+ /// Return a normal, unbuffered terminal when on Windows.
fn from_stderr() -> Destination {
- match term::stderr() {
+ let stderr: Option<Box<term::StderrTerminal>> =
+ term::TerminfoTerminal::new(io::stderr())
+ .map(|t| Box::new(t) as Box<term::StderrTerminal>)
+ .or_else(|| term::WinConsole::new(io::stderr()).ok()
+ .map(|t| Box::new(t) as Box<term::StderrTerminal>));
+
+ match stderr {
Some(t) => Terminal(t),
None => Raw(Box::new(io::stderr())),
}
fn start_attr(&mut self, attr: term::Attr) -> io::Result<()> {
match *self {
Terminal(ref mut t) => { t.attr(attr)?; }
+ BufferedTerminal(ref mut t) => { t.attr(attr)?; }
Raw(_) => { }
}
Ok(())
fn reset_attrs(&mut self) -> io::Result<()> {
match *self {
Terminal(ref mut t) => { t.reset()?; }
+ BufferedTerminal(ref mut t) => { t.reset()?; }
Raw(_) => { }
}
Ok(())
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
match *self {
Terminal(ref mut t) => t.write(bytes),
+ BufferedTerminal(ref mut t) => t.write(bytes),
Raw(ref mut w) => w.write(bytes),
}
}
fn flush(&mut self) -> io::Result<()> {
match *self {
Terminal(ref mut t) => t.flush(),
+ BufferedTerminal(ref mut t) => t.flush(),
Raw(ref mut w) => w.flush(),
}
}
pub mod snippet;
pub mod registry;
pub mod styled_buffer;
+mod lock;
use syntax_pos::{BytePos, Loc, FileLinesResult, FileName, MultiSpan, Span, NO_EXPANSION };
use syntax_pos::{MacroBacktrace};
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Bindings to acquire a global named lock.
+//!
+//! This is intended to be used to synchronize multiple compiler processes to
+//! ensure that we can output complete errors without interleaving on Windows.
+//! Note that the lock is only needed on Windows: on Unix systems each complete
+//! message is buffered and written to stderr in one shot, so no inter-process
+//! lock is required there. Accordingly, `acquire_global_lock` is a no-op on
+//! Unix and the real implementation exists only for `cfg(windows)`.
+//!
+//! For more information about why this is necessary, see where this is called.
+
+use std::any::Any;
+
+#[cfg(windows)]
+#[allow(bad_style)]
+pub fn acquire_global_lock(name: &str) -> Box<Any> {
+ use std::ffi::CString;
+ use std::io;
+
+ type LPSECURITY_ATTRIBUTES = *mut u8;
+ type BOOL = i32;
+ type LPCSTR = *const u8;
+ type HANDLE = *mut u8;
+ type DWORD = u32;
+
+ const INFINITE: DWORD = !0;
+ const WAIT_OBJECT_0: DWORD = 0;
+ const WAIT_ABANDONED: DWORD = 0x00000080;
+
+ extern "system" {
+ fn CreateMutexA(lpMutexAttributes: LPSECURITY_ATTRIBUTES,
+ bInitialOwner: BOOL,
+ lpName: LPCSTR) -> HANDLE;
+ fn WaitForSingleObject(hHandle: HANDLE,
+ dwMilliseconds: DWORD) -> DWORD;
+ fn ReleaseMutex(hMutex: HANDLE) -> BOOL;
+ fn CloseHandle(hObject: HANDLE) -> BOOL;
+ }
+
+ struct Handle(HANDLE);
+
+ impl Drop for Handle {
+ fn drop(&mut self) {
+ unsafe {
+ CloseHandle(self.0);
+ }
+ }
+ }
+
+ struct Guard(Handle);
+
+ impl Drop for Guard {
+ fn drop(&mut self) {
+ unsafe {
+ ReleaseMutex((self.0).0);
+ }
+ }
+ }
+
+ let cname = CString::new(name).unwrap();
+ unsafe {
+ // Create a named mutex, with no security attributes and also not
+ // acquired when we create it.
+ //
+ // This will silently create one if it doesn't already exist, or it'll
+ // open up a handle to one if it already exists.
+ let mutex = CreateMutexA(0 as *mut _, 0, cname.as_ptr() as *const u8);
+ if mutex.is_null() {
+ panic!("failed to create global mutex named `{}`: {}", name,
+ io::Error::last_os_error());
+ }
+ let mutex = Handle(mutex);
+
+ // Acquire the lock through `WaitForSingleObject`.
+ //
+ // A return value of `WAIT_OBJECT_0` means we successfully acquired it.
+ //
+ // A return value of `WAIT_ABANDONED` means that the previous holder of
+ // the thread exited without calling `ReleaseMutex`. This can happen,
+ // for example, when the compiler crashes or is interrupted via ctrl-c
+ // or the like. In this case, however, we are still transferred
+ // ownership of the lock so we continue.
+ //
+ // If an error happens.. well... that's surprising!
+ match WaitForSingleObject(mutex.0, INFINITE) {
+ WAIT_OBJECT_0 | WAIT_ABANDONED => {}
+ code => {
+ panic!("WaitForSingleObject failed on global mutex named \
+ `{}`: {} (ret={:x})", name,
+ io::Error::last_os_error(), code);
+ }
+ }
+
+ // Return a guard which will call `ReleaseMutex` when dropped.
+ Box::new(Guard(mutex))
+ }
+}
+
+#[cfg(unix)]
+pub fn acquire_global_lock(_name: &str) -> Box<Any> {
+ Box::new(())
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::hir::def_id::DefId;
+use rustc::ty::TyCtxt;
+use rustc::util::nodemap::DefIdMap;
+
+pub struct DefPathHashes<'a, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ data: DefIdMap<u64>,
+}
+
+impl<'a, 'tcx> DefPathHashes<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+ DefPathHashes {
+ tcx: tcx,
+ data: DefIdMap()
+ }
+ }
+
+ pub fn hash(&mut self, def_id: DefId) -> u64 {
+ let tcx = self.tcx;
+ *self.data.entry(def_id)
+ .or_insert_with(|| {
+ let def_path = tcx.def_path(def_id);
+ def_path.deterministic_hash(tcx)
+ })
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Calculation of a Strict Version Hash for crates. For a length
-//! comment explaining the general idea, see `librustc/middle/svh.rs`.
-
+//! Calculation of the (misnamed) "strict version hash" for crates and
+//! items. This hash is used to tell when the HIR changed in such a
+//! way that results from previous compilations may no longer be
+//! applicable and hence must be recomputed. It should probably be
+//! renamed to the ICH (incremental compilation hash).
+//!
+//! The hashes for all items are computed once at the beginning of
+//! compilation and stored into a map. In addition, a hash is computed
+//! of the **entire crate**.
+//!
+//! Storing the hashes in a map avoids the need to compute them twice
+//! (once when loading prior incremental results and once when
+//! saving), but it is also important for correctness: at least as of
+//! the time of this writing, the typeck pass rewrites entries in
+//! the dep-map in-place to accommodate UFCS resolutions. Since name
+//! resolution is part of the hash, the result is that hashes computed
+//! at the end of compilation would be different from those computed
+//! at the beginning.
+
+use syntax::ast;
use syntax::attr::AttributeMethods;
use std::hash::{Hash, SipHasher, Hasher};
+use rustc::dep_graph::DepNode;
+use rustc::hir;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
-use rustc::hir::map::{NodeItem, NodeForeignItem};
-use rustc::hir::svh::Svh;
+use rustc::hir::intravisit as visit;
use rustc::ty::TyCtxt;
-use rustc::hir::intravisit::{self, Visitor};
+use rustc_data_structures::fnv::FnvHashMap;
+use self::def_path_hash::DefPathHashes;
use self::svh_visitor::StrictVersionHashVisitor;
+mod def_path_hash;
mod svh_visitor;
-pub trait SvhCalculate {
- /// Calculate the SVH for an entire krate.
- fn calculate_krate_hash(self) -> Svh;
-
- /// Calculate the SVH for a particular item.
- fn calculate_item_hash(self, def_id: DefId) -> u64;
+pub type IncrementalHashesMap = FnvHashMap<DepNode<DefId>, u64>;
+
+pub fn compute_incremental_hashes_map<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> IncrementalHashesMap {
+ let _ignore = tcx.dep_graph.in_ignore();
+ let krate = tcx.map.krate();
+ let mut visitor = HashItemsVisitor { tcx: tcx,
+ hashes: FnvHashMap(),
+ def_path_hashes: DefPathHashes::new(tcx) };
+ visitor.calculate_def_id(DefId::local(CRATE_DEF_INDEX), |v| visit::walk_crate(v, krate));
+ krate.visit_all_items(&mut visitor);
+ visitor.compute_crate_hash();
+ visitor.hashes
}
-impl<'a, 'tcx> SvhCalculate for TyCtxt<'a, 'tcx, 'tcx> {
- fn calculate_krate_hash(self) -> Svh {
- // FIXME (#14132): This is better than it used to be, but it still not
- // ideal. We now attempt to hash only the relevant portions of the
- // Crate AST as well as the top-level crate attributes. (However,
- // the hashing of the crate attributes should be double-checked
- // to ensure it is not incorporating implementation artifacts into
- // the hash that are not otherwise visible.)
+struct HashItemsVisitor<'a, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_path_hashes: DefPathHashes<'a, 'tcx>,
+ hashes: IncrementalHashesMap,
+}
- let crate_disambiguator = self.sess.local_crate_disambiguator();
- let krate = self.map.krate();
+impl<'a, 'tcx> HashItemsVisitor<'a, 'tcx> {
+ fn calculate_node_id<W>(&mut self, id: ast::NodeId, walk_op: W)
+ where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>)
+ {
+ let def_id = self.tcx.map.local_def_id(id);
+ self.calculate_def_id(def_id, walk_op)
+ }
- // FIXME: this should use SHA1, not SipHash. SipHash is not built to
- // avoid collisions.
+ fn calculate_def_id<W>(&mut self, def_id: DefId, mut walk_op: W)
+ where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>)
+ {
+ assert!(def_id.is_local());
+ debug!("HashItemsVisitor::calculate(def_id={:?})", def_id);
+ // FIXME: this should use SHA1, not SipHash. SipHash is not
+ // built to avoid collisions.
let mut state = SipHasher::new();
- debug!("state: {:?}", state);
+ walk_op(&mut StrictVersionHashVisitor::new(&mut state,
+ self.tcx,
+ &mut self.def_path_hashes));
+ let item_hash = state.finish();
+ self.hashes.insert(DepNode::Hir(def_id), item_hash);
+ debug!("calculate_item_hash: def_id={:?} hash={:?}", def_id, item_hash);
+ }
+
+ fn compute_crate_hash(&mut self) {
+ let krate = self.tcx.map.krate();
- // FIXME(#32753) -- at (*) we `to_le` for endianness, but is
- // this enough, and does it matter anyway?
- "crate_disambiguator".hash(&mut state);
- crate_disambiguator.len().to_le().hash(&mut state); // (*)
- crate_disambiguator.hash(&mut state);
+ let mut crate_state = SipHasher::new();
- debug!("crate_disambiguator: {:?}", crate_disambiguator);
- debug!("state: {:?}", state);
+ let crate_disambiguator = self.tcx.sess.local_crate_disambiguator();
+ "crate_disambiguator".hash(&mut crate_state);
+ crate_disambiguator.len().hash(&mut crate_state);
+ crate_disambiguator.hash(&mut crate_state);
+ // add each item (in some deterministic order) to the overall
+ // crate hash.
{
- let mut visit = StrictVersionHashVisitor::new(&mut state, self);
- krate.visit_all_items(&mut visit);
+ let def_path_hashes = &mut self.def_path_hashes;
+ let mut item_hashes: Vec<_> =
+ self.hashes.iter()
+ .map(|(item_dep_node, &item_hash)| {
+                               // convert from a DepNode<DefId> to a
+ // DepNode<u64> where the u64 is the
+ // hash of the def-id's def-path:
+ let item_dep_node =
+ item_dep_node.map_def(|&did| Some(def_path_hashes.hash(did)))
+ .unwrap();
+ (item_dep_node, item_hash)
+ })
+ .collect();
+ item_hashes.sort(); // avoid artificial dependencies on item ordering
+ item_hashes.hash(&mut crate_state);
}
- // FIXME (#14132): This hash is still sensitive to e.g. the
- // spans of the crate Attributes and their underlying
- // MetaItems; we should make ContentHashable impl for those
- // types and then use hash_content. But, since all crate
- // attributes should appear near beginning of the file, it is
- // not such a big deal to be sensitive to their spans for now.
- //
- // We hash only the MetaItems instead of the entire Attribute
- // to avoid hashing the AttrId
for attr in &krate.attrs {
debug!("krate attr {:?}", attr);
- attr.meta().hash(&mut state);
+ attr.meta().hash(&mut crate_state);
}
- Svh::new(state.finish())
+ let crate_hash = crate_state.finish();
+ self.hashes.insert(DepNode::Krate, crate_hash);
+ debug!("calculate_crate_hash: crate_hash={:?}", crate_hash);
}
+}
- fn calculate_item_hash(self, def_id: DefId) -> u64 {
- assert!(def_id.is_local());
-
- debug!("calculate_item_hash(def_id={:?})", def_id);
-
- let mut state = SipHasher::new();
-
- {
- let mut visit = StrictVersionHashVisitor::new(&mut state, self);
- if def_id.index == CRATE_DEF_INDEX {
- // the crate root itself is not registered in the map
- // as an item, so we have to fetch it this way
- let krate = self.map.krate();
- intravisit::walk_crate(&mut visit, krate);
- } else {
- let node_id = self.map.as_local_node_id(def_id).unwrap();
- match self.map.find(node_id) {
- Some(NodeItem(item)) => visit.visit_item(item),
- Some(NodeForeignItem(item)) => visit.visit_foreign_item(item),
- r => bug!("calculate_item_hash: expected an item for node {} not {:?}",
- node_id, r),
- }
- }
- }
-
- let hash = state.finish();
- debug!("calculate_item_hash: def_id={:?} hash={:?}", def_id, hash);
+impl<'a, 'tcx> visit::Visitor<'tcx> for HashItemsVisitor<'a, 'tcx> {
+ fn visit_item(&mut self, item: &'tcx hir::Item) {
+ self.calculate_node_id(item.id, |v| v.visit_item(item));
+ visit::walk_item(self, item);
+ }
- hash
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) {
+ self.calculate_node_id(item.id, |v| v.visit_foreign_item(item));
+ visit::walk_foreign_item(self, item);
}
}
+
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit as visit;
use rustc::hir::intravisit::{Visitor, FnKind};
-use rustc::hir::map::DefPath;
use rustc::ty::TyCtxt;
use std::hash::{Hash, SipHasher};
-pub struct StrictVersionHashVisitor<'a, 'tcx: 'a> {
- pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+use super::def_path_hash::DefPathHashes;
+
+pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> {
+ pub tcx: TyCtxt<'hash, 'tcx, 'tcx>,
pub st: &'a mut SipHasher,
+
+ // collect a deterministic hash of def-ids that we have seen
+ def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
}
-impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> {
+impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
pub fn new(st: &'a mut SipHasher,
- tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ tcx: TyCtxt<'hash, 'tcx, 'tcx>,
+ def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>)
-> Self {
- StrictVersionHashVisitor { st: st, tcx: tcx }
+ StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes }
}
- fn hash_def_path(&mut self, path: &DefPath) {
- path.deterministic_hash_to(self.tcx, self.st);
+ fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 {
+ self.def_path_hashes.hash(def_id)
}
}
SawStmtSemi,
}
-impl<'a, 'tcx> Visitor<'a> for StrictVersionHashVisitor<'a, 'tcx> {
+impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn visit_nested_item(&mut self, _: ItemId) {
// Each item is hashed independently; ignore nested items.
}
- fn visit_variant_data(&mut self, s: &'a VariantData, name: Name,
- g: &'a Generics, _: NodeId, _: Span) {
+ fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name,
+ g: &'tcx Generics, _: NodeId, _: Span) {
debug!("visit_variant_data: st={:?}", self.st);
SawStructDef(name.as_str()).hash(self.st);
visit::walk_generics(self, g);
visit::walk_struct_def(self, s)
}
- fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) {
+ fn visit_variant(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) {
debug!("visit_variant: st={:?}", self.st);
SawVariant.hash(self.st);
// walk_variant does not call walk_generics, so do it here.
SawIdent(name.as_str()).hash(self.st);
}
- fn visit_lifetime(&mut self, l: &'a Lifetime) {
+ fn visit_lifetime(&mut self, l: &'tcx Lifetime) {
debug!("visit_lifetime: st={:?}", self.st);
SawLifetime(l.name.as_str()).hash(self.st);
}
- fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) {
+ fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) {
debug!("visit_lifetime_def: st={:?}", self.st);
SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st);
}
// monomorphization and cross-crate inlining generally implies
// that a change to a crate body will require downstream
// crates to be recompiled.
- fn visit_expr(&mut self, ex: &'a Expr) {
+ fn visit_expr(&mut self, ex: &'tcx Expr) {
debug!("visit_expr: st={:?}", self.st);
SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex)
}
- fn visit_stmt(&mut self, s: &'a Stmt) {
+ fn visit_stmt(&mut self, s: &'tcx Stmt) {
debug!("visit_stmt: st={:?}", self.st);
// We don't want to modify the hash for decls, because
visit::walk_stmt(self, s)
}
- fn visit_foreign_item(&mut self, i: &'a ForeignItem) {
+ fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) {
debug!("visit_foreign_item: st={:?}", self.st);
// FIXME (#14132) ideally we would incorporate privacy (or
SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i)
}
- fn visit_item(&mut self, i: &'a Item) {
+ fn visit_item(&mut self, i: &'tcx Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
// FIXME (#14132) ideally would incorporate reachability
SawItem.hash(self.st); visit::walk_item(self, i)
}
- fn visit_mod(&mut self, m: &'a Mod, _s: Span, n: NodeId) {
+ fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) {
debug!("visit_mod: st={:?}", self.st);
SawMod.hash(self.st); visit::walk_mod(self, m, n)
}
- fn visit_ty(&mut self, t: &'a Ty) {
+ fn visit_ty(&mut self, t: &'tcx Ty) {
debug!("visit_ty: st={:?}", self.st);
SawTy.hash(self.st); visit::walk_ty(self, t)
}
- fn visit_generics(&mut self, g: &'a Generics) {
+ fn visit_generics(&mut self, g: &'tcx Generics) {
debug!("visit_generics: st={:?}", self.st);
SawGenerics.hash(self.st); visit::walk_generics(self, g)
}
- fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl,
- b: &'a Block, s: Span, n: NodeId) {
+ fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl,
+ b: &'tcx Block, s: Span, n: NodeId) {
debug!("visit_fn: st={:?}", self.st);
SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n)
}
- fn visit_trait_item(&mut self, ti: &'a TraitItem) {
+ fn visit_trait_item(&mut self, ti: &'tcx TraitItem) {
debug!("visit_trait_item: st={:?}", self.st);
SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti)
}
- fn visit_impl_item(&mut self, ii: &'a ImplItem) {
+ fn visit_impl_item(&mut self, ii: &'tcx ImplItem) {
debug!("visit_impl_item: st={:?}", self.st);
SawImplItem.hash(self.st); visit::walk_impl_item(self, ii)
}
- fn visit_struct_field(&mut self, s: &'a StructField) {
+ fn visit_struct_field(&mut self, s: &'tcx StructField) {
debug!("visit_struct_field: st={:?}", self.st);
SawStructField.hash(self.st); visit::walk_struct_field(self, s)
}
- fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) {
+ fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) {
debug!("visit_path: st={:?}", self.st);
SawPath.hash(self.st); visit::walk_path(self, path)
}
- fn visit_block(&mut self, b: &'a Block) {
+ fn visit_block(&mut self, b: &'tcx Block) {
debug!("visit_block: st={:?}", self.st);
SawBlock.hash(self.st); visit::walk_block(self, b)
}
- fn visit_pat(&mut self, p: &'a Pat) {
+ fn visit_pat(&mut self, p: &'tcx Pat) {
debug!("visit_pat: st={:?}", self.st);
SawPat.hash(self.st); visit::walk_pat(self, p)
}
- fn visit_local(&mut self, l: &'a Local) {
+ fn visit_local(&mut self, l: &'tcx Local) {
debug!("visit_local: st={:?}", self.st);
SawLocal.hash(self.st); visit::walk_local(self, l)
}
- fn visit_arm(&mut self, a: &'a Arm) {
+ fn visit_arm(&mut self, a: &'tcx Arm) {
debug!("visit_arm: st={:?}", self.st);
SawArm.hash(self.st); visit::walk_arm(self, a)
}
SawErr,
}
-impl<'a, 'tcx> StrictVersionHashVisitor<'a, 'tcx> {
+impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn hash_resolve(&mut self, id: ast::NodeId) {
// Because whether or not a given id has an entry is dependent
// solely on expr variant etc, we don't need to hash whether
// variant it is above when we visit the HIR).
if let Some(def) = self.tcx.def_map.borrow().get(&id) {
+ debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st);
self.hash_partial_def(def);
}
if let Some(traits) = self.tcx.trait_map.get(&id) {
+ debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st);
traits.len().hash(self.st);
- for candidate in traits {
- self.hash_def_id(candidate.def_id);
- }
+
+ // The ordering of the candidates is not fixed. So we hash
+ // the def-ids and then sort them and hash the collection.
+ let mut candidates: Vec<_> =
+ traits.iter()
+ .map(|&TraitCandidate { def_id, import_id: _ }| {
+ self.compute_def_id_hash(def_id)
+ })
+ .collect();
+ candidates.sort();
+ candidates.hash(self.st);
}
}
fn hash_def_id(&mut self, def_id: DefId) {
- let def_path = self.tcx.def_path(def_id);
- self.hash_def_path(&def_path);
+ self.compute_def_id_hash(def_id).hash(self.st);
}
fn hash_partial_def(&mut self, def: &PathResolution) {
mod persist;
pub use assert_dep_graph::assert_dep_graph;
-pub use calculate_svh::SvhCalculate;
+pub use calculate_svh::compute_incremental_hashes_map;
+pub use calculate_svh::IncrementalHashesMap;
pub use persist::load_dep_graph;
pub use persist::save_dep_graph;
pub use persist::save_trans_partition;
debug!("assert_dirty({:?})", dep_node);
match dep_node {
+ DepNode::Krate |
DepNode::Hir(_) => {
// HIR nodes are inputs, so if we are asserting that the HIR node is
// dirty, we check the dirty input set.
debug!("assert_clean({:?})", dep_node);
match dep_node {
+ DepNode::Krate |
DepNode::Hir(_) => {
// For HIR nodes, check the inputs.
if self.dirty_inputs.contains(&dep_node) {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use calculate_svh::SvhCalculate;
use rbml::Error;
use rbml::opaque::Decoder;
use rustc::dep_graph::DepNode;
use std::fs::File;
use syntax::ast;
+use IncrementalHashesMap;
use super::data::*;
use super::util::*;
pub struct HashContext<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &'a IncrementalHashesMap,
item_metadata_hashes: FnvHashMap<DefId, u64>,
crate_hashes: FnvHashMap<ast::CrateNum, Svh>,
}
impl<'a, 'tcx> HashContext<'a, 'tcx> {
- pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &'a IncrementalHashesMap)
+ -> Self {
HashContext {
tcx: tcx,
+ incremental_hashes_map: incremental_hashes_map,
item_metadata_hashes: FnvHashMap(),
crate_hashes: FnvHashMap(),
}
pub fn is_hashable(dep_node: &DepNode<DefId>) -> bool {
match *dep_node {
+ DepNode::Krate |
DepNode::Hir(_) => true,
DepNode::MetaData(def_id) => !def_id.is_local(),
_ => false,
}
}
- pub fn hash(&mut self, dep_node: &DepNode<DefId>) -> Option<(DefId, u64)> {
+ pub fn hash(&mut self, dep_node: &DepNode<DefId>) -> Option<u64> {
match *dep_node {
+ DepNode::Krate => {
+ Some(self.incremental_hashes_map[dep_node])
+ }
+
// HIR nodes (which always come from our crate) are an input:
DepNode::Hir(def_id) => {
- Some((def_id, self.hir_hash(def_id)))
+ assert!(def_id.is_local(),
+ "cannot hash HIR for non-local def-id {:?} => {:?}",
+ def_id,
+ self.tcx.item_path_str(def_id));
+
+ assert!(!self.tcx.map.is_inlined_def_id(def_id),
+ "cannot hash HIR for inlined def-id {:?} => {:?}",
+ def_id,
+ self.tcx.item_path_str(def_id));
+
+ Some(self.incremental_hashes_map[dep_node])
}
// MetaData from other crates is an *input* to us.
// don't hash them, but we do compute a hash for them and
// save it for others to use.
DepNode::MetaData(def_id) if !def_id.is_local() => {
- Some((def_id, self.metadata_hash(def_id)))
+ Some(self.metadata_hash(def_id))
}
_ => {
}
}
- fn hir_hash(&mut self, def_id: DefId) -> u64 {
- assert!(def_id.is_local(),
- "cannot hash HIR for non-local def-id {:?} => {:?}",
- def_id,
- self.tcx.item_path_str(def_id));
-
- assert!(!self.tcx.map.is_inlined_def_id(def_id),
- "cannot hash HIR for inlined def-id {:?} => {:?}",
- def_id,
- self.tcx.item_path_str(def_id));
-
- // FIXME(#32753) -- should we use a distinct hash here
- self.tcx.calculate_item_hash(def_id)
- }
-
fn metadata_hash(&mut self, def_id: DefId) -> u64 {
debug!("metadata_hash(def_id={:?})", def_id);
use std::fs::{self, File};
use std::path::{Path};
+use IncrementalHashesMap;
use super::data::*;
use super::directory::*;
use super::dirty_clean;
/// early in compilation, before we've really done any work, but
/// actually it doesn't matter all that much.) See `README.md` for
/// more general overview.
-pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &IncrementalHashesMap) {
if tcx.sess.opts.incremental.is_none() {
return;
}
let _ignore = tcx.dep_graph.in_ignore();
- load_dep_graph_if_exists(tcx);
+ load_dep_graph_if_exists(tcx, incremental_hashes_map);
}
-fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &IncrementalHashesMap) {
let dep_graph_path = dep_graph_path(tcx).unwrap();
let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) {
Some(p) => p,
None => return // no file
};
- match decode_dep_graph(tcx, &dep_graph_data, &work_products_data) {
+ match decode_dep_graph(tcx, incremental_hashes_map, &dep_graph_data, &work_products_data) {
Ok(dirty_nodes) => dirty_nodes,
Err(err) => {
tcx.sess.warn(
/// Decode the dep graph and load the edges/nodes that are still clean
/// into `tcx.dep_graph`.
pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &IncrementalHashesMap,
dep_graph_data: &[u8],
work_products_data: &[u8])
-> Result<(), Error>
// reason for this is that this way we can include nodes that have
// been removed (which no longer have a `DefId` in the current
// compilation).
- let dirty_raw_source_nodes = dirty_nodes(tcx, &serialized_dep_graph.hashes, &retraced);
+ let dirty_raw_source_nodes = dirty_nodes(tcx,
+ incremental_hashes_map,
+ &serialized_dep_graph.hashes,
+ &retraced);
// Create a list of (raw-source-node ->
// retracted-target-node) edges. In the process of retracing the
/// Computes which of the original set of def-ids are dirty. Stored in
/// a bit vector where the index is the DefPathIndex.
fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- hashes: &[SerializedHash],
+ incremental_hashes_map: &IncrementalHashesMap,
+ serialized_hashes: &[SerializedHash],
retraced: &RetracedDefIdDirectory)
-> DirtyNodes {
- let mut hcx = HashContext::new(tcx);
+ let mut hcx = HashContext::new(tcx, incremental_hashes_map);
let mut dirty_nodes = FnvHashSet();
- for hash in hashes {
+ for hash in serialized_hashes {
if let Some(dep_node) = retraced.map(&hash.dep_node) {
- let (_, current_hash) = hcx.hash(&dep_node).unwrap();
+ let current_hash = hcx.hash(&dep_node).unwrap();
if current_hash == hash.hash {
continue;
}
let mut hashes = FnvHashMap();
for input in inputs.values().flat_map(|v| v.iter().cloned()) {
hashes.entry(input)
- .or_insert_with(|| hcx.hash(input).unwrap().1);
+ .or_insert_with(|| hcx.hash(input).unwrap());
}
Predecessors {
use std::fs::{self, File};
use std::path::PathBuf;
+use IncrementalHashesMap;
use super::data::*;
use super::directory::*;
use super::hash::*;
use super::preds::*;
use super::util::*;
-pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &IncrementalHashesMap) {
debug!("save_dep_graph()");
let _ignore = tcx.dep_graph.in_ignore();
let sess = tcx.sess;
if sess.opts.incremental.is_none() {
return;
}
- let mut hcx = HashContext::new(tcx);
+ let mut hcx = HashContext::new(tcx, incremental_hashes_map);
let mut builder = DefIdDirectoryBuilder::new(tcx);
let query = tcx.dep_graph.query();
let preds = Predecessors::new(&query, &mut hcx);
use syntax::{ast};
use syntax::attr::{self, AttrMetaMethods, AttributeMethods};
-use syntax_pos::{self, Span};
+use syntax_pos::Span;
use rustc::hir::{self, PatKind};
use rustc::hir::intravisit::FnKind;
}
}
}
-
-/// Lints for attempts to impl Drop on types that have `#[repr(C)]`
-/// attribute (see issue #24585).
-#[derive(Copy, Clone)]
-pub struct DropWithReprExtern;
-
-declare_lint! {
- DROP_WITH_REPR_EXTERN,
- Warn,
- "use of #[repr(C)] on a type that implements Drop"
-}
-
-impl LintPass for DropWithReprExtern {
- fn get_lints(&self) -> LintArray {
- lint_array!(DROP_WITH_REPR_EXTERN)
- }
-}
-
-impl LateLintPass for DropWithReprExtern {
- fn check_crate(&mut self, ctx: &LateContext, _: &hir::Crate) {
- let drop_trait = match ctx.tcx.lang_items.drop_trait() {
- Some(id) => ctx.tcx.lookup_trait_def(id), None => { return }
- };
- drop_trait.for_each_impl(ctx.tcx, |drop_impl_did| {
- if !drop_impl_did.is_local() {
- return;
- }
- let dtor_self_type = ctx.tcx.lookup_item_type(drop_impl_did).ty;
-
- match dtor_self_type.sty {
- ty::TyEnum(self_type_def, _) |
- ty::TyStruct(self_type_def, _) => {
- let self_type_did = self_type_def.did;
- let hints = ctx.tcx.lookup_repr_hints(self_type_did);
- if hints.iter().any(|attr| *attr == attr::ReprExtern) &&
- self_type_def.dtor_kind().has_drop_flag() {
- let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did,
- syntax_pos::DUMMY_SP);
- let self_defn_span = ctx.tcx.map.def_id_span(self_type_did,
- syntax_pos::DUMMY_SP);
- ctx.span_lint_note(DROP_WITH_REPR_EXTERN,
- drop_impl_span,
- "implementing Drop adds hidden state to types, \
- possibly conflicting with `#[repr(C)]`",
- self_defn_span,
- "the `#[repr(C)]` attribute is attached here");
- }
- }
- _ => {}
- }
- })
- }
-}
UnconditionalRecursion,
InvalidNoMangleItems,
PluginAsLibrary,
- DropWithReprExtern,
MutableTransmutes,
);
// This was renamed to raw_pointer_derive, which was then removed,
// so it is also considered removed
store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok");
+ store.register_removed("drop_with_repr_extern", "drop flags have been removed");
}
let y: usize = 1.wrapping_neg();
assert_eq!(x, y);
```
-
"##
}
let host = env::var("HOST").unwrap();
let is_crossed = target != host;
- let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl"];
+ let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", "systemz"];
// FIXME: surely we don't need all these components, right? Stuff like mcjit
// or interpreter the compiler itself never uses.
Col: c_uint)
-> DILexicalBlock;
+ pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: DIBuilderRef,
+ Scope: DIScope,
+ File: DIFile)
+ -> DILexicalBlock;
+
pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: DIBuilderRef,
Context: DIScope,
Name: *const c_char,
LLVMInitializePNaClTargetInfo,
LLVMInitializePNaClTarget,
LLVMInitializePNaClTargetMC);
+ init_target!(llvm_component = "systemz",
+ LLVMInitializeSystemZTargetInfo,
+ LLVMInitializeSystemZTarget,
+ LLVMInitializeSystemZTargetMC,
+ LLVMInitializeSystemZAsmPrinter,
+ LLVMInitializeSystemZAsmParser);
}
pub fn last_error() -> Option<String> {
ii: InlinedItemRef) {
let id = match ii {
InlinedItemRef::Item(_, i) => i.id,
- InlinedItemRef::Foreign(_, i) => i.id,
InlinedItemRef::TraitItem(_, ti) => ti.id,
InlinedItemRef::ImplItem(_, ii) => ii.id,
};
dcx);
let name = match *ii {
InlinedItem::Item(_, ref i) => i.name,
- InlinedItem::Foreign(_, ref i) => i.name,
InlinedItem::TraitItem(_, ref ti) => ti.name,
InlinedItem::ImplItem(_, ref ii) => ii.name
};
InlinedItemRef::ImplItem(d, ii) => {
InlinedItem::ImplItem(d, P(fold::noop_fold_impl_item(ii.clone(), &mut fld)))
}
- InlinedItemRef::Foreign(d, i) => {
- InlinedItem::Foreign(d, P(fold::noop_fold_foreign_item(i.clone(), &mut fld)))
- }
};
(ii, fld.id_range)
// Encoding and decoding the side tables
trait rbml_writer_helpers<'tcx> {
- fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region);
+ fn emit_region(&mut self, ecx: &e::EncodeContext, r: &'tcx ty::Region);
fn emit_ty<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, ty: Ty<'tcx>);
fn emit_substs<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>,
substs: &Substs<'tcx>);
}
impl<'a, 'tcx> rbml_writer_helpers<'tcx> for Encoder<'a> {
- fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region) {
+ fn emit_region(&mut self, ecx: &e::EncodeContext, r: &'tcx ty::Region) {
self.emit_opaque(|this| Ok(tyencode::enc_region(&mut this.cursor,
&ecx.ty_str_ctxt(),
r)));
&adjustment::AutoPtr(r, m) => {
this.emit_enum_variant("AutoPtr", 0, 2, |this| {
this.emit_enum_variant_arg(0,
- |this| Ok(this.emit_region(ecx, *r)));
+ |this| Ok(this.emit_region(ecx, r)));
this.emit_enum_variant_arg(1, |this| m.encode(this))
})
}
f: F) -> R
where F: for<'x> FnOnce(&mut tydecode::TyDecoder<'x, 'tcx>) -> R;
- fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region;
+ fn read_region<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> &'tcx ty::Region;
fn read_ty<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Ty<'tcx>;
fn read_tys<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Vec<Ty<'tcx>>;
fn read_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
-> ty::Predicate<'tcx>;
fn read_substs<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
-> &'tcx Substs<'tcx>;
- fn read_upvar_capture(&mut self, dcx: &DecodeContext)
- -> ty::UpvarCapture;
+ fn read_upvar_capture<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
+ -> ty::UpvarCapture<'tcx>;
fn read_auto_adjustment<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
-> adjustment::AutoAdjustment<'tcx>;
fn read_cast_kind<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>)
str
}
}
- fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region {
+ fn read_region<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) -> &'tcx ty::Region {
// Note: regions types embed local node ids. In principle, we
// should translate these node ids into the new decode
// context. However, we do not bother, because region types
.parse_substs())
}).unwrap()
}
- fn read_upvar_capture(&mut self, dcx: &DecodeContext) -> ty::UpvarCapture {
+ fn read_upvar_capture<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>)
+ -> ty::UpvarCapture<'tcx> {
self.read_enum("UpvarCapture", |this| {
let variants = ["ByValue", "ByRef"];
this.read_enum_variant(&variants, |this, i| {
this.read_enum_variant(&variants, |this, i| {
Ok(match i {
0 => {
- let r: ty::Region =
+ let r: &'tcx ty::Region =
this.read_enum_variant_arg(0, |this| {
Ok(this.read_region(dcx))
}).unwrap();
Decodable::decode(this)
}).unwrap();
- adjustment::AutoPtr(dcx.tcx.mk_region(r), m)
+ adjustment::AutoPtr(r, m)
}
1 => {
let m: hir::Mutability =
let item_node_id = match ii {
&InlinedItem::Item(_, ref i) => i.id,
&InlinedItem::TraitItem(_, ref ti) => ti.id,
- &InlinedItem::ImplItem(_, ref ii) => ii.id,
- &InlinedItem::Foreign(_, ref fi) => fi.id
+ &InlinedItem::ImplItem(_, ref ii) => ii.id
};
copy_item_type(dcx, item_node_id, orig_did);
if name.is_empty() {
match span {
Some(span) => {
- span_err!(sess, span, E0454,
- "#[link(name = \"\")] given with empty name");
+ struct_span_err!(sess, span, E0454,
+ "#[link(name = \"\")] given with empty name")
+ .span_label(span, &format!("empty name given"))
+ .emit();
}
None => {
sess.err("empty library name given via `-l`");
decoder::closure_ty(&cdata, def_id.index, tcx)
}
- fn item_variances(&self, def: DefId) -> ty::ItemVariances {
+ fn item_variances(&self, def: DefId) -> Vec<ty::Variance> {
self.dep_graph.read(DepNode::MetaData(def));
let cdata = self.get_crate_data(def.krate);
decoder::get_item_variances(&cdata, def.index)
decoder::is_foreign_item(&cdata, did.index)
}
- fn is_static_method(&self, def: DefId) -> bool
- {
- self.dep_graph.read(DepNode::MetaData(def));
- let cdata = self.get_crate_data(def.krate);
- decoder::is_static_method(&cdata, def.index)
- }
-
fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool
{
self.do_is_statically_included_foreign_item(id)
let inlined_root_node_id = find_inlined_item_root(item.id);
cache_inlined_item(def_id, item.id, inlined_root_node_id);
}
- decoder::FoundAst::Found(&InlinedItem::Foreign(d, ref item)) => {
- assert_eq!(d, def_id);
- let inlined_root_node_id = find_inlined_item_root(item.id);
- cache_inlined_item(def_id, item.id, inlined_root_node_id);
- }
decoder::FoundAst::FoundParent(parent_did, item) => {
let inlined_root_node_id = find_inlined_item_root(item.id);
cache_inlined_item(parent_did, item.id, inlined_root_node_id);
use rustc::mir;
use rustc::mir::visit::MutVisitor;
+use rustc::mir::repr::Location;
use std::cell::Cell;
use std::io;
pub fn crate_rustc_version(data: &[u8]) -> Option<String> {
let doc = rbml::Doc::new(data);
- reader::maybe_get_doc(doc, tag_rustc_version).map(|s| s.as_str())
+ reader::maybe_get_doc(doc, tag_rustc_version).map(|s| s.to_string())
}
pub fn load_xrefs(data: &[u8]) -> index::DenseIndex {
fn item_sort(item: rbml::Doc) -> Option<char> {
reader::tagged_docs(item, tag_item_trait_item_sort).nth(0).map(|doc| {
- doc.as_str_slice().as_bytes()[0] as char
+ doc.as_str().as_bytes()[0] as char
})
}
fn maybe_item_name(item: rbml::Doc) -> Option<ast::Name> {
reader::maybe_get_doc(item, tag_paths_data_name).map(|name| {
- let string = name.as_str_slice();
+ let string = name.as_str();
token::intern(string)
})
}
fn parse_associated_type_names(item_doc: rbml::Doc) -> Vec<ast::Name> {
let names_doc = reader::get_doc(item_doc, tag_associated_type_names);
reader::tagged_docs(names_doc, tag_associated_type_name)
- .map(|name_doc| token::intern(name_doc.as_str_slice()))
+ .map(|name_doc| token::intern(name_doc.as_str()))
.collect()
}
let name_doc = reader::get_doc(reexport_doc,
tag_items_data_item_reexport_name);
- let name = name_doc.as_str_slice();
+ let name = name_doc.as_str();
// This reexport may be in yet another crate.
let crate_data = if child_def_id.krate == cdata.cnum {
impl<'v, 'cdata, 'codemap> mir::visit::MutVisitor<'v>
for MirDefIdAndSpanTranslator<'cdata, 'codemap>
{
- fn visit_def_id(&mut self, def_id: &mut DefId) {
+ fn visit_def_id(&mut self, def_id: &mut DefId, _: Location) {
*def_id = translate_def_id(self.crate_metadata, *def_id);
}
}
}
-fn get_explicit_self(item: rbml::Doc) -> ty::ExplicitSelfCategory {
+fn get_explicit_self<'a, 'tcx>(item: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>)
+ -> ty::ExplicitSelfCategory<'tcx> {
fn get_mutability(ch: u8) -> hir::Mutability {
match ch as char {
'i' => hir::MutImmutable,
}
let explicit_self_doc = reader::get_doc(item, tag_item_trait_method_explicit_self);
- let string = explicit_self_doc.as_str_slice();
+ let string = explicit_self_doc.as_str();
let explicit_self_kind = string.as_bytes()[0];
match explicit_self_kind as char {
// FIXME(#4846) expl. region
'&' => {
ty::ExplicitSelfCategory::ByReference(
- ty::ReEmpty,
+ tcx.mk_region(ty::ReEmpty),
get_mutability(string.as_bytes()[1]))
}
_ => bug!("unknown self type code: `{}`", explicit_self_kind as char)
item_name(doc)
}
-pub fn is_static_method(cdata: Cmd, id: DefIndex) -> bool {
- let doc = cdata.lookup_item(id);
- match item_sort(doc) {
- Some('r') | Some('p') => {
- get_explicit_self(doc) == ty::ExplicitSelfCategory::Static
- }
- _ => false
- }
-}
-
pub fn get_impl_or_trait_item<'a, 'tcx>(cdata: Cmd, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> Option<ty::ImplOrTraitItem<'tcx>> {
let item_doc = cdata.lookup_item(id);
"the type {:?} of the method {:?} is not a function?",
ity, name)
};
- let explicit_self = get_explicit_self(item_doc);
+ let explicit_self = get_explicit_self(item_doc, tcx);
ty::MethodTraitItem(Rc::new(ty::Method::new(name,
generics,
}).collect()
}
-pub fn get_item_variances(cdata: Cmd, id: DefIndex) -> ty::ItemVariances {
+pub fn get_item_variances(cdata: Cmd, id: DefIndex) -> Vec<ty::Variance> {
let item_doc = cdata.lookup_item(id);
let variance_doc = reader::get_doc(item_doc, tag_item_variances);
let mut decoder = reader::Decoder::new(variance_doc);
fn get_meta_items(md: rbml::Doc) -> Vec<P<ast::MetaItem>> {
reader::tagged_docs(md, tag_meta_item_word).map(|meta_item_doc| {
let nd = reader::get_doc(meta_item_doc, tag_meta_item_name);
- let n = token::intern_and_get_ident(nd.as_str_slice());
+ let n = token::intern_and_get_ident(nd.as_str());
attr::mk_word_item(n)
}).chain(reader::tagged_docs(md, tag_meta_item_name_value).map(|meta_item_doc| {
let nd = reader::get_doc(meta_item_doc, tag_meta_item_name);
let vd = reader::get_doc(meta_item_doc, tag_meta_item_value);
- let n = token::intern_and_get_ident(nd.as_str_slice());
- let v = token::intern_and_get_ident(vd.as_str_slice());
+ let n = token::intern_and_get_ident(nd.as_str());
+ let v = token::intern_and_get_ident(vd.as_str());
// FIXME (#623): Should be able to decode MetaItemKind::NameValue variants,
// but currently the encoder just drops them
attr::mk_name_value_item_str(n, v)
})).chain(reader::tagged_docs(md, tag_meta_item_list).map(|meta_item_doc| {
let nd = reader::get_doc(meta_item_doc, tag_meta_item_name);
- let n = token::intern_and_get_ident(nd.as_str_slice());
+ let n = token::intern_and_get_ident(nd.as_str());
let subitems = get_meta_items(meta_item_doc);
attr::mk_list_item(n, subitems)
})).collect()
fn docstr(doc: rbml::Doc, tag_: usize) -> String {
let d = reader::get_doc(doc, tag_);
- d.as_str_slice().to_string()
+ d.as_str().to_string()
}
reader::tagged_docs(depsdoc, tag_crate_dep).enumerate().map(|(crate_num, depdoc)| {
pub fn maybe_get_crate_name(data: &[u8]) -> Option<&str> {
let cratedoc = rbml::Doc::new(data);
reader::maybe_get_doc(cratedoc, tag_crate_crate_name).map(|doc| {
- doc.as_str_slice()
+ doc.as_str()
})
}
pub fn get_crate_disambiguator<'a>(data: &'a [u8]) -> &'a str {
let crate_doc = rbml::Doc::new(data);
let disambiguator_doc = reader::get_doc(crate_doc, tag_crate_disambiguator);
- let slice: &'a str = disambiguator_doc.as_str_slice();
+ let slice: &'a str = disambiguator_doc.as_str();
slice
}
tag_dylib_dependency_formats);
let mut result = Vec::new();
- debug!("found dylib deps: {}", formats.as_str_slice());
- for spec in formats.as_str_slice().split(',') {
+ debug!("found dylib deps: {}", formats.as_str());
+ for spec in formats.as_str().split(',') {
if spec.is_empty() { continue }
- let cnum = spec.split(':').nth(0).unwrap();
- let link = spec.split(':').nth(1).unwrap();
+ let mut split = spec.split(':');
+ let cnum = split.next().unwrap();
+ let link = split.next().unwrap();
let cnum: ast::CrateNum = cnum.parse().unwrap();
let cnum = cdata.cnum_map.borrow()[cnum];
result.push((cnum, if link == "d" {
match reader::maybe_get_doc(method_doc, tag_method_argument_names) {
Some(args_doc) => {
reader::tagged_docs(args_doc, tag_method_argument_name).map(|name_doc| {
- name_doc.as_str_slice().to_string()
+ name_doc.as_str().to_string()
}).collect()
},
None => vec![],
let mut decoder = reader::Decoder::new(def_key_doc);
let simple_key = def_key::DefKey::decode(&mut decoder).unwrap();
let name = reader::maybe_get_doc(item_doc, tag_paths_data_name).map(|name| {
- token::intern(name.as_str_slice()).as_str()
+ token::intern(name.as_str()).as_str()
});
def_key::recover_def_key(simple_key, name)
}
E0454: r##"
A link name was given with an empty name. Erroneous code example:
-```
+```compile_fail,E0454
#[link(name = "")] extern {} // error: #[link(name = "")] given with empty name
```
Erroneous code example:
-```compile_fail"
+```compile_fail,E0455
#[link(name = "FooCoreServices", kind = "framework")] extern {}
// OS used to compile is Linux for example
```
E0458: r##"
An unknown "kind" was specified for a link attribute. Erroneous code example:
-```
+```compile_fail,E0458
#[link(kind = "wonderful_unicorn")] extern {}
// error: unknown kind: `wonderful_unicorn`
```
E0459: r##"
A link was used without a name parameter. Erroneous code example:
-```
+```compile_fail,E0459
#[link(kind = "dylib")] extern {}
// error: #[link(...)] specified without `name = "foo"`
```
E0463: r##"
A plugin/crate was declared but cannot be found. Erroneous code example:
-```
+```compile_fail,E0463
#![feature(plugin)]
#![plugin(cookie_monster)] // error: can't find crate for `cookie_monster`
extern crate cake_is_a_lie; // error: can't find crate for `cake_is_a_lie`
use std::io::{Cursor, SeekFrom};
use std::rc::Rc;
use std::u32;
-use syntax::abi::Abi;
use syntax::ast::{self, NodeId, Name, CRATE_NODE_ID, CrateNum};
use syntax::attr::{self,AttrMetaMethods,AttributeMethods};
use errors::Handler;
if body.is_some() {
encode_item_sort(self.rbml_w, 'p');
- encode_inlined_item(ecx,
- self.rbml_w,
- InlinedItemRef::TraitItem(
- trait_def_id,
- trait_item));
self.encode_mir(trait_item.id);
} else {
encode_item_sort(self.rbml_w, 'r');
let types = generics.parent_types as usize + generics.types.len();
let needs_inline = types > 0 || is_default_impl ||
attr::requests_inline(&impl_item.attrs);
- if needs_inline || sig.constness == hir::Constness::Const {
+ if sig.constness == hir::Constness::Const {
encode_inlined_item(
ecx,
self.rbml_w,
InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id),
impl_item));
+ }
+ if needs_inline || sig.constness == hir::Constness::Const {
self.encode_mir(impl_item.id);
}
encode_constness(self.rbml_w, sig.constness);
xrefs: FnvHashMap<XRef<'tcx>, u32>)
{
let mut xref_positions = vec![0; xrefs.len()];
+
+ // Encode XRefs sorted by their ID
+ let mut sorted_xrefs: Vec<_> = xrefs.into_iter().collect();
+ sorted_xrefs.sort_by_key(|&(_, id)| id);
+
rbml_w.start_tag(tag_xref_data);
- for (xref, id) in xrefs.into_iter() {
+ for (xref, id) in sorted_xrefs.into_iter() {
xref_positions[id as usize] = rbml_w.mark_stable_position() as u32;
match xref {
XRef::Predicate(p) => {
encode_name(self.rbml_w, item.name);
encode_attributes(self.rbml_w, &item.attrs);
let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs);
- if needs_inline || constness == hir::Constness::Const {
+ if constness == hir::Constness::Const {
encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item));
+ }
+ if needs_inline || constness == hir::Constness::Const {
self.encode_mir(item.id);
}
encode_constness(self.rbml_w, constness);
for v in &enum_definition.variants {
encode_variant_id(self.rbml_w, ecx.tcx.map.local_def_id(v.node.data.id()));
}
- encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item));
- self.encode_mir(item.id);
// Encode inherent implementations for self enumeration.
encode_inherent_implementations(ecx, self.rbml_w, def_id);
needs to know*/
self.encode_struct_fields(variant);
- encode_inlined_item(ecx, self.rbml_w, InlinedItemRef::Item(def_id, item));
- self.encode_mir(item.id);
-
// Encode inherent implementations for self structure.
encode_inherent_implementations(ecx, self.rbml_w, def_id);
let ecx = self.ecx();
debug!("writing foreign item {}", ecx.tcx.node_path_str(nitem.id));
- let abi = ecx.tcx.map.get_foreign_abi(nitem.id);
encode_def_id_and_key(ecx, self.rbml_w, def_id);
let parent_id = ecx.tcx.map.get_parent(nitem.id);
encode_family(self.rbml_w, FN_FAMILY);
self.encode_bounds_and_type_for_item(nitem.id);
encode_name(self.rbml_w, nitem.name);
- if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
- encode_inlined_item(ecx,
- self.rbml_w,
- InlinedItemRef::Foreign(def_id, nitem));
- self.encode_mir(nitem.id);
- }
encode_attributes(self.rbml_w, &nitem.attrs);
let stab = ecx.tcx.lookup_stability(ecx.tcx.map.local_def_id(nitem.id));
let depr = ecx.tcx.lookup_deprecation(ecx.tcx.map.local_def_id(nitem.id));
use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch};
use rustc::session::search_paths::PathKind;
use rustc::util::common;
+use rustc::util::nodemap::FnvHashMap;
use rustc_llvm as llvm;
use rustc_llvm::{False, ObjectFile, mk_section_iter};
use rustc_back::target::Target;
use std::cmp;
-use std::collections::HashMap;
use std::fmt;
use std::fs;
use std::io;
let rlib_prefix = format!("lib{}", self.crate_name);
let staticlib_prefix = format!("{}{}", staticpair.0, self.crate_name);
- let mut candidates = HashMap::new();
+ let mut candidates = FnvHashMap();
let mut staticlibs = vec!();
// First, find all possible candidate rlibs and dylibs purely based on
let hash_str = hash.to_string();
let slot = candidates.entry(hash_str)
- .or_insert_with(|| (HashMap::new(), HashMap::new()));
+ .or_insert_with(|| (FnvHashMap(), FnvHashMap()));
let (ref mut rlibs, ref mut dylibs) = *slot;
fs::canonicalize(path).map(|p| {
if rlib {
// A Library candidate is created if the metadata for the set of
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
- let mut libraries = HashMap::new();
+ let mut libraries = FnvHashMap();
for (_hash, (rlibs, dylibs)) in candidates {
let mut slot = None;
let rlib = self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot);
// read the metadata from it if `*slot` is `None`. If the metadata couldn't
// be read, it is assumed that the file isn't a valid rust library (no
// errors are emitted).
- fn extract_one(&mut self, m: HashMap<PathBuf, PathKind>, flavor: CrateFlavor,
+ fn extract_one(&mut self, m: FnvHashMap<PathBuf, PathKind>, flavor: CrateFlavor,
slot: &mut Option<(Svh, MetadataBlob)>) -> Option<(PathBuf, PathKind)> {
let mut ret: Option<(PathBuf, PathKind)> = None;
let mut error = 0;
// rlibs/dylibs.
let sess = self.sess;
let dylibname = self.dylibname();
- let mut rlibs = HashMap::new();
- let mut dylibs = HashMap::new();
+ let mut rlibs = FnvHashMap();
+ let mut dylibs = FnvHashMap();
{
let locs = locs.map(|l| PathBuf::from(l)).filter(|loc| {
if !loc.exists() {
use cstore::CStore;
use rustc::session::Session;
+use rustc::util::nodemap::{FnvHashSet, FnvHashMap};
-use std::collections::{HashSet, HashMap};
use syntax::parse::token;
use syntax::ast;
use syntax::attr;
span_err!(a, b, E0467, "bad macro reexport");
}
-pub type MacroSelection = HashMap<token::InternedString, Span>;
+pub type MacroSelection = FnvHashMap<token::InternedString, Span>;
impl<'a> ext::base::MacroLoader for MacroLoader<'a> {
fn load_crate(&mut self, extern_crate: &ast::Item, allows_macros: bool) -> Vec<ast::MacroDef> {
// Parse the attributes relating to macros.
- let mut import = Some(HashMap::new()); // None => load all
- let mut reexport = HashMap::new();
+ let mut import = Some(FnvHashMap()); // None => load all
+ let mut reexport = FnvHashMap();
for attr in &extern_crate.attrs {
let mut used = true;
}
let mut macros = Vec::new();
- let mut seen = HashSet::new();
+ let mut seen = FnvHashSet();
for mut def in self.reader.read_exported_macros(vi) {
let name = def.ident.name.as_str();
use rustc::hir::def_id::{DefId, DefIndex};
use middle::region;
-use rustc::ty::subst::Substs;
+use rustc::ty::subst::{Kind, Substs};
use rustc::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
use rbml;
}
pub fn parse_substs(&mut self) -> &'tcx Substs<'tcx> {
- let mut regions = vec![];
- let mut types = vec![];
+ let mut params = vec![];
assert_eq!(self.next(), '[');
- while self.peek() != '|' {
- regions.push(self.parse_region());
- }
- assert_eq!(self.next(), '|');
while self.peek() != ']' {
- types.push(self.parse_ty());
+ let k = match self.next() {
+ 'r' => Kind::from(self.parse_region()),
+ 't' => Kind::from(self.parse_ty()),
+ _ => bug!()
+ };
+ params.push(k);
}
assert_eq!(self.next(), ']');
- Substs::new(self.tcx, types, regions)
+ Substs::new(self.tcx, params)
}
pub fn parse_generics(&mut self) -> &'tcx ty::Generics<'tcx> {
}
}
- pub fn parse_region(&mut self) -> ty::Region {
- match self.next() {
+ pub fn parse_region(&mut self) -> &'tcx ty::Region {
+ self.tcx.mk_region(match self.next() {
'b' => {
assert_eq!(self.next(), '[');
let id = ty::DebruijnIndex::new(self.parse_u32());
'e' => ty::ReEmpty,
'E' => ty::ReErased,
_ => bug!("parse_region: bad input")
- }
+ })
}
fn parse_scope(&mut self) -> region::CodeExtent {
'~' => return tcx.mk_box(self.parse_ty()),
'*' => return tcx.mk_ptr(self.parse_mt()),
'&' => {
- let r = self.parse_region();
- let mt = self.parse_mt();
- return tcx.mk_ref(tcx.mk_region(r), mt);
+ return tcx.mk_ref(self.parse_region(), self.parse_mt());
}
'V' => {
let t = self.parse_ty();
}
}
- fn parse_region_param_def(&mut self) -> ty::RegionParameterDef {
+ fn parse_region_param_def(&mut self) -> ty::RegionParameterDef<'tcx> {
let name = self.parse_name(':');
let def_id = self.parse_def();
let index = self.parse_u32();
}
- fn parse_object_lifetime_default(&mut self) -> ty::ObjectLifetimeDefault {
+ fn parse_object_lifetime_default(&mut self) -> ty::ObjectLifetimeDefault<'tcx> {
match self.next() {
'a' => ty::ObjectLifetimeDefault::Ambiguous,
'b' => ty::ObjectLifetimeDefault::BaseDefault,
ty::TyRawPtr(mt) => { write!(w, "*"); enc_mt(w, cx, mt); }
ty::TyRef(r, mt) => {
write!(w, "&");
- enc_region(w, cx, *r);
+ enc_region(w, cx, r);
enc_mt(w, cx, mt);
}
ty::TyArray(t, sz) => {
pub fn enc_substs<'a, 'tcx>(w: &mut Cursor<Vec<u8>>, cx: &ctxt<'a, 'tcx>,
substs: &Substs<'tcx>) {
write!(w, "[");
- for &r in &substs.regions {
- enc_region(w, cx, r);
- }
- write!(w, "|");
- for &ty in &substs.types {
- enc_ty(w, cx, ty);
+ for &k in substs.params() {
+ if let Some(ty) = k.as_type() {
+ write!(w, "t");
+ enc_ty(w, cx, ty);
+ } else if let Some(r) = k.as_region() {
+ write!(w, "r");
+ enc_region(w, cx, r);
+ } else {
+ bug!()
+ }
}
write!(w, "]");
}
}
}
-pub fn enc_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, r: ty::Region) {
- match r {
+pub fn enc_region(w: &mut Cursor<Vec<u8>>, cx: &ctxt, r: &ty::Region) {
+ match *r {
ty::ReLateBound(id, br) => {
write!(w, "b[{}|", id.depth);
enc_bound_region(w, cx, br);
//! Routines for manipulating the control-flow graph.
-use build::{CFG, Location};
+use build::CFG;
use rustc::mir::repr::*;
impl<'tcx> CFG<'tcx> {
var_id: NodeId,
var_ty: Ty<'tcx>,
mutability: Mutability,
- binding_mode: BindingMode,
+ binding_mode: BindingMode<'tcx>,
}
#[derive(Clone, Debug)]
assert!(ty.is_slice());
let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap();
let ty = mt.ty;
- let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]);
+ let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty]);
let bool_ty = self.hir.bool_ty();
let eq_result = self.temp(bool_ty);
pub postdoms: Vec<Location>,
}
-#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
-pub struct Location {
- /// the location is within this block
- pub block: BasicBlock,
-
- /// the location is the start of the this statement; or, if `statement_index`
- /// == num-statements, then the start of the terminator.
- pub statement_index: usize,
-}
-
pub type ScopeAuxiliaryVec = IndexVec<ScopeId, ScopeAuxiliary>;
///////////////////////////////////////////////////////////////////////////
use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary, ScopeId};
use rustc::middle::region::{CodeExtent, CodeExtentData};
use rustc::middle::lang_items;
-use rustc::ty::subst::{Substs, Subst};
+use rustc::ty::subst::{Kind, Substs, Subst};
use rustc::ty::{Ty, TyCtxt};
use rustc::mir::repr::*;
use syntax_pos::Span;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::fnv::FnvHashMap;
+use std::iter;
+
pub struct Scope<'tcx> {
/// the scope-id within the scope_auxiliary
id: ScopeId,
-> TerminatorKind<'tcx> {
let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
.unwrap_or_else(|e| tcx.sess.fatal(&e));
- let substs = Substs::new(tcx, vec![data.item_ty], vec![]);
+ let substs = Substs::new(tcx, iter::once(Kind::from(data.item_ty)));
TerminatorKind::Call {
func: Operand::Constant(Constant {
span: data.span,
the heap at runtime, and therefore cannot be done at compile time. Erroneous
code example:
-```compile_fail
+```compile_fail,E0010
#![feature(box_syntax)]
const CON : Box<i32> = box 0;
variable cannot refer to a static variable. For example, `Y` cannot refer to
`X` here:
-```compile_fail
+```compile_fail,E0013
static X: i32 = 42;
const Y: i32 = X;
```
Blocks in constants may only contain items (such as constant, function
definition, etc...) and a tail expression. Erroneous code example:
-```compile_fail
+```compile_fail,E0016
const FOO: i32 = { let x = 0; x }; // 'x' isn't an item!
```
References in statics and constants may only refer to immutable values.
Erroneous code example:
-```compile_fail
+```compile_fail,E0017
static X: i32 = 1;
const C: i32 = 2;
For example, if you write:
-```compile_fail
+```compile_fail,E0018
static MY_STATIC: u32 = 42;
static MY_STATIC_ADDR: usize = &MY_STATIC as *const _ as usize;
static WHAT: usize = (MY_STATIC_ADDR^17) + MY_STATIC_ADDR;
fn main() {
const FOO: Test = Test::V1;
- const A: i32 = FOO.test(); // You can't call Test::func() here !
+ const A: i32 = FOO.test(); // You can't call Test::func() here!
}
```
```
"##,
-
E0395: r##"
The value assigned to a constant scalar must be known at compile time,
which is not the case when comparing raw pointers.
Erroneous code example:
-```compile_fail
+```compile_fail,E0395
static FOO: i32 = 42;
static BAR: i32 = 42;
(or even link-time), which means it can't be used in a constant
expression. Erroneous code example:
-```compile_fail
+```compile_fail,E0396
const REG_ADDR: *const u8 = 0x5f3759df as *const u8;
const VALUE: u8 = unsafe { *REG_ADDR };
A borrow of a constant containing interior mutability was attempted. Erroneous
code example:
-```compile_fail
+```compile_fail,E0492
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
const A: AtomicUsize = ATOMIC_USIZE_INIT;
You can also have this error while using a cell type:
-```compile_fail
+```compile_fail,E0492
#![feature(const_fn)]
use std::cell::Cell;
A type with a destructor was assigned to an invalid type of variable. Erroneous
code example:
-```compile_fail
+```compile_fail,E0493
struct Foo {
a: u32
}
A reference of an interior static was assigned to another const/static.
Erroneous code example:
-```compile_fail
+```compile_fail,E0494
struct Foo {
a: u32
}
region, ty::TypeAndMut { ty: expr.ty, mutbl: mutbl }),
span: expr.span,
kind: ExprKind::Borrow {
- region: *region,
+ region: region,
borrow_kind: to_borrow_kind(mutbl),
arg: expr.to_ref()
}
ty: adjusted_ty,
span: self.span,
kind: ExprKind::Borrow {
- region: *r,
+ region: r,
borrow_kind: to_borrow_kind(m),
arg: expr.to_ref(),
},
ty: cx.tcx.mk_ref(region, ty::TypeAndMut { ty: expr.ty, mutbl: m }),
span: self.span,
kind: ExprKind::Borrow {
- region: *region,
+ region: region,
borrow_kind: to_borrow_kind(m),
arg: expr.to_ref(),
},
_ => span_bug!(expr.span, "type of & not region"),
};
ExprKind::Borrow {
- region: *region,
+ region: region,
borrow_kind: to_borrow_kind(mutbl),
arg: expr.to_ref(),
}
ExprKind::Deref {
arg: Expr {
temp_lifetime: temp_lifetime,
- ty: cx.tcx.mk_ref(
- cx.tcx.mk_region(borrow.region),
+ ty: cx.tcx.mk_ref(borrow.region,
ty::TypeAndMut {
ty: var_ty,
mutbl: borrow.kind.to_mutbl_lossy()
}
PassArgs::ByRef => {
- let scope = cx.tcx.region_maps.node_extent(expr.id);
- let region = cx.tcx.mk_region(ty::ReScope(scope));
+ let region = cx.tcx.node_scope_region(expr.id);
let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id);
argrefs.extend(
args.iter()
temp_lifetime: temp_lifetime,
ty: adjusted_ty,
span: expr.span,
- kind: ExprKind::Borrow { region: *region,
+ kind: ExprKind::Borrow { region: region,
borrow_kind: BorrowKind::Shared,
arg: arg.to_ref() }
}.to_ref()
trait_def_id: DefId,
method_name: &str,
self_ty: Ty<'tcx>,
- params: Vec<Ty<'tcx>>)
+ params: &[Ty<'tcx>])
-> (Ty<'tcx>, Literal<'tcx>) {
let method_name = token::intern(method_name);
- let substs = Substs::new_trait(self.tcx, params, vec![], self_ty);
+ let substs = Substs::new_trait(self.tcx, self_ty, params);
for trait_item in self.tcx.trait_items(trait_def_id).iter() {
match *trait_item {
ty::ImplOrTraitItem::MethodTraitItem(ref method) => {
let id = self.cx.tcx.expect_def(pat.id).var_id();
let var_ty = self.cx.tcx.node_id_to_type(pat.id);
let region = match var_ty.sty {
- ty::TyRef(&r, _) => Some(r),
+ ty::TyRef(r, _) => Some(r),
_ => None,
};
let (mutability, mode) = match bm {
id: DefId,
},
Borrow {
- region: Region,
+ region: &'tcx Region,
borrow_kind: BorrowKind,
arg: ExprRef<'tcx>,
},
Binding {
mutability: Mutability,
name: ast::Name,
- mode: BindingMode,
+ mode: BindingMode<'tcx>,
var: ast::NodeId,
ty: Ty<'tcx>,
subpattern: Option<Pattern<'tcx>>,
}
#[derive(Copy, Clone, Debug)]
-pub enum BindingMode {
+pub enum BindingMode<'tcx> {
ByValue,
- ByRef(Region, BorrowKind),
+ ByRef(&'tcx Region, BorrowKind),
}
#[derive(Clone, Debug)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use build::{Location, ScopeAuxiliaryVec, ScopeId};
+use build::{ScopeAuxiliaryVec, ScopeId};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::mir::repr::*;
pub struct NoLandingPads;
impl<'tcx> MutVisitor<'tcx> for NoLandingPads {
- fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>) {
+ fn visit_terminator(&mut self,
+ bb: BasicBlock,
+ terminator: &mut Terminator<'tcx>,
+ location: Location) {
match terminator.kind {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
unwind.take();
},
}
- self.super_terminator(bb, terminator);
+ self.super_terminator(bb, terminator, location);
}
}
use rustc::ty::TyCtxt;
use syntax_pos::Span;
-use build::Location;
-
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use std::mem;
+use std::usize;
/// State of a temporary during collection and promotion.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct TempCollector {
temps: IndexVec<Temp, TempState>,
- location: Location,
span: Span
}
impl<'tcx> Visitor<'tcx> for TempCollector {
- fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) {
- self.super_lvalue(lvalue, context);
+ fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext, location: Location) {
+ self.super_lvalue(lvalue, context, location);
if let Lvalue::Temp(index) = *lvalue {
// Ignore drops, if the temp gets promoted,
// then it's constant and thus drop is noop.
LvalueContext::Store |
LvalueContext::Call => {
*temp = TempState::Defined {
- location: self.location,
+ location: location,
uses: 0
};
return;
fn visit_source_info(&mut self, source_info: &SourceInfo) {
self.span = source_info.span;
}
-
- fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>) {
- assert_eq!(self.location.block, bb);
- self.super_statement(bb, statement);
- self.location.statement_index += 1;
- }
-
- fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) {
- self.location.statement_index = 0;
- self.location.block = bb;
- self.super_basic_block_data(bb, data);
- }
}
pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec<Temp, TempState> {
let mut collector = TempCollector {
temps: IndexVec::from_elem(TempState::Undefined, &mir.temp_decls),
- location: Location {
- block: START_BLOCK,
- statement_index: 0
- },
span: mir.span
};
for (bb, data) in rpo {
// Then, recurse for components in the Rvalue or Call.
if stmt_idx < no_stmts {
- self.visit_rvalue(rvalue.as_mut().unwrap());
+ self.visit_rvalue(rvalue.as_mut().unwrap(), Location {
+ block: bb,
+ statement_index: stmt_idx
+ });
} else {
- self.visit_terminator_kind(bb, call.as_mut().unwrap());
+ self.visit_terminator_kind(bb, call.as_mut().unwrap(), Location {
+ block: bb,
+ statement_index: no_stmts
+ });
}
let new_temp = self.promoted.temp_decls.push(TempDecl {
}
}
};
- self.visit_rvalue(&mut rvalue);
+ self.visit_rvalue(&mut rvalue, Location{
+ block: BasicBlock::new(0),
+ statement_index: usize::MAX
+ });
self.assign(Lvalue::ReturnPointer, rvalue, span);
self.source.promoted.push(self.promoted);
}
/// Replaces all temporaries with their promoted counterparts.
impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
- fn visit_lvalue(&mut self, lvalue: &mut Lvalue<'tcx>, context: LvalueContext) {
+ fn visit_lvalue(&mut self,
+ lvalue: &mut Lvalue<'tcx>,
+ context: LvalueContext,
+ location: Location) {
if let Lvalue::Temp(ref mut temp) = *lvalue {
*temp = self.promote_temp(*temp);
}
- self.super_lvalue(lvalue, context);
+ self.super_lvalue(lvalue, context, location);
}
}
use std::collections::hash_map::Entry;
use std::fmt;
-
-use build::Location;
+use std::usize;
use super::promote_consts::{self, Candidate, TempState};
return_qualif: Option<Qualif>,
qualif: Qualif,
const_fn_arg_vars: BitVector,
- location: Location,
temp_promotion_state: IndexVec<Temp, TempState>,
promotion_candidates: Vec<Candidate>
}
return_qualif: None,
qualif: Qualif::empty(),
const_fn_arg_vars: BitVector::new(mir.var_decls.len()),
- location: Location {
- block: START_BLOCK,
- statement_index: 0
- },
temp_promotion_state: temps,
promotion_candidates: vec![]
}
}
/// Assign the current qualification to the given destination.
- fn assign(&mut self, dest: &Lvalue<'tcx>) {
+ fn assign(&mut self, dest: &Lvalue<'tcx>, location: Location) {
let qualif = self.qualif;
let span = self.span;
let store = |slot: &mut Option<Qualif>| {
// This must be an explicit assignment.
_ => {
// Catch more errors in the destination.
- self.visit_lvalue(dest, LvalueContext::Store);
+ self.visit_lvalue(dest, LvalueContext::Store, location);
self.statement_like();
}
}
self.qualif = Qualif::NOT_CONST;
for index in 0..mir.var_decls.len() {
if !self.const_fn_arg_vars.contains(index) {
- self.assign(&Lvalue::Var(Var::new(index)));
+ self.assign(&Lvalue::Var(Var::new(index)), Location {
+ block: bb,
+ statement_index: usize::MAX,
+ });
}
}
/// For functions (constant or not), it also records
/// candidates for promotion in promotion_candidates.
impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> {
- fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) {
+ fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext, location: Location) {
match *lvalue {
Lvalue::Arg(_) => {
self.add(Qualif::FN_ARGUMENT);
}
Lvalue::Projection(ref proj) => {
self.nest(|this| {
- this.super_lvalue(lvalue, context);
+ this.super_lvalue(lvalue, context, location);
match proj.elem {
ProjectionElem::Deref => {
if !this.try_consume() {
}
}
- fn visit_operand(&mut self, operand: &Operand<'tcx>) {
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
match *operand {
Operand::Consume(_) => {
self.nest(|this| {
- this.super_operand(operand);
+ this.super_operand(operand, location);
this.try_consume();
});
}
if let Literal::Item { def_id, substs } = constant.literal {
// Don't peek inside generic (associated) constants.
- if !substs.types.is_empty() {
+ if substs.types().next().is_some() {
self.add_type(constant.ty);
} else {
let qualif = qualify_const_item_cached(self.tcx,
}
}
- fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>) {
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
// Recurse through operands and lvalues.
- self.super_rvalue(rvalue);
+ self.super_rvalue(rvalue, location);
match *rvalue {
Rvalue::Use(_) |
}
// We might have a candidate for promotion.
- let candidate = Candidate::Ref(self.location);
+ let candidate = Candidate::Ref(location);
if self.mode == Mode::Fn || self.mode == Mode::ConstFn {
if !self.qualif.intersects(Qualif::NEVER_PROMOTE) {
// We can only promote direct borrows of temps.
}
}
- fn visit_terminator_kind(&mut self, bb: BasicBlock, kind: &TerminatorKind<'tcx>) {
+ fn visit_terminator_kind(&mut self,
+ bb: BasicBlock,
+ kind: &TerminatorKind<'tcx>,
+ location: Location) {
if let TerminatorKind::Call { ref func, ref args, ref destination, .. } = *kind {
- self.visit_operand(func);
+ self.visit_operand(func, location);
let fn_ty = func.ty(self.mir, self.tcx);
let (is_shuffle, is_const_fn) = match fn_ty.sty {
for (i, arg) in args.iter().enumerate() {
self.nest(|this| {
- this.visit_operand(arg);
+ this.visit_operand(arg, location);
if is_shuffle && i == 2 && this.mode == Mode::Fn {
let candidate = Candidate::ShuffleIndices(bb);
if !this.qualif.intersects(Qualif::NEVER_PROMOTE) {
self.deny_drop();
}
}
- self.assign(dest);
+ self.assign(dest, location);
}
} else {
// Qualify any operands inside other terminators.
- self.super_terminator_kind(bb, kind);
+ self.super_terminator_kind(bb, kind, location);
}
}
- fn visit_assign(&mut self, _: BasicBlock, dest: &Lvalue<'tcx>, rvalue: &Rvalue<'tcx>) {
- self.visit_rvalue(rvalue);
+ fn visit_assign(&mut self,
+ _: BasicBlock,
+ dest: &Lvalue<'tcx>,
+ rvalue: &Rvalue<'tcx>,
+ location: Location) {
+ self.visit_rvalue(rvalue, location);
// Check the allowed const fn argument forms.
if let (Mode::ConstFn, &Lvalue::Var(index)) = (self.mode, dest) {
}
}
- self.assign(dest);
+ self.assign(dest, location);
}
fn visit_source_info(&mut self, source_info: &SourceInfo) {
self.span = source_info.span;
}
- fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>) {
- assert_eq!(self.location.block, bb);
+ fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>, location: Location) {
self.nest(|this| {
this.visit_source_info(&statement.source_info);
match statement.kind {
StatementKind::Assign(ref lvalue, ref rvalue) => {
- this.visit_assign(bb, lvalue, rvalue);
+ this.visit_assign(bb, lvalue, rvalue, location);
}
StatementKind::SetDiscriminant { .. } |
StatementKind::StorageLive(_) |
StatementKind::StorageDead(_) => {}
}
});
- self.location.statement_index += 1;
- }
-
- fn visit_terminator(&mut self, bb: BasicBlock, terminator: &Terminator<'tcx>) {
- assert_eq!(self.location.block, bb);
- self.nest(|this| this.super_terminator(bb, terminator));
}
- fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) {
- self.location.statement_index = 0;
- self.location.block = bb;
- self.super_basic_block_data(bb, data);
+ fn visit_terminator(&mut self,
+ bb: BasicBlock,
+ terminator: &Terminator<'tcx>,
+ location: Location) {
+ self.nest(|this| this.super_terminator(bb, terminator, location));
}
}
}
}
- fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, _context: visit::LvalueContext) {
- self.sanitize_lvalue(lvalue);
+ fn visit_lvalue(&mut self,
+ lvalue: &Lvalue<'tcx>,
+ _context: visit::LvalueContext,
+ location: Location) {
+ self.sanitize_lvalue(lvalue, location);
}
- fn visit_constant(&mut self, constant: &Constant<'tcx>) {
- self.super_constant(constant);
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ self.super_constant(constant, location);
self.sanitize_type(constant, constant.ty);
}
- fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>) {
- self.super_rvalue(rvalue);
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
if let Some(ty) = rvalue.ty(self.mir, self.tcx()) {
self.sanitize_type(rvalue, ty);
}
}
}
- fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>) -> LvalueTy<'tcx> {
+ fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>, location: Location) -> LvalueTy<'tcx> {
debug!("sanitize_lvalue: {:?}", lvalue);
match *lvalue {
Lvalue::Var(index) => LvalueTy::Ty { ty: self.mir.var_decls[index].ty },
LvalueTy::Ty { ty: self.mir.return_ty }
}
Lvalue::Projection(ref proj) => {
- let base_ty = self.sanitize_lvalue(&proj.base);
+ let base_ty = self.sanitize_lvalue(&proj.base, location);
if let LvalueTy::Ty { ty } = base_ty {
if ty.references_error() {
assert!(self.errors_reported);
return LvalueTy::Ty { ty: self.tcx().types.err };
}
}
- self.sanitize_projection(base_ty, &proj.elem, lvalue)
+ self.sanitize_projection(base_ty, &proj.elem, lvalue, location)
}
}
}
fn sanitize_projection(&mut self,
base: LvalueTy<'tcx>,
pi: &LvalueElem<'tcx>,
- lvalue: &Lvalue<'tcx>)
+ lvalue: &Lvalue<'tcx>,
+ location: Location)
-> LvalueTy<'tcx> {
debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue);
let tcx = self.tcx();
}
}
ProjectionElem::Index(ref i) => {
- self.visit_operand(i);
+ self.visit_operand(i, location);
let index_ty = i.ty(self.mir, tcx);
if index_ty != tcx.types.usize {
LvalueTy::Ty {
borrow_id: ast::NodeId,
_borrow_span: Span,
cmt: mc::cmt<'tcx>,
- _loan_region: ty::Region,
+ _loan_region: &'tcx ty::Region,
bk: ty::BorrowKind,
loan_cause: euv::LoanCause) {
// Kind of hacky, but we allow Unsafe coercions in constants.
_borrow_id: ast::NodeId,
_borrow_span: Span,
_cmt: mc::cmt,
- _loan_region: ty::Region,
+ _loan_region: &'tcx ty::Region,
_bk: ty::BorrowKind,
_loan_cause: euv::LoanCause) {
}
}), ..}) => ty,
_ => expr_ty
}.ty_adt_def().unwrap();
- let any_priv = def.struct_variant().fields.iter().any(|f| {
- !f.vis.is_accessible_from(self.curitem, &self.tcx.map)
- });
- if any_priv {
- span_err!(self.tcx.sess, expr.span, E0450,
- "cannot invoke tuple struct constructor with private \
- fields");
+
+ let private_indexes : Vec<_> = def.struct_variant().fields.iter().enumerate()
+ .filter(|&(_,f)| {
+ !f.vis.is_accessible_from(self.curitem, &self.tcx.map)
+ }).map(|(n,&_)|n).collect();
+
+ if !private_indexes.is_empty() {
+
+ let mut error = struct_span_err!(self.tcx.sess, expr.span, E0450,
+ "cannot invoke tuple struct constructor \
+ with private fields");
+ error.span_label(expr.span,
+ &format!("cannot construct with a private field"));
+
+ if let Some(def_id) = self.tcx.map.as_local_node_id(def.did) {
+ if let Some(hir::map::NodeItem(node)) = self.tcx.map.find(def_id) {
+ if let hir::Item_::ItemStruct(ref tuple_data, _) = node.node {
+
+ for i in private_indexes {
+ error.span_label(tuple_data.fields()[i].span,
+ &format!("private field declared here"));
+ }
+ }
+ }
+ }
+ error.emit();
}
}
}
if !vis.is_at_least(self.required_visibility, &self.tcx.map) {
if self.tcx.sess.features.borrow().pub_restricted ||
self.old_error_set.contains(&trait_ref.ref_id) {
- span_err!(self.tcx.sess, trait_ref.path.span, E0445,
- "private trait in public interface");
+ struct_span_err!(self.tcx.sess, trait_ref.path.span, E0445,
+ "private trait in public interface")
+ .span_label(trait_ref.path.span, &format!(
+ "private trait can't be public"))
+ .emit();
} else {
self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
node_id,
use Resolver;
use rustc::session::Session;
+use rustc::util::nodemap::FnvHashMap;
use syntax::ast;
use syntax::ext::hygiene::Mark;
use syntax::fold::{self, Folder};
use syntax::util::move_map::MoveMap;
use syntax::util::small_vector::SmallVector;
-use std::collections::HashMap;
use std::mem;
impl<'a> Resolver<'a> {
struct NodeIdAssigner<'a> {
sess: &'a Session,
- macros_at_scope: &'a mut HashMap<ast::NodeId, Vec<Mark>>,
+ macros_at_scope: &'a mut FnvHashMap<ast::NodeId, Vec<Mark>>,
}
impl<'a> Folder for NodeIdAssigner<'a> {
pub fn build_reduced_graph(&mut self, krate: &Crate) {
let no_implicit_prelude = attr::contains_name(&krate.attrs, "no_implicit_prelude");
self.graph_root.no_implicit_prelude.set(no_implicit_prelude);
-
- let mut visitor = BuildReducedGraphVisitor {
- parent: self.graph_root,
- resolver: self,
- };
- visit::walk_crate(&mut visitor, krate);
+ visit::walk_crate(&mut BuildReducedGraphVisitor { resolver: self }, krate);
}
/// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined;
}
/// Constructs the reduced graph for one item.
- fn build_reduced_graph_for_item(&mut self, item: &Item, parent_ref: &mut Module<'b>) {
- let parent = *parent_ref;
+ fn build_reduced_graph_for_item(&mut self, item: &Item) {
+ let parent = self.current_module;
+ let parent_vis = self.current_vis;
let name = item.ident.name;
let sp = item.span;
- self.current_module = parent;
let vis = self.resolve_visibility(&item.vis);
match item.node {
let subclass = ImportDirectiveSubclass::single(binding.name, source_name);
let span = view_path.span;
- parent.add_import_directive(module_path, subclass, span, item.id, vis);
- self.unresolved_imports += 1;
+ self.add_import_directive(module_path, subclass, span, item.id, vis);
}
ViewPathList(_, ref source_items) => {
// Make sure there's at most one `mod` import in the list.
};
let subclass = ImportDirectiveSubclass::single(rename, name);
let (span, id) = (source_item.span, source_item.node.id());
- parent.add_import_directive(module_path, subclass, span, id, vis);
- self.unresolved_imports += 1;
+ self.add_import_directive(module_path, subclass, span, id, vis);
}
}
ViewPathGlob(_) => {
let subclass = GlobImport { is_prelude: is_prelude };
let span = view_path.span;
- parent.add_import_directive(module_path, subclass, span, item.id, vis);
- self.unresolved_imports += 1;
+ self.add_import_directive(module_path, subclass, span, item.id, vis);
}
}
}
});
self.define(parent, name, TypeNS, (module, sp, vis));
self.module_map.insert(item.id, module);
- *parent_ref = module;
+
+ // Descend into the module.
+ self.current_module = module;
+ self.current_vis = ty::Visibility::Restricted(item.id);
}
ItemKind::ForeignMod(..) => {}
}
ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"),
}
+
+ visit::walk_item(&mut BuildReducedGraphVisitor { resolver: self }, item);
+ self.current_module = parent;
+ self.current_vis = parent_vis;
}
// Constructs the reduced graph for one variant. Variants exist in the
}
/// Constructs the reduced graph for one foreign item.
- fn build_reduced_graph_for_foreign_item(&mut self,
- foreign_item: &ForeignItem,
- parent: Module<'b>) {
+ fn build_reduced_graph_for_foreign_item(&mut self, foreign_item: &ForeignItem) {
+ let parent = self.current_module;
let name = foreign_item.ident.name;
let def = match foreign_item.node {
Def::Static(self.definitions.local_def_id(foreign_item.id), m)
}
};
- self.current_module = parent;
let vis = self.resolve_visibility(&foreign_item.vis);
self.define(parent, name, ValueNS, (def, foreign_item.span, vis));
}
- fn build_reduced_graph_for_block(&mut self, block: &Block, parent: &mut Module<'b>) {
+ fn build_reduced_graph_for_block(&mut self, block: &Block) {
+ let parent = self.current_module;
if self.block_needs_anonymous_module(block) {
let block_id = block.id;
let parent_link = BlockParentLink(parent, block_id);
let new_module = self.new_module(parent_link, None, false);
self.module_map.insert(block_id, new_module);
- *parent = new_module;
+ self.current_module = new_module; // Descend into the block.
}
+
+ visit::walk_block(&mut BuildReducedGraphVisitor { resolver: self }, block);
+ self.current_module = parent;
}
/// Builds the reduced graph for a single item in an external crate.
struct BuildReducedGraphVisitor<'a, 'b: 'a> {
resolver: &'a mut Resolver<'b>,
- parent: Module<'b>,
}
impl<'a, 'b> Visitor for BuildReducedGraphVisitor<'a, 'b> {
fn visit_item(&mut self, item: &Item) {
- let old_parent = self.parent;
- self.resolver.build_reduced_graph_for_item(item, &mut self.parent);
- visit::walk_item(self, item);
- self.parent = old_parent;
+ self.resolver.build_reduced_graph_for_item(item);
}
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
- self.resolver.build_reduced_graph_for_foreign_item(foreign_item, &self.parent);
+ self.resolver.build_reduced_graph_for_foreign_item(foreign_item);
}
fn visit_block(&mut self, block: &Block) {
- let old_parent = self.parent;
- self.resolver.build_reduced_graph_for_block(block, &mut self.parent);
- visit::walk_block(self, block);
- self.parent = old_parent;
+ self.resolver.build_reduced_graph_for_block(block);
}
}
use syntax_pos::Span;
use errors::DiagnosticBuilder;
-use std::collections::{HashMap, HashSet};
use std::cell::{Cell, RefCell};
use std::fmt;
use std::mem::replace;
err
}
ResolutionError::TypeNotMemberOfTrait(type_, trait_) => {
- struct_span_err!(resolver.session,
+ let mut err = struct_span_err!(resolver.session,
span,
E0437,
"type `{}` is not a member of trait `{}`",
type_,
- trait_)
+ trait_);
+ err.span_label(span, &format!("not a member of trait `Foo`"));
+ err
}
ResolutionError::ConstNotMemberOfTrait(const_, trait_) => {
- struct_span_err!(resolver.session,
+ let mut err = struct_span_err!(resolver.session,
span,
E0438,
"const `{}` is not a member of trait `{}`",
const_,
- trait_)
+ trait_);
+ err.span_label(span, &format!("not a member of trait `Foo`"));
+ err
}
ResolutionError::VariableNotBoundInPattern(variable_name, from, to) => {
struct_span_err!(resolver.session,
err
}
ResolutionError::StructVariantUsedAsFunction(path_name) => {
- struct_span_err!(resolver.session,
+ let mut err = struct_span_err!(resolver.session,
span,
E0423,
"`{}` is the name of a struct or struct variant, but this expression \
uses it like a function name",
- path_name)
+ path_name);
+ err.span_label(span, &format!("struct called like a function"));
+ err
}
ResolutionError::SelfNotAvailableInStaticMethod => {
- struct_span_err!(resolver.session,
+ let mut err = struct_span_err!(resolver.session,
span,
E0424,
- "`self` is not available in a static method. Maybe a `self` \
- argument is missing?")
+ "`self` is not available in a static method");
+ err.span_label(span, &format!("not available in static method"));
+ err.note(&format!("maybe a `self` argument is missing?"));
+ err
}
ResolutionError::UnresolvedName { path, message: msg, context, is_static_method,
is_field, def } => {
err
}
ResolutionError::UndeclaredLabel(name) => {
- struct_span_err!(resolver.session,
- span,
- E0426,
- "use of undeclared label `{}`",
- name)
+ let mut err = struct_span_err!(resolver.session,
+ span,
+ E0426,
+ "use of undeclared label `{}`",
+ name);
+ err.span_label(span, &format!("undeclared label `{}`",&name));
+ err
}
ResolutionError::SelfImportsOnlyAllowedWithin => {
struct_span_err!(resolver.session,
}
ResolutionError::UnresolvedImport(name) => {
let msg = match name {
- Some((n, p)) => format!("unresolved import `{}`{}", n, p),
+ Some((n, _)) => format!("unresolved import `{}`", n),
None => "unresolved import".to_owned(),
};
- struct_span_err!(resolver.session, span, E0432, "{}", msg)
+ let mut err = struct_span_err!(resolver.session, span, E0432, "{}", msg);
+ if let Some((_, p)) = name {
+ err.span_label(span, &p);
+ }
+ err
}
ResolutionError::FailedToResolve(msg) => {
let mut err = struct_span_err!(resolver.session, span, E0433,
closure form instead")
}
ResolutionError::AttemptToUseNonConstantValueInConstant => {
- struct_span_err!(resolver.session,
+ let mut err = struct_span_err!(resolver.session,
span,
E0435,
- "attempt to use a non-constant value in a constant")
+ "attempt to use a non-constant value in a constant");
+ err.span_label(span, &format!("non-constant used with constant"));
+ err
}
ResolutionError::BindingShadowsSomethingUnacceptable(what_binding, name, binding) => {
let shadows_what = PathResolution::new(binding.def().unwrap()).kind_name();
}
// Map from the name in a pattern to its binding mode.
-type BindingMap = HashMap<ast::Ident, BindingInfo>;
+type BindingMap = FnvHashMap<ast::Ident, BindingInfo>;
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum PatternSource {
/// One local scope.
#[derive(Debug)]
struct Rib<'a> {
- bindings: HashMap<ast::Ident, Def>,
+ bindings: FnvHashMap<ast::Ident, Def>,
kind: RibKind<'a>,
}
impl<'a> Rib<'a> {
fn new(kind: RibKind<'a>) -> Rib<'a> {
Rib {
- bindings: HashMap::new(),
+ bindings: FnvHashMap(),
kind: kind,
}
}
// is the NodeId of the local `extern crate` item (otherwise, `extern_crate_id` is None).
extern_crate_id: Option<NodeId>,
- resolutions: RefCell<HashMap<(Name, Namespace), &'a RefCell<NameResolution<'a>>>>,
- unresolved_imports: RefCell<Vec<&'a ImportDirective<'a>>>,
+ resolutions: RefCell<FnvHashMap<(Name, Namespace), &'a RefCell<NameResolution<'a>>>>,
no_implicit_prelude: Cell<bool>,
- glob_importers: RefCell<Vec<(Module<'a>, &'a ImportDirective<'a>)>>,
+ glob_importers: RefCell<Vec<&'a ImportDirective<'a>>>,
globs: RefCell<Vec<&'a ImportDirective<'a>>>,
// Used to memoize the traits in this module for faster searches through all traits in scope.
// access the children must be preceded with a
// `populate_module_if_necessary` call.
populated: Cell<bool>,
-
- arenas: &'a ResolverArenas<'a>,
}
pub type Module<'a> = &'a ModuleS<'a>;
impl<'a> ModuleS<'a> {
- fn new(parent_link: ParentLink<'a>,
- def: Option<Def>,
- external: bool,
- arenas: &'a ResolverArenas<'a>) -> Self {
+ fn new(parent_link: ParentLink<'a>, def: Option<Def>, external: bool) -> Self {
ModuleS {
parent_link: parent_link,
def: def,
extern_crate_id: None,
- resolutions: RefCell::new(HashMap::new()),
- unresolved_imports: RefCell::new(Vec::new()),
+ resolutions: RefCell::new(FnvHashMap()),
no_implicit_prelude: Cell::new(false),
glob_importers: RefCell::new(Vec::new()),
globs: RefCell::new((Vec::new())),
traits: RefCell::new(None),
populated: Cell::new(!external),
- arenas: arenas
}
}
/// Interns the names of the primitive types.
struct PrimitiveTypeTable {
- primitive_types: HashMap<Name, PrimTy>,
+ primitive_types: FnvHashMap<Name, PrimTy>,
}
impl PrimitiveTypeTable {
fn new() -> PrimitiveTypeTable {
- let mut table = PrimitiveTypeTable { primitive_types: HashMap::new() };
+ let mut table = PrimitiveTypeTable { primitive_types: FnvHashMap() };
table.intern("bool", TyBool);
table.intern("char", TyChar);
// Maps the node id of a statement to the expansions of the `macro_rules!`s
// immediately above the statement (if appropriate).
- macros_at_scope: HashMap<NodeId, Vec<Mark>>,
+ macros_at_scope: FnvHashMap<NodeId, Vec<Mark>>,
graph_root: Module<'a>,
structs: FnvHashMap<DefId, Vec<Name>>,
- // The number of imports that are currently unresolved.
- unresolved_imports: usize,
+ // All imports known to succeed or fail.
+ determined_imports: Vec<&'a ImportDirective<'a>>,
+
+ // All non-determined imports.
+ indeterminate_imports: Vec<&'a ImportDirective<'a>>,
// The module that represents the current item scope.
current_module: Module<'a>,
+ // The visibility of `pub(self)` items in the current scope.
+ // Equivalently, the visibility required for an item to be accessible from the current scope.
+ current_vis: ty::Visibility,
+
// The current set of local scopes, for values.
// FIXME #4948: Reuse ribs to avoid allocation.
value_ribs: Vec<Rib<'a>>,
// all imports, but only glob imports are actually interesting).
pub glob_map: GlobMap,
- used_imports: HashSet<(NodeId, Namespace)>,
- used_crates: HashSet<CrateNum>,
+ used_imports: FnvHashSet<(NodeId, Namespace)>,
+ used_crates: FnvHashSet<CrateNum>,
pub maybe_unused_trait_imports: NodeSet,
privacy_errors: Vec<PrivacyError<'a>>,
-> Resolver<'a> {
let root_def_id = DefId::local(CRATE_DEF_INDEX);
let graph_root =
- ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false, arenas);
+ ModuleS::new(NoParentLink, Some(Def::Mod(root_def_id)), false);
let graph_root = arenas.alloc_module(graph_root);
let mut module_map = NodeMap();
module_map.insert(CRATE_NODE_ID, graph_root);
session: session,
definitions: Definitions::new(),
- macros_at_scope: HashMap::new(),
+ macros_at_scope: FnvHashMap(),
// The outermost module has def ID 0; this is not reflected in the
// AST.
trait_item_map: FnvHashMap(),
structs: FnvHashMap(),
- unresolved_imports: 0,
+ determined_imports: Vec::new(),
+ indeterminate_imports: Vec::new(),
current_module: graph_root,
+ current_vis: ty::Visibility::Restricted(ast::CRATE_NODE_ID),
value_ribs: vec![Rib::new(ModuleRibKind(graph_root))],
type_ribs: vec![Rib::new(ModuleRibKind(graph_root))],
label_ribs: Vec::new(),
make_glob_map: make_glob_map == MakeGlobMap::Yes,
glob_map: NodeMap(),
- used_imports: HashSet::new(),
- used_crates: HashSet::new(),
+ used_imports: FnvHashSet(),
+ used_crates: FnvHashSet(),
maybe_unused_trait_imports: NodeSet(),
privacy_errors: Vec::new(),
/// Entry point to crate resolution.
pub fn resolve_crate(&mut self, krate: &Crate) {
self.current_module = self.graph_root;
+ self.current_vis = ty::Visibility::Restricted(ast::CRATE_NODE_ID);
visit::walk_crate(self, krate);
check_unused::check_crate(self, krate);
fn new_module(&self, parent_link: ParentLink<'a>, def: Option<Def>, external: bool)
-> Module<'a> {
- self.arenas.alloc_module(ModuleS::new(parent_link, def, external, self.arenas))
+ self.arenas.alloc_module(ModuleS::new(parent_link, def, external))
}
fn new_extern_crate_module(&self, parent_link: ParentLink<'a>, def: Def, local_node_id: NodeId)
-> Module<'a> {
- let mut module = ModuleS::new(parent_link, Some(def), false, self.arenas);
+ let mut module = ModuleS::new(parent_link, Some(def), false);
module.extern_crate_id = Some(local_node_id);
self.arenas.modules.alloc(module)
}
mut search_module: Module<'a>,
module_path: &[Name],
index: usize,
- span: Span)
+ span: Option<Span>)
-> ResolveResult<Module<'a>> {
- fn search_parent_externals(needle: Name, module: Module) -> Option<Module> {
- match module.resolve_name(needle, TypeNS, false) {
+ fn search_parent_externals<'a>(this: &mut Resolver<'a>, needle: Name, module: Module<'a>)
+ -> Option<Module<'a>> {
+ match this.resolve_name_in_module(module, needle, TypeNS, false, None) {
Success(binding) if binding.is_extern_crate() => Some(module),
_ => match module.parent_link {
ModuleParentLink(ref parent, _) => {
- search_parent_externals(needle, parent)
+ search_parent_externals(this, needle, parent)
}
_ => None,
},
// modules as we go.
while index < module_path_len {
let name = module_path[index];
- match self.resolve_name_in_module(search_module, name, TypeNS, false, true) {
+ match self.resolve_name_in_module(search_module, name, TypeNS, false, span) {
Failed(None) => {
let segment_name = name.as_str();
let module_name = module_to_string(search_module);
let msg = if "???" == &module_name {
- match search_parent_externals(name, &self.current_module) {
+ let current_module = self.current_module;
+ match search_parent_externals(self, name, current_module) {
Some(module) => {
let path_str = names_to_string(module_path);
let target_mod_str = module_to_string(&module);
- let current_mod_str = module_to_string(&self.current_module);
+ let current_mod_str = module_to_string(current_module);
let prefix = if target_mod_str == current_mod_str {
"self::".to_string()
format!("Could not find `{}` in `{}`", segment_name, module_name)
};
- return Failed(Some((span, msg)));
+ return Failed(span.map(|span| (span, msg)));
}
Failed(err) => return Failed(err),
Indeterminate => {
// Check to see whether there are type bindings, and, if
// so, whether there is a module within.
if let Some(module_def) = binding.module() {
- self.check_privacy(name, binding, span);
search_module = module_def;
} else {
let msg = format!("Not a module `{}`", name);
- return Failed(Some((span, msg)));
+ return Failed(span.map(|span| (span, msg)));
}
}
}
fn resolve_module_path(&mut self,
module_path: &[Name],
use_lexical_scope: UseLexicalScopeFlag,
- span: Span)
+ span: Option<Span>)
-> ResolveResult<Module<'a>> {
if module_path.len() == 0 {
return Success(self.graph_root) // Use the crate root
// first component of the path in the current lexical
// scope and then proceed to resolve below that.
let ident = ast::Ident::with_empty_ctxt(module_path[0]);
- match self.resolve_ident_in_lexical_scope(ident, TypeNS, true)
+ match self.resolve_ident_in_lexical_scope(ident, TypeNS, span)
.and_then(LexicalScopeBinding::module) {
None => return Failed(None),
Some(containing_module) => {
}
}
- self.resolve_module_path_from_root(search_module,
- module_path,
- start_index,
- span)
+ self.resolve_module_path_from_root(search_module, module_path, start_index, span)
}
/// This resolves the identifier `ident` in the namespace `ns` in the current lexical scope.
fn resolve_ident_in_lexical_scope(&mut self,
mut ident: ast::Ident,
ns: Namespace,
- record_used: bool)
+ record_used: Option<Span>)
-> Option<LexicalScopeBinding<'a>> {
if ns == TypeNS {
ident = ast::Ident::with_empty_ctxt(ident.name);
if module.def.is_some() {
return match self.prelude {
Some(prelude) if !module.no_implicit_prelude.get() => {
- prelude.resolve_name(name, ns, false).success()
- .map(LexicalScopeBinding::Item)
+ self.resolve_name_in_module(prelude, name, ns, false, None).success()
+ .map(LexicalScopeBinding::Item)
}
_ => None,
};
/// Resolves a "module prefix". A module prefix is one or both of (a) `self::`;
/// (b) some chain of `super::`.
/// grammar: (SELF MOD_SEP ) ? (SUPER MOD_SEP) *
- fn resolve_module_prefix(&mut self, module_path: &[Name], span: Span)
+ fn resolve_module_prefix(&mut self, module_path: &[Name], span: Option<Span>)
-> ResolveResult<ModulePrefixResult<'a>> {
// Start at the current module if we see `self` or `super`, or at the
// top of the crate otherwise.
match self.get_nearest_normal_module_parent(containing_module) {
None => {
let msg = "There are too many initial `super`s.".into();
- return Failed(Some((span, msg)));
+ return Failed(span.map(|span| (span, msg)));
}
Some(new_module) => {
containing_module = new_module;
return Success(PrefixFound(containing_module, i));
}
- /// Attempts to resolve the supplied name in the given module for the
- /// given namespace. If successful, returns the binding corresponding to
- /// the name.
- fn resolve_name_in_module(&mut self,
- module: Module<'a>,
- name: Name,
- namespace: Namespace,
- use_lexical_scope: bool,
- record_used: bool)
- -> ResolveResult<&'a NameBinding<'a>> {
- debug!("(resolving name in module) resolving `{}` in `{}`", name, module_to_string(module));
-
- self.populate_module_if_necessary(module);
- module.resolve_name(name, namespace, use_lexical_scope).and_then(|binding| {
- if record_used {
- self.record_use(name, namespace, binding);
- }
- Success(binding)
- })
- }
-
// AST resolution
//
// We maintain a list of value ribs and type ribs.
let module = self.module_map.get(&id).cloned(); // clones a reference
if let Some(module) = module {
// Move down in the graph.
- let orig_module = ::std::mem::replace(&mut self.current_module, module);
+ let orig_module = replace(&mut self.current_module, module);
+ let orig_vis = replace(&mut self.current_vis, ty::Visibility::Restricted(id));
self.value_ribs.push(Rib::new(ModuleRibKind(module)));
self.type_ribs.push(Rib::new(ModuleRibKind(module)));
f(self);
self.current_module = orig_module;
+ self.current_vis = orig_vis;
self.value_ribs.pop();
self.type_ribs.pop();
} else {
match type_parameters {
HasTypeParameters(generics, rib_kind) => {
let mut function_type_rib = Rib::new(rib_kind);
- let mut seen_bindings = HashMap::new();
+ let mut seen_bindings = FnvHashMap();
for type_parameter in &generics.ty_params {
let name = type_parameter.ident.name;
debug!("with_type_parameter_rib: {}", type_parameter.id);
self.label_ribs.push(Rib::new(rib_kind));
// Add each argument to the rib.
- let mut bindings_list = HashMap::new();
+ let mut bindings_list = FnvHashMap();
for argument in &declaration.inputs {
self.resolve_pattern(&argument.pat, PatternSource::FnParam, &mut bindings_list);
walk_list!(self, visit_expr, &local.init);
// Resolve the pattern.
- self.resolve_pattern(&local.pat, PatternSource::Let, &mut HashMap::new());
+ self.resolve_pattern(&local.pat, PatternSource::Let, &mut FnvHashMap());
}
// build a map from pattern identifiers to binding-info's.
// that expands into an or-pattern where one 'x' was from the
// user and one 'x' came from the macro.
fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap {
- let mut binding_map = HashMap::new();
+ let mut binding_map = FnvHashMap();
pat.walk(&mut |pat| {
if let PatKind::Ident(binding_mode, ident, ref sub_pat) = pat.node {
fn resolve_arm(&mut self, arm: &Arm) {
self.value_ribs.push(Rib::new(NormalRibKind));
- let mut bindings_list = HashMap::new();
+ let mut bindings_list = FnvHashMap();
for pattern in &arm.pats {
self.resolve_pattern(&pattern, PatternSource::Match, &mut bindings_list);
}
pat_id: NodeId,
outer_pat_id: NodeId,
pat_src: PatternSource,
- bindings: &mut HashMap<ast::Ident, NodeId>)
+ bindings: &mut FnvHashMap<ast::Ident, NodeId>)
-> PathResolution {
// Add the binding to the local ribs, if it
// doesn't already exist in the bindings map. (We
pat_src: PatternSource,
// Maps idents to the node ID for the
// outermost pattern that binds them.
- bindings: &mut HashMap<ast::Ident, NodeId>) {
+ bindings: &mut FnvHashMap<ast::Ident, NodeId>) {
// Visit all direct subpatterns of this pattern.
let outer_pat_id = pat.id;
pat.walk(&mut |pat| {
PatKind::Ident(bmode, ref ident, ref opt_pat) => {
// First try to resolve the identifier as some existing
// entity, then fall back to a fresh binding.
- let binding = self.resolve_ident_in_lexical_scope(ident.node, ValueNS, false)
+ let binding = self.resolve_ident_in_lexical_scope(ident.node, ValueNS, None)
.and_then(LexicalScopeBinding::item);
let resolution = binding.and_then(NameBinding::def).and_then(|def| {
let always_binding = !pat_src.is_refutable() || opt_pat.is_some() ||
//
// Such behavior is required for backward compatibility.
// The same fallback is used when `a` resolves to nothing.
- let def = resolve_identifier_with_fallback(self, true).ok_or(false);
+ let def = resolve_identifier_with_fallback(self, Some(span)).ok_or(false);
return def.and_then(|def| self.adjust_local_def(def, span).ok_or(true)).map(mk_res);
}
- let unqualified_def = resolve_identifier_with_fallback(self, false);
+ let unqualified_def = resolve_identifier_with_fallback(self, None);
let qualified_binding = self.resolve_module_relative_path(span, segments, namespace);
match (qualified_binding, unqualified_def) {
(Ok(binding), Some(ref ud)) if binding.def().unwrap() == ud.def => {
fn resolve_identifier(&mut self,
identifier: ast::Ident,
namespace: Namespace,
- record_used: bool)
+ record_used: Option<Span>)
-> Option<LocalDef> {
if identifier.name == keywords::Invalid.name() {
return None;
.collect::<Vec<_>>();
let containing_module;
- match self.resolve_module_path(&module_path, UseLexicalScope, span) {
+ match self.resolve_module_path(&module_path, UseLexicalScope, Some(span)) {
Failed(err) => {
let (span, msg) = match err {
Some((span, msg)) => (span, msg),
}
let name = segments.last().unwrap().identifier.name;
- let result = self.resolve_name_in_module(containing_module, name, namespace, false, true);
- result.success().map(|binding| {
- self.check_privacy(name, binding, span);
- binding
- }).ok_or(false)
+ let result =
+ self.resolve_name_in_module(containing_module, name, namespace, false, Some(span));
+ result.success().ok_or(false)
}
/// Invariant: This must be called only during main resolution, not during
let root_module = self.graph_root;
let containing_module;
- match self.resolve_module_path_from_root(root_module,
- &module_path,
- 0,
- span) {
+ match self.resolve_module_path_from_root(root_module, &module_path, 0, Some(span)) {
Failed(err) => {
let (span, msg) = match err {
Some((span, msg)) => (span, msg),
}
let name = segments.last().unwrap().name();
- let result = self.resolve_name_in_module(containing_module, name, namespace, false, true);
- result.success().map(|binding| {
- self.check_privacy(name, binding, span);
- binding
- }).ok_or(false)
+ let result =
+ self.resolve_name_in_module(containing_module, name, namespace, false, Some(span));
+ result.success().ok_or(false)
}
fn with_no_errors<T, F>(&mut self, f: F) -> T
fn with_empty_ribs<T, F>(&mut self, f: F) -> T
where F: FnOnce(&mut Resolver<'a>) -> T,
{
- use ::std::mem::replace;
let value_ribs = replace(&mut self.value_ribs, Vec::new());
let type_ribs = replace(&mut self.type_ribs, Vec::new());
let label_ribs = replace(&mut self.label_ribs, Vec::new());
match self.resolve_module_path(&name_path[..],
UseLexicalScope,
- expr.span) {
+ Some(expr.span)) {
Success(e) => {
if let Some(def_type) = e.def {
def = def_type;
self.visit_expr(subexpression);
self.value_ribs.push(Rib::new(NormalRibKind));
- self.resolve_pattern(pattern, PatternSource::IfLet, &mut HashMap::new());
+ self.resolve_pattern(pattern, PatternSource::IfLet, &mut FnvHashMap());
self.visit_block(if_block);
self.value_ribs.pop();
ExprKind::WhileLet(ref pattern, ref subexpression, ref block, label) => {
self.visit_expr(subexpression);
self.value_ribs.push(Rib::new(NormalRibKind));
- self.resolve_pattern(pattern, PatternSource::WhileLet, &mut HashMap::new());
+ self.resolve_pattern(pattern, PatternSource::WhileLet, &mut FnvHashMap());
self.resolve_labeled_block(label.map(|l| l.node), expr.id, block);
ExprKind::ForLoop(ref pattern, ref subexpression, ref block, label) => {
self.visit_expr(subexpression);
self.value_ribs.push(Rib::new(NormalRibKind));
- self.resolve_pattern(pattern, PatternSource::For, &mut HashMap::new());
+ self.resolve_pattern(pattern, PatternSource::For, &mut FnvHashMap());
self.resolve_labeled_block(label.map(|l| l.node), expr.id, block);
ast::Visibility::Public => return ty::Visibility::Public,
ast::Visibility::Crate(_) => return ty::Visibility::Restricted(ast::CRATE_NODE_ID),
ast::Visibility::Restricted { ref path, id } => (path, id),
- ast::Visibility::Inherited => {
- let current_module =
- self.get_nearest_normal_module_parent_or_self(self.current_module);
- let id =
- self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap();
- return ty::Visibility::Restricted(id);
- }
+ ast::Visibility::Inherited => return self.current_vis,
};
let segments: Vec<_> = path.segments.iter().map(|seg| seg.identifier.name).collect();
let mut path_resolution = err_path_resolution();
- let vis = match self.resolve_module_path(&segments, DontUseLexicalScope, path.span) {
+ let vis = match self.resolve_module_path(&segments, DontUseLexicalScope, Some(path.span)) {
Success(module) => {
let def = module.def.unwrap();
path_resolution = PathResolution::new(def);
}
fn is_accessible(&self, vis: ty::Visibility) -> bool {
- let current_module = self.get_nearest_normal_module_parent_or_self(self.current_module);
- let node_id = self.definitions.as_local_node_id(current_module.def_id().unwrap()).unwrap();
- vis.is_accessible_from(node_id, self)
- }
-
- fn check_privacy(&mut self, name: Name, binding: &'a NameBinding<'a>, span: Span) {
- if !self.is_accessible(binding.vis) {
- self.privacy_errors.push(PrivacyError(span, name, binding));
- }
+ vis.is_at_least(self.current_vis, self)
}
fn report_privacy_errors(&self) {
if self.privacy_errors.len() == 0 { return }
- let mut reported_spans = HashSet::new();
+ let mut reported_spans = FnvHashSet();
for &PrivacyError(span, name, binding) in &self.privacy_errors {
if !reported_spans.insert(span) { continue }
if binding.is_extern_crate() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use self::Determinacy::*;
use self::ImportDirectiveSubclass::*;
use Module;
}
}
+#[derive(Copy, Clone, Debug)]
+pub enum Determinacy {
+ Determined,
+ Undetermined,
+}
+
/// Contains data for specific types of import directives.
#[derive(Clone, Debug)]
-pub enum ImportDirectiveSubclass {
+pub enum ImportDirectiveSubclass<'a> {
SingleImport {
target: Name,
source: Name,
- type_determined: Cell<bool>,
- value_determined: Cell<bool>,
+ value_result: Cell<Result<&'a NameBinding<'a>, Determinacy>>,
+ type_result: Cell<Result<&'a NameBinding<'a>, Determinacy>>,
},
GlobImport { is_prelude: bool },
}
-impl ImportDirectiveSubclass {
+impl<'a> ImportDirectiveSubclass<'a> {
pub fn single(target: Name, source: Name) -> Self {
SingleImport {
target: target,
source: source,
- type_determined: Cell::new(false),
- value_determined: Cell::new(false),
+ type_result: Cell::new(Err(Undetermined)),
+ value_result: Cell::new(Err(Undetermined)),
}
}
}
#[derive(Debug,Clone)]
pub struct ImportDirective<'a> {
pub id: NodeId,
+ parent: Module<'a>,
module_path: Vec<Name>,
- target_module: Cell<Option<Module<'a>>>, // the resolution of `module_path`
- subclass: ImportDirectiveSubclass,
+ imported_module: Cell<Option<Module<'a>>>, // the resolution of `module_path`
+ subclass: ImportDirectiveSubclass<'a>,
span: Span,
- vis: ty::Visibility, // see note in ImportResolutionPerNamespace about how to use this
+ vis: Cell<ty::Visibility>,
}
impl<'a> ImportDirective<'a> {
_ => None, // The binding could be shadowed by a single import, so it is not known.
})
}
+}
+
+impl<'a> Resolver<'a> {
+ fn resolution(&self, module: Module<'a>, name: Name, ns: Namespace)
+ -> &'a RefCell<NameResolution<'a>> {
+ *module.resolutions.borrow_mut().entry((name, ns))
+ .or_insert_with(|| self.arenas.alloc_name_resolution())
+ }
+
+ /// Attempts to resolve the supplied name in the given module for the given namespace.
+ /// If successful, returns the binding corresponding to the name.
+ /// Invariant: if `record_used` is `Some`, import resolution must be complete.
+ pub fn resolve_name_in_module(&mut self,
+ module: Module<'a>,
+ name: Name,
+ ns: Namespace,
+ allow_private_imports: bool,
+ record_used: Option<Span>)
+ -> ResolveResult<&'a NameBinding<'a>> {
+ self.populate_module_if_necessary(module);
+
+ let resolution = self.resolution(module, name, ns);
+ let resolution = match resolution.borrow_state() {
+ ::std::cell::BorrowState::Unused => resolution.borrow_mut(),
+ _ => return Failed(None), // This happens when there is a cycle of imports
+ };
+
+ let is_disallowed_private_import = |binding: &NameBinding| {
+ !allow_private_imports && !binding.is_pseudo_public() && binding.is_import()
+ };
+
+ if let Some(span) = record_used {
+ if let Some(binding) = resolution.binding {
+ if is_disallowed_private_import(binding) {
+ return Failed(None);
+ }
+ self.record_use(name, ns, binding);
+ if !self.is_accessible(binding.vis) {
+ self.privacy_errors.push(PrivacyError(span, name, binding));
+ }
+ }
+
+ return resolution.binding.map(Success).unwrap_or(Failed(None));
+ }
+
+ // If the resolution doesn't depend on glob definability, check privacy and return.
+ if let Some(result) = self.try_result(&resolution, ns) {
+ return result.and_then(|binding| {
+ if self.is_accessible(binding.vis) && !is_disallowed_private_import(binding) {
+ Success(binding)
+ } else {
+ Failed(None)
+ }
+ });
+ }
+
+ // Check if the globs are determined
+ for directive in module.globs.borrow().iter() {
+ if self.is_accessible(directive.vis.get()) {
+ if let Some(module) = directive.imported_module.get() {
+ let result = self.resolve_name_in_module(module, name, ns, true, None);
+ if let Indeterminate = result {
+ return Indeterminate;
+ }
+ } else {
+ return Indeterminate;
+ }
+ }
+ }
+
+ Failed(None)
+ }
// Returns Some(the resolution of the name), or None if the resolution depends
// on whether more globs can define the name.
- fn try_result(&self, ns: Namespace, allow_private_imports: bool)
+ fn try_result(&mut self, resolution: &NameResolution<'a>, ns: Namespace)
-> Option<ResolveResult<&'a NameBinding<'a>>> {
- match self.binding {
+ match resolution.binding {
Some(binding) if !binding.is_glob_import() =>
- return Some(Success(binding)),
- _ => {} // Items and single imports are not shadowable
+ return Some(Success(binding)), // Items and single imports are not shadowable.
+ _ => {}
};
// Check if a single import can still define the name.
- match self.single_imports {
- SingleImports::None => {},
+ match resolution.single_imports {
SingleImports::AtLeastOne => return Some(Indeterminate),
- SingleImports::MaybeOne(directive) => {
- // If (1) we don't allow private imports, (2) no public single import can define
- // the name, and (3) no public glob has defined the name, the resolution depends
- // on whether more globs can define the name.
- if !allow_private_imports && directive.vis != ty::Visibility::Public &&
- !self.binding.map(NameBinding::is_pseudo_public).unwrap_or(false) {
- return None;
- }
-
- let target_module = match directive.target_module.get() {
- Some(target_module) => target_module,
+ SingleImports::MaybeOne(directive) if self.is_accessible(directive.vis.get()) => {
+ let module = match directive.imported_module.get() {
+ Some(module) => module,
None => return Some(Indeterminate),
};
let name = match directive.subclass {
SingleImport { source, .. } => source,
GlobImport { .. } => unreachable!(),
};
- match target_module.resolve_name(name, ns, false) {
+ match self.resolve_name_in_module(module, name, ns, true, None) {
Failed(_) => {}
_ => return Some(Indeterminate),
}
}
+ SingleImports::MaybeOne(_) | SingleImports::None => {},
}
- self.binding.map(Success)
- }
-}
-
-impl<'a> ::ModuleS<'a> {
- fn resolution(&self, name: Name, ns: Namespace) -> &'a RefCell<NameResolution<'a>> {
- *self.resolutions.borrow_mut().entry((name, ns))
- .or_insert_with(|| self.arenas.alloc_name_resolution())
+ resolution.binding.map(Success)
}
- pub fn resolve_name(&self, name: Name, ns: Namespace, allow_private_imports: bool)
- -> ResolveResult<&'a NameBinding<'a>> {
- let resolution = self.resolution(name, ns);
- let resolution = match resolution.borrow_state() {
- ::std::cell::BorrowState::Unused => resolution.borrow_mut(),
- _ => return Failed(None), // This happens when there is a cycle of imports
- };
-
- if let Some(result) = resolution.try_result(ns, allow_private_imports) {
- // If the resolution doesn't depend on glob definability, check privacy and return.
- return result.and_then(|binding| {
- let allowed = allow_private_imports || !binding.is_import() ||
- binding.is_pseudo_public();
- if allowed { Success(binding) } else { Failed(None) }
- });
- }
-
- // Check if the globs are determined
- for directive in self.globs.borrow().iter() {
- if !allow_private_imports && directive.vis != ty::Visibility::Public { continue }
- match directive.target_module.get() {
- None => return Indeterminate,
- Some(target_module) => match target_module.resolve_name(name, ns, false) {
- Indeterminate => return Indeterminate,
- _ => {}
- }
- }
- }
-
- Failed(None)
- }
-
- pub fn add_import_directive(&self,
+ // Add an import directive to the current module.
+ pub fn add_import_directive(&mut self,
module_path: Vec<Name>,
- subclass: ImportDirectiveSubclass,
+ subclass: ImportDirectiveSubclass<'a>,
span: Span,
id: NodeId,
vis: ty::Visibility) {
+ let current_module = self.current_module;
let directive = self.arenas.alloc_import_directive(ImportDirective {
+ parent: current_module,
module_path: module_path,
- target_module: Cell::new(None),
+ imported_module: Cell::new(None),
subclass: subclass,
span: span,
id: id,
- vis: vis,
+ vis: Cell::new(vis),
});
- self.unresolved_imports.borrow_mut().push(directive);
+ self.indeterminate_imports.push(directive);
match directive.subclass {
SingleImport { target, .. } => {
for &ns in &[ValueNS, TypeNS] {
- self.resolution(target, ns).borrow_mut().single_imports
- .add_directive(directive);
+ let mut resolution = self.resolution(current_module, target, ns).borrow_mut();
+ resolution.single_imports.add_directive(directive);
}
}
// We don't add prelude imports to the globs since they only affect lexical scopes,
// which are not relevant to import resolution.
GlobImport { is_prelude: true } => {}
- GlobImport { .. } => self.globs.borrow_mut().push(directive),
+ GlobImport { .. } => self.current_module.globs.borrow_mut().push(directive),
}
}
-}
-impl<'a> Resolver<'a> {
// Given a binding and an import directive that resolves to it,
// return the corresponding binding defined by the import directive.
fn import(&mut self, binding: &'a NameBinding<'a>, directive: &'a ImportDirective<'a>)
directive: directive,
},
span: directive.span,
- vis: directive.vis,
+ vis: directive.vis.get(),
}
}
// Ensure that `resolution` isn't borrowed when defining in the module's glob importers,
// during which the resolution might end up getting re-defined via a glob cycle.
let (new_binding, t) = {
- let mut resolution = &mut *module.resolution(name, ns).borrow_mut();
+ let mut resolution = &mut *self.resolution(module, name, ns).borrow_mut();
let was_known = resolution.binding().is_some();
let t = f(self, resolution);
// Define `new_binding` in `module`s glob importers.
if new_binding.is_importable() && new_binding.is_pseudo_public() {
- for &(importer, directive) in module.glob_importers.borrow_mut().iter() {
+ for directive in module.glob_importers.borrow_mut().iter() {
let imported_binding = self.import(new_binding, directive);
- let _ = self.try_define(importer, name, ns, imported_binding);
+ let _ = self.try_define(directive.parent, name, ns, imported_binding);
}
}
}
}
-struct ImportResolvingError<'a> {
- /// Module where the error happened
- source_module: Module<'a>,
- import_directive: &'a ImportDirective<'a>,
- span: Span,
- help: String,
-}
-
struct ImportResolver<'a, 'b: 'a> {
resolver: &'a mut Resolver<'b>,
}
// remain or unsuccessfully when no forward progress in resolving imports
// is made.
+ fn set_current_module(&mut self, module: Module<'b>) {
+ self.current_module = module;
+ self.current_vis = ty::Visibility::Restricted({
+ let normal_module = self.get_nearest_normal_module_parent_or_self(module);
+ self.definitions.as_local_node_id(normal_module.def_id().unwrap()).unwrap()
+ });
+ }
+
/// Resolves all imports for the crate. This method performs the fixed-
/// point iteration.
fn resolve_imports(&mut self) {
let mut i = 0;
- let mut prev_unresolved_imports = 0;
- let mut errors = Vec::new();
+ let mut prev_num_indeterminates = self.indeterminate_imports.len() + 1;
- loop {
- debug!("(resolving imports) iteration {}, {} imports left", i, self.unresolved_imports);
+ while self.indeterminate_imports.len() < prev_num_indeterminates {
+ prev_num_indeterminates = self.indeterminate_imports.len();
+ debug!("(resolving imports) iteration {}, {} imports left", i, prev_num_indeterminates);
- // Attempt to resolve imports in all local modules.
- for module in self.arenas.local_modules().iter() {
- self.current_module = module;
- self.resolve_imports_in_current_module(&mut errors);
- }
+ let mut imports = Vec::new();
+ ::std::mem::swap(&mut imports, &mut self.indeterminate_imports);
- if self.unresolved_imports == 0 {
- debug!("(resolving imports) success");
- for module in self.arenas.local_modules().iter() {
- self.finalize_resolutions_in(module, false);
+ for import in imports {
+ match self.resolve_import(&import) {
+ Failed(_) => self.determined_imports.push(import),
+ Indeterminate => self.indeterminate_imports.push(import),
+ Success(()) => self.determined_imports.push(import),
}
- break;
}
- if self.unresolved_imports == prev_unresolved_imports {
- // resolving failed
- // Report unresolved imports only if no hard error was already reported
- // to avoid generating multiple errors on the same import.
- // Imports that are still indeterminate at this point are actually blocked
- // by errored imports, so there is no point reporting them.
- for module in self.arenas.local_modules().iter() {
- self.finalize_resolutions_in(module, errors.len() == 0);
- }
- for e in errors {
- self.import_resolving_error(e)
- }
- break;
+ i += 1;
+ }
+
+ for module in self.arenas.local_modules().iter() {
+ self.finalize_resolutions_in(module);
+ }
+
+ let mut errors = false;
+ for i in 0 .. self.determined_imports.len() {
+ let import = self.determined_imports[i];
+ if let Failed(err) = self.finalize_import(import) {
+ errors = true;
+ let (span, help) = match err {
+ Some((span, msg)) => (span, msg),
+ None => (import.span, String::new()),
+ };
+
+ // If the error is a single failed import then create a "fake" import
+ // resolution for it so that later resolve stages won't complain.
+ self.import_dummy_binding(import);
+ let path = import_path_to_string(&import.module_path, &import.subclass);
+ let error = ResolutionError::UnresolvedImport(Some((&path, &help)));
+ resolve_error(self.resolver, span, error);
}
+ }
- i += 1;
- prev_unresolved_imports = self.unresolved_imports;
+ // Report unresolved imports only if no hard error was already reported
+ // to avoid generating multiple errors on the same import.
+ if !errors {
+ if let Some(import) = self.indeterminate_imports.iter().next() {
+ let error = ResolutionError::UnresolvedImport(None);
+ resolve_error(self.resolver, import.span, error);
+ }
}
}
// Define a "dummy" resolution containing a Def::Err as a placeholder for a
// failed resolution
- fn import_dummy_binding(&mut self,
- source_module: Module<'b>,
- directive: &'b ImportDirective<'b>) {
+ fn import_dummy_binding(&mut self, directive: &'b ImportDirective<'b>) {
if let SingleImport { target, .. } = directive.subclass {
let dummy_binding = self.arenas.alloc_name_binding(NameBinding {
kind: NameBindingKind::Def(Def::Err),
});
let dummy_binding = self.import(dummy_binding, directive);
- let _ = self.try_define(source_module, target, ValueNS, dummy_binding.clone());
- let _ = self.try_define(source_module, target, TypeNS, dummy_binding);
- }
- }
-
- /// Resolves an `ImportResolvingError` into the correct enum discriminant
- /// and passes that on to `resolve_error`.
- fn import_resolving_error(&mut self, e: ImportResolvingError<'b>) {
- // If the error is a single failed import then create a "fake" import
- // resolution for it so that later resolve stages won't complain.
- self.import_dummy_binding(e.source_module, e.import_directive);
- let path = import_path_to_string(&e.import_directive.module_path,
- &e.import_directive.subclass);
- resolve_error(self.resolver,
- e.span,
- ResolutionError::UnresolvedImport(Some((&path, &e.help))));
- }
-
- /// Attempts to resolve imports for the given module only.
- fn resolve_imports_in_current_module(&mut self, errors: &mut Vec<ImportResolvingError<'b>>) {
- let mut imports = Vec::new();
- let mut unresolved_imports = self.current_module.unresolved_imports.borrow_mut();
- ::std::mem::swap(&mut imports, &mut unresolved_imports);
-
- for import_directive in imports {
- match self.resolve_import(&import_directive) {
- Failed(err) => {
- let (span, help) = match err {
- Some((span, msg)) => (span, format!(". {}", msg)),
- None => (import_directive.span, String::new()),
- };
- errors.push(ImportResolvingError {
- source_module: self.current_module,
- import_directive: import_directive,
- span: span,
- help: help,
- });
- }
- Indeterminate => unresolved_imports.push(import_directive),
- Success(()) => {
- // Decrement the count of unresolved imports.
- assert!(self.unresolved_imports >= 1);
- self.unresolved_imports -= 1;
- }
- }
+ let _ = self.try_define(directive.parent, target, ValueNS, dummy_binding.clone());
+ let _ = self.try_define(directive.parent, target, TypeNS, dummy_binding);
}
}
names_to_string(&directive.module_path),
module_to_string(self.current_module));
- let target_module = match directive.target_module.get() {
- Some(module) => module,
- _ => match self.resolve_module_path(&directive.module_path,
- DontUseLexicalScope,
- directive.span) {
+ self.set_current_module(directive.parent);
+
+ let module = if let Some(module) = directive.imported_module.get() {
+ module
+ } else {
+ let vis = directive.vis.get();
+ // For better failure detection, pretend that the import will not define any names
+ // while resolving its module path.
+ directive.vis.set(ty::Visibility::PrivateExternal);
+ let result =
+ self.resolve_module_path(&directive.module_path, DontUseLexicalScope, None);
+ directive.vis.set(vis);
+
+ match result {
Success(module) => module,
Indeterminate => return Indeterminate,
Failed(err) => return Failed(err),
- },
+ }
};
- directive.target_module.set(Some(target_module));
- let (source, target, value_determined, type_determined) = match directive.subclass {
- SingleImport { source, target, ref value_determined, ref type_determined } =>
- (source, target, value_determined, type_determined),
- GlobImport { .. } => return self.resolve_glob_import(target_module, directive),
+ directive.imported_module.set(Some(module));
+ let (source, target, value_result, type_result) = match directive.subclass {
+ SingleImport { source, target, ref value_result, ref type_result } =>
+ (source, target, value_result, type_result),
+ GlobImport { .. } => {
+ self.resolve_glob_import(directive);
+ return Success(());
+ }
};
- // We need to resolve both namespaces for this to succeed.
- let value_result = self.resolve_name_in_module(target_module, source, ValueNS, false, true);
- let type_result = self.resolve_name_in_module(target_module, source, TypeNS, false, true);
-
- let module = self.current_module;
- let mut privacy_error = true;
- for &(ns, result, determined) in &[(ValueNS, &value_result, value_determined),
- (TypeNS, &type_result, type_determined)] {
- match *result {
- Failed(..) if !determined.get() => {
- determined.set(true);
- self.update_resolution(module, target, ns, |_, resolution| {
+ let mut indeterminate = false;
+ for &(ns, result) in &[(ValueNS, value_result), (TypeNS, type_result)] {
+ if let Err(Undetermined) = result.get() {
+ result.set({
+ match self.resolve_name_in_module(module, source, ns, false, None) {
+ Success(binding) => Ok(binding),
+ Indeterminate => Err(Undetermined),
+ Failed(_) => Err(Determined),
+ }
+ });
+ } else {
+ continue
+ };
+
+ match result.get() {
+ Err(Undetermined) => indeterminate = true,
+ Err(Determined) => {
+ self.update_resolution(directive.parent, target, ns, |_, resolution| {
resolution.single_imports.directive_failed()
});
}
- Success(binding) if !binding.is_importable() => {
+ Ok(binding) if !binding.is_importable() => {
let msg = format!("`{}` is not directly importable", target);
struct_span_err!(self.session, directive.span, E0253, "{}", &msg)
.span_label(directive.span, &format!("cannot be imported directly"))
.emit();
// Do not import this illegal binding. Import a dummy binding and pretend
// everything is fine
- self.import_dummy_binding(module, directive);
+ self.import_dummy_binding(directive);
return Success(());
}
- Success(binding) if !self.is_accessible(binding.vis) => {}
- Success(binding) if !determined.get() => {
- determined.set(true);
+ Ok(binding) => {
let imported_binding = self.import(binding, directive);
- let conflict = self.try_define(module, target, ns, imported_binding);
+ let conflict = self.try_define(directive.parent, target, ns, imported_binding);
if let Err(old_binding) = conflict {
let binding = &self.import(binding, directive);
- self.report_conflict(module, target, ns, binding, old_binding);
+ self.report_conflict(directive.parent, target, ns, binding, old_binding);
}
- privacy_error = false;
}
- Success(_) => privacy_error = false,
- _ => {}
}
}
- match (&value_result, &type_result) {
- (&Indeterminate, _) | (_, &Indeterminate) => return Indeterminate,
- (&Failed(_), &Failed(_)) => {
- let resolutions = target_module.resolutions.borrow();
- let names = resolutions.iter().filter_map(|(&(ref name, _), resolution)| {
- if *name == source { return None; } // Never suggest the same name
+ if indeterminate { Indeterminate } else { Success(()) }
+ }
+
+ fn finalize_import(&mut self, directive: &'b ImportDirective<'b>) -> ResolveResult<()> {
+ self.set_current_module(directive.parent);
+
+ let ImportDirective { ref module_path, span, .. } = *directive;
+ let module_result = self.resolve_module_path(&module_path, DontUseLexicalScope, Some(span));
+ let module = match module_result {
+ Success(module) => module,
+ Indeterminate => return Indeterminate,
+ Failed(err) => return Failed(err),
+ };
+
+ let (name, value_result, type_result) = match directive.subclass {
+ SingleImport { source, ref value_result, ref type_result, .. } =>
+ (source, value_result.get(), type_result.get()),
+ GlobImport { .. } if module.def_id() == directive.parent.def_id() => {
+ // Importing a module into itself is not allowed.
+ let msg = "Cannot glob-import a module into itself.".into();
+ return Failed(Some((directive.span, msg)));
+ }
+ GlobImport { .. } => return Success(()),
+ };
+
+ for &(ns, result) in &[(ValueNS, value_result), (TypeNS, type_result)] {
+ if let Ok(binding) = result {
+ self.record_use(name, ns, binding);
+ }
+ }
+
+ if value_result.is_err() && type_result.is_err() {
+ let (value_result, type_result);
+ value_result = self.resolve_name_in_module(module, name, ValueNS, false, Some(span));
+ type_result = self.resolve_name_in_module(module, name, TypeNS, false, Some(span));
+
+ return if let (Failed(_), Failed(_)) = (value_result, type_result) {
+ let resolutions = module.resolutions.borrow();
+ let names = resolutions.iter().filter_map(|(&(ref n, _), resolution)| {
+ if *n == name { return None; } // Never suggest the same name
match *resolution.borrow() {
- NameResolution { binding: Some(_), .. } => Some(name),
+ NameResolution { binding: Some(_), .. } => Some(n),
NameResolution { single_imports: SingleImports::None, .. } => None,
- _ => Some(name),
+ _ => Some(n),
}
});
- let lev_suggestion = match find_best_match_for_name(names, &source.as_str(), None) {
+ let lev_suggestion = match find_best_match_for_name(names, &name.as_str(), None) {
Some(name) => format!(". Did you mean to use `{}`?", name),
None => "".to_owned(),
};
- let module_str = module_to_string(target_module);
+ let module_str = module_to_string(module);
let msg = if &module_str == "???" {
- format!("There is no `{}` in the crate root{}", source, lev_suggestion)
+ format!("no `{}` in the root{}", name, lev_suggestion)
} else {
- format!("There is no `{}` in `{}`{}", source, module_str, lev_suggestion)
+ format!("no `{}` in `{}`{}", name, module_str, lev_suggestion)
};
- return Failed(Some((directive.span, msg)));
- }
- _ => (),
- }
-
- if privacy_error {
- for &(ns, result) in &[(ValueNS, &value_result), (TypeNS, &type_result)] {
- let binding = match *result { Success(binding) => binding, _ => continue };
- self.privacy_errors.push(PrivacyError(directive.span, source, binding));
- let imported_binding = self.import(binding, directive);
- let _ = self.try_define(module, target, ns, imported_binding);
+ Failed(Some((directive.span, msg)))
+ } else {
+ // `resolve_name_in_module` reported a privacy error.
+ self.import_dummy_binding(directive);
+ Success(())
}
}
- match (&value_result, &type_result) {
- (&Success(binding), _) if !binding.pseudo_vis().is_at_least(directive.vis, self) &&
- self.is_accessible(binding.vis) => {
- let msg = format!("`{}` is private, and cannot be reexported", source);
- let note_msg = format!("consider marking `{}` as `pub` in the imported module",
- source);
+ match (value_result, type_result) {
+ (Ok(binding), _) if !binding.pseudo_vis().is_at_least(directive.vis.get(), self) => {
+ let msg = format!("`{}` is private, and cannot be reexported", name);
+ let note_msg =
+ format!("consider marking `{}` as `pub` in the imported module", name);
struct_span_err!(self.session, directive.span, E0364, "{}", &msg)
.span_note(directive.span, ¬e_msg)
.emit();
}
- (_, &Success(binding)) if !binding.pseudo_vis().is_at_least(directive.vis, self) &&
- self.is_accessible(binding.vis) => {
+ (_, Ok(binding)) if !binding.pseudo_vis().is_at_least(directive.vis.get(), self) => {
if binding.is_extern_crate() {
let msg = format!("extern crate `{}` is private, and cannot be reexported \
(error E0364), consider declaring with `pub`",
- source);
+ name);
self.session.add_lint(PRIVATE_IN_PUBLIC, directive.id, directive.span, msg);
} else {
- let mut err = struct_span_err!(self.session, directive.span, E0365,
- "`{}` is private, and cannot be reexported",
- source);
- err.span_label(directive.span, &format!("reexport of private `{}`", source));
- err.note(&format!("consider declaring type or module `{}` with `pub`", source));
- err.emit();
+ struct_span_err!(self.session, directive.span, E0365,
+ "`{}` is private, and cannot be reexported", name)
+ .span_label(directive.span, &format!("reexport of private `{}`", name))
+ .note(&format!("consider declaring type or module `{}` with `pub`", name))
+ .emit();
}
}
// Record what this import resolves to for later uses in documentation,
// this may resolve to either a value or a type, but for documentation
// purposes it's good enough to just favor one over the other.
- let def = match type_result.success().and_then(NameBinding::def) {
+ let def = match type_result.ok().and_then(NameBinding::def) {
Some(def) => def,
- None => value_result.success().and_then(NameBinding::def).unwrap(),
+ None => value_result.ok().and_then(NameBinding::def).unwrap(),
};
let path_resolution = PathResolution::new(def);
self.def_map.insert(directive.id, path_resolution);
return Success(());
}
- // Resolves a glob import. Note that this function cannot fail; it either
- // succeeds or bails out (as importing * from an empty module or a module
- // that exports nothing is valid). target_module is the module we are
- // actually importing, i.e., `foo` in `use foo::*`.
- fn resolve_glob_import(&mut self, target_module: Module<'b>, directive: &'b ImportDirective<'b>)
- -> ResolveResult<()> {
- if let Some(Def::Trait(_)) = target_module.def {
- self.session.span_err(directive.span, "items in traits are not importable.");
- }
+ fn resolve_glob_import(&mut self, directive: &'b ImportDirective<'b>) {
+ let module = directive.imported_module.get().unwrap();
+ self.populate_module_if_necessary(module);
- let module = self.current_module;
- if module.def_id() == target_module.def_id() {
- // This means we are trying to glob import a module into itself, and it is a no-go
- let msg = "Cannot glob-import a module into itself.".into();
- return Failed(Some((directive.span, msg)));
+ if let Some(Def::Trait(_)) = module.def {
+ self.session.span_err(directive.span, "items in traits are not importable.");
}
- self.populate_module_if_necessary(target_module);
- if let GlobImport { is_prelude: true } = directive.subclass {
- self.prelude = Some(target_module);
- return Success(());
+ if module.def_id() == directive.parent.def_id() {
+ return;
+ } else if let GlobImport { is_prelude: true } = directive.subclass {
+ self.prelude = Some(module);
+ return;
}
- // Add to target_module's glob_importers
- target_module.glob_importers.borrow_mut().push((module, directive));
+ // Add to module's glob_importers
+ module.glob_importers.borrow_mut().push(directive);
// Ensure that `resolutions` isn't borrowed during `try_define`,
// since it might get updated via a glob cycle.
- let bindings = target_module.resolutions.borrow().iter().filter_map(|(name, resolution)| {
+ let bindings = module.resolutions.borrow().iter().filter_map(|(name, resolution)| {
resolution.borrow().binding().map(|binding| (*name, binding))
}).collect::<Vec<_>>();
for ((name, ns), binding) in bindings {
if binding.is_importable() && binding.is_pseudo_public() {
let imported_binding = self.import(binding, directive);
- let _ = self.try_define(module, name, ns, imported_binding);
+ let _ = self.try_define(directive.parent, name, ns, imported_binding);
}
}
// Record the destination of this import
- if let Some(did) = target_module.def_id() {
+ if let Some(did) = module.def_id() {
let resolution = PathResolution::new(Def::Mod(did));
self.def_map.insert(directive.id, resolution);
}
-
- debug!("(resolving glob import) successfully resolved import");
- return Success(());
}
// Miscellaneous post-processing, including recording reexports, reporting conflicts,
// reporting the PRIVATE_IN_PUBLIC lint, and reporting unresolved imports.
- fn finalize_resolutions_in(&mut self, module: Module<'b>, report_unresolved_imports: bool) {
+ fn finalize_resolutions_in(&mut self, module: Module<'b>) {
// Since import resolution is finished, globs will not define any more names.
*module.globs.borrow_mut() = Vec::new();
self.export_map.insert(node_id, reexports);
}
}
-
- if report_unresolved_imports {
- for import in module.unresolved_imports.borrow().iter() {
- resolve_error(self.resolver, import.span, ResolutionError::UnresolvedImport(None));
- break;
- }
- }
}
}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Compilation of match statements
-//!
-//! I will endeavor to explain the code as best I can. I have only a loose
-//! understanding of some parts of it.
-//!
-//! ## Matching
-//!
-//! The basic state of the code is maintained in an array `m` of `Match`
-//! objects. Each `Match` describes some list of patterns, all of which must
-//! match against the current list of values. If those patterns match, then
-//! the arm listed in the match is the correct arm. A given arm may have
-//! multiple corresponding match entries, one for each alternative that
-//! remains. As we proceed these sets of matches are adjusted by the various
-//! `enter_XXX()` functions, each of which adjusts the set of options given
-//! some information about the value which has been matched.
-//!
-//! So, initially, there is one value and N matches, each of which have one
-//! constituent pattern. N here is usually the number of arms but may be
-//! greater, if some arms have multiple alternatives. For example, here:
-//!
-//! enum Foo { A, B(int), C(usize, usize) }
-//! match foo {
-//! A => ...,
-//! B(x) => ...,
-//! C(1, 2) => ...,
-//! C(_) => ...
-//! }
-//!
-//! The value would be `foo`. There would be four matches, each of which
-//! contains one pattern (and, in one case, a guard). We could collect the
-//! various options and then compile the code for the case where `foo` is an
-//! `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1)
-//! drop the two matches that do not match a `C` and (2) expand the other two
-//! into two patterns each. In the first case, the two patterns would be `1`
-//! and `2`, and the in the second case the _ pattern would be expanded into
-//! `_` and `_`. The two values are of course the arguments to `C`.
-//!
-//! Here is a quick guide to the various functions:
-//!
-//! - `compile_submatch()`: The main workhouse. It takes a list of values and
-//! a list of matches and finds the various possibilities that could occur.
-//!
-//! - `enter_XXX()`: modifies the list of matches based on some information
-//! about the value that has been matched. For example,
-//! `enter_rec_or_struct()` adjusts the values given that a record or struct
-//! has been matched. This is an infallible pattern, so *all* of the matches
-//! must be either wildcards or record/struct patterns. `enter_opt()`
-//! handles the fallible cases, and it is correspondingly more complex.
-//!
-//! ## Bindings
-//!
-//! We store information about the bound variables for each arm as part of the
-//! per-arm `ArmData` struct. There is a mapping from identifiers to
-//! `BindingInfo` structs. These structs contain the mode/id/type of the
-//! binding, but they also contain an LLVM value which points at an alloca
-//! called `llmatch`. For by value bindings that are Copy, we also create
-//! an extra alloca that we copy the matched value to so that any changes
-//! we do to our copy is not reflected in the original and vice-versa.
-//! We don't do this if it's a move since the original value can't be used
-//! and thus allowing us to cheat in not creating an extra alloca.
-//!
-//! The `llmatch` binding always stores a pointer into the value being matched
-//! which points at the data for the binding. If the value being matched has
-//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
-//! `llmatch` has type `T**`). So, if you have a pattern like:
-//!
-//! let a: A = ...;
-//! let b: B = ...;
-//! match (a, b) { (ref c, d) => { ... } }
-//!
-//! For `c` and `d`, we would generate allocas of type `C*` and `D*`
-//! respectively. These are called the `llmatch`. As we match, when we come
-//! up against an identifier, we store the current pointer into the
-//! corresponding alloca.
-//!
-//! Once a pattern is completely matched, and assuming that there is no guard
-//! pattern, we will branch to a block that leads to the body itself. For any
-//! by-value bindings, this block will first load the ptr from `llmatch` (the
-//! one of type `D*`) and then load a second time to get the actual value (the
-//! one of type `D`). For by ref bindings, the value of the local variable is
-//! simply the first alloca.
-//!
-//! So, for the example above, we would generate a setup kind of like this:
-//!
-//! +-------+
-//! | Entry |
-//! +-------+
-//! |
-//! +--------------------------------------------+
-//! | llmatch_c = (addr of first half of tuple) |
-//! | llmatch_d = (addr of second half of tuple) |
-//! +--------------------------------------------+
-//! |
-//! +--------------------------------------+
-//! | *llbinding_d = **llmatch_d |
-//! +--------------------------------------+
-//!
-//! If there is a guard, the situation is slightly different, because we must
-//! execute the guard code. Moreover, we need to do so once for each of the
-//! alternatives that lead to the arm, because if the guard fails, they may
-//! have different points from which to continue the search. Therefore, in that
-//! case, we generate code that looks more like:
-//!
-//! +-------+
-//! | Entry |
-//! +-------+
-//! |
-//! +-------------------------------------------+
-//! | llmatch_c = (addr of first half of tuple) |
-//! | llmatch_d = (addr of first half of tuple) |
-//! +-------------------------------------------+
-//! |
-//! +-------------------------------------------------+
-//! | *llbinding_d = **llmatch_d |
-//! | check condition |
-//! | if false { goto next case } |
-//! | if true { goto body } |
-//! +-------------------------------------------------+
-//!
-//! The handling for the cleanups is a bit... sensitive. Basically, the body
-//! is the one that invokes `add_clean()` for each binding. During the guard
-//! evaluation, we add temporary cleanups and revoke them after the guard is
-//! evaluated (it could fail, after all). Note that guards and moves are
-//! just plain incompatible.
-//!
-//! Some relevant helper functions that manage bindings:
-//! - `create_bindings_map()`
-//! - `insert_lllocals()`
-//!
-//!
-//! ## Notes on vector pattern matching.
-//!
-//! Vector pattern matching is surprisingly tricky. The problem is that
-//! the structure of the vector isn't fully known, and slice matches
-//! can be done on subparts of it.
-//!
-//! The way that vector pattern matches are dealt with, then, is as
-//! follows. First, we make the actual condition associated with a
-//! vector pattern simply a vector length comparison. So the pattern
-//! [1, .. x] gets the condition "vec len >= 1", and the pattern
-//! [.. x] gets the condition "vec len >= 0". The problem here is that
-//! having the condition "vec len >= 1" hold clearly does not mean that
-//! only a pattern that has exactly that condition will match. This
-//! means that it may well be the case that a condition holds, but none
-//! of the patterns matching that condition match; to deal with this,
-//! when doing vector length matches, we have match failures proceed to
-//! the next condition to check.
-//!
-//! There are a couple more subtleties to deal with. While the "actual"
-//! condition associated with vector length tests is simply a test on
-//! the vector length, the actual vec_len Opt entry contains more
-//! information used to restrict which matches are associated with it.
-//! So that all matches in a submatch are matching against the same
-//! values from inside the vector, they are split up by how many
-//! elements they match at the front and at the back of the vector. In
-//! order to make sure that arms are properly checked in order, even
-//! with the overmatching conditions, each vec_len Opt entry is
-//! associated with a range of matches.
-//! Consider the following:
-//!
-//! match &[1, 2, 3] {
-//! [1, 1, .. _] => 0,
-//! [1, 2, 2, .. _] => 1,
-//! [1, 2, 3, .. _] => 2,
-//! [1, 2, .. _] => 3,
-//! _ => 4
-//! }
-//! The proper arm to match is arm 2, but arms 0 and 3 both have the
-//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the
-//! wrong branch would be taken. Instead, vec_len Opts are associated
-//! with a contiguous range of matches that have the same "shape".
-//! This is sort of ugly and requires a bunch of special handling of
-//! vec_len options.
-
-pub use self::BranchKind::*;
-pub use self::OptResult::*;
-pub use self::TransBindingMode::*;
-use self::Opt::*;
-use self::FailureHandler::*;
-
-use llvm::{ValueRef, BasicBlockRef};
-use rustc_const_eval::check_match::{self, Constructor, StaticInliner};
-use rustc_const_eval::{compare_lit_exprs, eval_const_expr, fatal_const_eval_err};
-use rustc::hir::def::{Def, DefMap};
-use rustc::hir::def_id::DefId;
-use middle::expr_use_visitor as euv;
-use middle::lang_items::StrEqFnLangItem;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::Categorization;
-use rustc::hir::pat_util::*;
-use rustc::ty::subst::Substs;
-use adt;
-use base::*;
-use build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast};
-use build::{Not, Store, Sub, add_comment};
-use build;
-use callee::{Callee, ArgVals};
-use cleanup::{self, CleanupMethods, DropHintMethods};
-use common::*;
-use consts;
-use datum::*;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
-use expr::{self, Dest};
-use monomorphize;
-use tvec;
-use type_of;
-use Disr;
-use value::Value;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::traits::Reveal;
-use session::config::NoDebugInfo;
-use util::common::indenter;
-use util::nodemap::FnvHashMap;
-use util::ppaux;
-
-use std;
-use std::cell::RefCell;
-use std::cmp::Ordering;
-use std::fmt;
-use std::rc::Rc;
-use rustc::hir::{self, PatKind};
-use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
-use syntax_pos::Span;
-use rustc::hir::fold::Folder;
-use syntax::ptr::P;
-
-#[derive(Copy, Clone, Debug)]
-struct ConstantExpr<'a>(&'a hir::Expr);
-
-impl<'a> ConstantExpr<'a> {
- fn eq<'b, 'tcx>(self, other: ConstantExpr<'a>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
- match compare_lit_exprs(tcx, self.0.span, self.0, other.0) {
- Ok(result) => result == Ordering::Equal,
- Err(_) => bug!("compare_list_exprs: type mismatch"),
- }
- }
-}
-
-// An option identifying a branch (either a literal, an enum variant or a range)
-#[derive(Debug)]
-enum Opt<'a, 'tcx> {
- ConstantValue(ConstantExpr<'a>, DebugLoc),
- ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>, DebugLoc),
- Variant(Disr, Rc<adt::Repr<'tcx>>, DefId, DebugLoc),
- SliceLengthEqual(usize, DebugLoc),
- SliceLengthGreaterOrEqual(/* prefix length */ usize,
- /* suffix length */ usize,
- DebugLoc),
-}
-
-impl<'a, 'b, 'tcx> Opt<'a, 'tcx> {
- fn eq(&self, other: &Opt<'a, 'tcx>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
- match (self, other) {
- (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx),
- (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => {
- a1.eq(b1, tcx) && a2.eq(b2, tcx)
- }
- (&Variant(a_disr, ref a_repr, a_def, _),
- &Variant(b_disr, ref b_repr, b_def, _)) => {
- a_disr == b_disr && *a_repr == *b_repr && a_def == b_def
- }
- (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b,
- (&SliceLengthGreaterOrEqual(a1, a2, _),
- &SliceLengthGreaterOrEqual(b1, b2, _)) => {
- a1 == b1 && a2 == b2
- }
- _ => false
- }
- }
-
- fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
- use consts::TrueConst::Yes;
- let _icx = push_ctxt("match::trans_opt");
- let ccx = bcx.ccx();
- match *self {
- ConstantValue(ConstantExpr(lit_expr), _) => {
- let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id);
- let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes);
- let llval = match expr {
- Ok((llval, _)) => llval,
- Err(err) => {
- fatal_const_eval_err(bcx.tcx(), err.as_inner(), lit_expr.span, "pattern");
- }
- };
- let lit_datum = immediate_rvalue(llval, lit_ty);
- let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
- SingleResult(Result::new(bcx, lit_datum.val))
- }
- ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
- let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) {
- Ok((l1, _)) => l1,
- Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l1.span, "pattern"),
- };
- let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) {
- Ok((l2, _)) => l2,
- Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l2.span, "pattern"),
- };
- RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
- }
- Variant(disr_val, ref repr, _, _) => {
- SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val)))
- }
- SliceLengthEqual(length, _) => {
- SingleResult(Result::new(bcx, C_uint(ccx, length)))
- }
- SliceLengthGreaterOrEqual(prefix, suffix, _) => {
- LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
- }
- }
- }
-
- fn debug_loc(&self) -> DebugLoc {
- match *self {
- ConstantValue(_,debug_loc) |
- ConstantRange(_, _, debug_loc) |
- Variant(_, _, _, debug_loc) |
- SliceLengthEqual(_, debug_loc) |
- SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc
- }
- }
-}
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum BranchKind {
- NoBranch,
- Single,
- Switch,
- Compare,
- CompareSliceLength
-}
-
-pub enum OptResult<'blk, 'tcx: 'blk> {
- SingleResult(Result<'blk, 'tcx>),
- RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
- LowerBound(Result<'blk, 'tcx>)
-}
-
-#[derive(Clone, Copy, PartialEq)]
-pub enum TransBindingMode {
- /// By-value binding for a copy type: copies from matched data
- /// into a fresh LLVM alloca.
- TrByCopy(/* llbinding */ ValueRef),
-
- /// By-value binding for a non-copy type where we copy into a
- /// fresh LLVM alloca; this most accurately reflects the language
- /// semantics (e.g. it properly handles overwrites of the matched
- /// input), but potentially injects an unwanted copy.
- TrByMoveIntoCopy(/* llbinding */ ValueRef),
-
- /// Binding a non-copy type by reference under the hood; this is
- /// a codegen optimization to avoid unnecessary memory traffic.
- TrByMoveRef,
-
- /// By-ref binding exposed in the original source input.
- TrByRef,
-}
-
-impl TransBindingMode {
- /// if binding by making a fresh copy; returns the alloca that it
- /// will copy into; otherwise None.
- fn alloca_if_copy(&self) -> Option<ValueRef> {
- match *self {
- TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding),
- TrByMoveRef | TrByRef => None,
- }
- }
-}
-
-/// Information about a pattern binding:
-/// - `llmatch` is a pointer to a stack slot. The stack slot contains a
-/// pointer into the value being matched. Hence, llmatch has type `T**`
-/// where `T` is the value being matched.
-/// - `trmode` is the trans binding mode
-/// - `id` is the node id of the binding
-/// - `ty` is the Rust type of the binding
-#[derive(Clone, Copy)]
-pub struct BindingInfo<'tcx> {
- pub llmatch: ValueRef,
- pub trmode: TransBindingMode,
- pub id: ast::NodeId,
- pub span: Span,
- pub ty: Ty<'tcx>,
-}
-
-type BindingsMap<'tcx> = FnvHashMap<ast::Name, BindingInfo<'tcx>>;
-
-struct ArmData<'p, 'blk, 'tcx: 'blk> {
- bodycx: Block<'blk, 'tcx>,
- arm: &'p hir::Arm,
- bindings_map: BindingsMap<'tcx>
-}
-
-/// Info about Match.
-/// If all `pats` are matched then arm `data` will be executed.
-/// As we proceed `bound_ptrs` are filled with pointers to values to be bound,
-/// these pointers are stored in llmatch variables just before executing `data` arm.
-struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> {
- pats: Vec<&'p hir::Pat>,
- data: &'a ArmData<'p, 'blk, 'tcx>,
- bound_ptrs: Vec<(ast::Name, ValueRef)>,
- // Thread along renamings done by the check_match::StaticInliner, so we can
- // map back to original NodeIds
- pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>>
-}
-
-impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- if ppaux::verbose() {
- // for many programs, this just take too long to serialize
- write!(f, "{:?}", self.pats)
- } else {
- write!(f, "{} pats", self.pats.len())
- }
- }
-}
-
-fn has_nested_bindings(m: &[Match], col: usize) -> bool {
- for br in m {
- if let PatKind::Binding(_, _, Some(..)) = br.pats[col].node {
- return true
- }
- }
- false
-}
-
-// As noted in `fn match_datum`, we should eventually pass around a
-// `Datum<Lvalue>` for the `val`; but until we get to that point, this
-// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>`
-// does except for the type field.
-#[derive(Copy, Clone)]
-pub struct MatchInput { val: ValueRef, lval: Lvalue }
-
-impl<'tcx> Datum<'tcx, Lvalue> {
- pub fn match_input(&self) -> MatchInput {
- MatchInput {
- val: self.val,
- lval: self.kind,
- }
- }
-}
-
-impl fmt::Debug for MatchInput {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- fmt::Debug::fmt(&Value(self.val), f)
- }
-}
-
-impl MatchInput {
- fn from_val(val: ValueRef) -> MatchInput {
- MatchInput {
- val: val,
- lval: Lvalue::new("MatchInput::from_val"),
- }
- }
-
- fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
- Datum::new(self.val, ty, self.lval)
- }
-}
-
-fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- m.iter().map(|br| {
- let mut bound_ptrs = br.bound_ptrs.clone();
- let mut pat = br.pats[col];
- loop {
- pat = match pat.node {
- PatKind::Binding(_, ref path, Some(ref inner)) => {
- bound_ptrs.push((path.node, val.val));
- &inner
- },
- _ => break
- }
- }
-
- let mut pats = br.pats.clone();
- pats[col] = pat;
- Match {
- pats: pats,
- data: &br.data,
- bound_ptrs: bound_ptrs,
- pat_renaming_map: br.pat_renaming_map,
- }
- }).collect()
-}
-
-fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput,
- mut e: F)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> where
- F: FnMut(&[(&'p hir::Pat, Option<Ty<'tcx>>)])
- -> Option<Vec<(&'p hir::Pat, Option<Ty<'tcx>>)>>,
-{
- debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- m.iter().filter_map(|br| {
- let pats : Vec<_> = br.pats.iter().map(|p| (*p, None)).collect();
- e(&pats).map(|pats| {
- let this = br.pats[col];
- let mut bound_ptrs = br.bound_ptrs.clone();
- match this.node {
- PatKind::Binding(_, ref path, None) => {
- bound_ptrs.push((path.node, val.val));
- }
- PatKind::Vec(ref before, Some(ref slice), ref after) => {
- if let PatKind::Binding(_, ref path, None) = slice.node {
- let subslice_val = bind_subslice_pat(
- bcx, this.id, val,
- before.len(), after.len());
- bound_ptrs.push((path.node, subslice_val));
- }
- }
- _ => {}
- }
- Match {
- pats: pats.into_iter().map(|p| p.0).collect(),
- data: br.data,
- bound_ptrs: bound_ptrs,
- pat_renaming_map: br.pat_renaming_map,
- }
- })
- }).collect()
-}
-
-fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- // Collect all of the matches that can match against anything.
- enter_match(bcx, m, col, val, |pats| {
- match pats[col].0.node {
- PatKind::Binding(..) | PatKind::Wild => {
- let mut r = pats[..col].to_vec();
- r.extend_from_slice(&pats[col + 1..]);
- Some(r)
- }
- _ => None
- }
- })
-}
-
-// <pcwalton> nmatsakis: what does enter_opt do?
-// <pcwalton> in trans/match
-// <pcwalton> trans/match.rs is like stumbling around in a dark cave
-// <nmatsakis> pcwalton: the enter family of functions adjust the set of
-// patterns as needed
-// <nmatsakis> yeah, at some point I kind of achieved some level of
-// understanding
-// <nmatsakis> anyhow, they adjust the patterns given that something of that
-// kind has been found
-// <nmatsakis> pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I
-// said
-// <nmatsakis> enter_match() kind of embodies the generic code
-// <nmatsakis> it is provided with a function that tests each pattern to see
-// if it might possibly apply and so forth
-// <nmatsakis> so, if you have a pattern like {a: _, b: _, _} and one like _
-// <nmatsakis> then _ would be expanded to (_, _)
-// <nmatsakis> one spot for each of the sub-patterns
-// <nmatsakis> enter_opt() is one of the more complex; it covers the fallible
-// cases
-// <nmatsakis> enter_rec_or_struct() or enter_tuple() are simpler, since they
-// are infallible patterns
-// <nmatsakis> so all patterns must either be records (resp. tuples) or
-// wildcards
-
-/// The above is now outdated in that enter_match() now takes a function that
-/// takes the complete row of patterns rather than just the first one.
-/// Also, most of the enter_() family functions have been unified with
-/// the check_match specialization step.
-fn enter_opt<'a, 'p, 'blk, 'tcx>(
- bcx: Block<'blk, 'tcx>,
- _: ast::NodeId,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- opt: &Opt,
- col: usize,
- variant_size: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})",
- bcx.to_str(), m, *opt, col, val);
- let _indenter = indenter();
-
- let ctor = match opt {
- &ConstantValue(ConstantExpr(expr), _) => Constructor::ConstantValue(
- eval_const_expr(bcx.tcx(), &expr)
- ),
- &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => Constructor::ConstantRange(
- eval_const_expr(bcx.tcx(), &lo),
- eval_const_expr(bcx.tcx(), &hi)
- ),
- &SliceLengthEqual(n, _) =>
- Constructor::Slice(n),
- &SliceLengthGreaterOrEqual(before, after, _) =>
- Constructor::SliceWithSubslice(before, after),
- &Variant(_, _, def_id, _) =>
- Constructor::Variant(def_id)
- };
-
- let param_env = bcx.tcx().empty_parameter_environment();
- let mcx = check_match::MatchCheckCtxt {
- tcx: bcx.tcx(),
- param_env: param_env,
- };
- enter_match(bcx, m, col, val, |pats|
- check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size)
- )
-}
-
-// Returns the options in one column of matches. An option is something that
-// needs to be conditionally matched at runtime; for example, the discriminant
-// on a set of enum variants or a literal.
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize)
- -> Vec<Opt<'p, 'tcx>> {
- let tcx = bcx.tcx();
-
- let mut found: Vec<Opt> = vec![];
- for br in m {
- let cur = br.pats[col];
- let debug_loc = match br.pat_renaming_map {
- Some(pat_renaming_map) => {
- match pat_renaming_map.get(&(cur.id, cur.span)) {
- Some(&id) => DebugLoc::At(id, cur.span),
- None => DebugLoc::At(cur.id, cur.span),
- }
- }
- None => DebugLoc::None
- };
-
- let opt = match cur.node {
- PatKind::Lit(ref l) => {
- ConstantValue(ConstantExpr(&l), debug_loc)
- }
- PatKind::Path(..) | PatKind::TupleStruct(..) | PatKind::Struct(..) => {
- match tcx.expect_def(cur.id) {
- Def::Variant(enum_id, var_id) => {
- let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id);
- Variant(Disr::from(variant.disr_val),
- adt::represent_node(bcx, cur.id),
- var_id,
- debug_loc)
- }
- _ => continue
- }
- }
- PatKind::Range(ref l1, ref l2) => {
- ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc)
- }
- PatKind::Vec(ref before, None, ref after) => {
- SliceLengthEqual(before.len() + after.len(), debug_loc)
- }
- PatKind::Vec(ref before, Some(_), ref after) => {
- SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc)
- }
- _ => continue
- };
-
- if !found.iter().any(|x| x.eq(&opt, tcx)) {
- found.push(opt);
- }
- }
- found
-}
-
-struct ExtractedBlock<'blk, 'tcx: 'blk> {
- vals: Vec<ValueRef>,
- bcx: Block<'blk, 'tcx>,
-}
-
-fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- repr: &adt::Repr<'tcx>,
- disr_val: Disr,
- val: MatchInput)
- -> ExtractedBlock<'blk, 'tcx> {
- let _icx = push_ctxt("match::extract_variant_args");
- // Assume enums are always sized for now.
- let val = adt::MaybeSizedValue::sized(val.val);
- let args = (0..adt::num_args(repr, disr_val)).map(|i| {
- adt::trans_field_ptr(bcx, repr, val, disr_val, i)
- }).collect();
-
- ExtractedBlock { vals: args, bcx: bcx }
-}
-
-/// Helper for converting from the ValueRef that we pass around in the match code, which is always
-/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
-fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
- val.to_datum(left_ty)
-}
-
-fn bind_subslice_pat(bcx: Block,
- pat_id: ast::NodeId,
- val: MatchInput,
- offset_left: usize,
- offset_right: usize) -> ValueRef {
- let _icx = push_ctxt("match::bind_subslice_pat");
- let vec_ty = node_id_type(bcx, pat_id);
- let vec_ty_contents = match vec_ty.sty {
- ty::TyBox(ty) => ty,
- ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty,
- _ => vec_ty
- };
- let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx());
- let vec_datum = match_datum(val, vec_ty);
- let (base, len) = vec_datum.get_vec_base_and_len(bcx);
-
- let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
- let diff = offset_left + offset_right;
- if let ty::TyArray(ty, n) = vec_ty_contents.sty {
- let array_ty = bcx.tcx().mk_array(ty, n-diff);
- let llty_array = type_of::type_of(bcx.ccx(), array_ty);
- return PointerCast(bcx, slice_begin, llty_array.ptr_to());
- }
-
- let slice_len_offset = C_uint(bcx.ccx(), diff);
- let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
- let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased),
- bcx.tcx().mk_slice(unit_ty));
- let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
- Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val));
- Store(bcx, slice_len, expr::get_meta(bcx, scratch.val));
- scratch.val
-}
-
-fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- left_ty: Ty<'tcx>,
- before: usize,
- after: usize,
- val: MatchInput)
- -> ExtractedBlock<'blk, 'tcx> {
- let _icx = push_ctxt("match::extract_vec_elems");
- let vec_datum = match_datum(val, left_ty);
- let (base, len) = vec_datum.get_vec_base_and_len(bcx);
- let mut elems = vec![];
- elems.extend((0..before).map(|i| GEPi(bcx, base, &[i])));
- elems.extend((0..after).rev().map(|i| {
- InBoundsGEP(bcx, base, &[
- Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
- ])
- }));
- ExtractedBlock { vals: elems, bcx: bcx }
-}
-
-// Macro for deciding whether any of the remaining matches fit a given kind of
-// pattern. Note that, because the macro is well-typed, either ALL of the
-// matches should fit that sort of pattern or NONE (however, some of the
-// matches may be wildcards like _ or identifiers).
-macro_rules! any_pat {
- ($m:expr, $col:expr, $pattern:pat) => (
- ($m).iter().any(|br| {
- match br.pats[$col].node {
- $pattern => true,
- _ => false
- }
- })
- )
-}
-
-fn any_uniq_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, PatKind::Box(_))
-}
-
-fn any_region_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, PatKind::Ref(..))
-}
-
-fn any_irrefutable_adt_pat(tcx: TyCtxt, m: &[Match], col: usize) -> bool {
- m.iter().any(|br| {
- let pat = br.pats[col];
- match pat.node {
- PatKind::Tuple(..) => true,
- PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => {
- match tcx.expect_def(pat.id) {
- Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => true,
- _ => false,
- }
- }
- _ => false
- }
- })
-}
-
-/// What to do when the pattern match fails.
-enum FailureHandler {
- Infallible,
- JumpToBasicBlock(BasicBlockRef),
- Unreachable
-}
-
-impl FailureHandler {
- fn is_fallible(&self) -> bool {
- match *self {
- Infallible => false,
- _ => true
- }
- }
-
- fn is_infallible(&self) -> bool {
- !self.is_fallible()
- }
-
- fn handle_fail(&self, bcx: Block) {
- match *self {
- Infallible =>
- bug!("attempted to panic in a non-panicking panic handler!"),
- JumpToBasicBlock(basic_block) =>
- Br(bcx, basic_block, DebugLoc::None),
- Unreachable =>
- build::Unreachable(bcx)
- }
- }
-}
-
-fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> {
- fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize {
- match pat.node {
- PatKind::Binding(_, _, Some(ref inner)) => pat_score(def_map, &inner),
- _ if pat_is_refutable(&def_map.borrow(), pat) => 1,
- _ => 0
- }
- }
-
- let column_score = |m: &[Match], col: usize| -> usize {
- let total_score = m.iter()
- .map(|row| row.pats[col])
- .map(|pat| pat_score(def_map, pat))
- .sum();
-
- // Irrefutable columns always go first, they'd only be duplicated in the branches.
- if total_score == 0 {
- std::usize::MAX
- } else {
- total_score
- }
- };
-
- let column_contains_any_nonwild_patterns = |&col: &usize| -> bool {
- m.iter().any(|row| match row.pats[col].node {
- PatKind::Wild => false,
- _ => true
- })
- };
-
- (0..m[0].pats.len())
- .filter(column_contains_any_nonwild_patterns)
- .map(|col| (col, column_score(m, col)))
- .max_by_key(|&(_, score)| score)
- .map(|(col, _)| col)
-}
-
-// Compiles a comparison between two things.
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: Ty<'tcx>,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
- fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lhs_data: ValueRef,
- lhs_len: ValueRef,
- rhs_data: ValueRef,
- rhs_len: ValueRef,
- rhs_t: Ty<'tcx>,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
- let did = langcall(bcx.tcx(),
- None,
- &format!("comparison of `{}`", rhs_t),
- StrEqFnLangItem);
- let args = [lhs_data, lhs_len, rhs_data, rhs_len];
- Callee::def(bcx.ccx(), did, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&args), None)
- }
-
- let _icx = push_ctxt("compare_values");
- if rhs_t.is_scalar() {
- let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc);
- return Result::new(cx, cmp);
- }
-
- match rhs_t.sty {
- ty::TyRef(_, mt) => match mt.ty.sty {
- ty::TyStr => {
- let lhs_data = Load(cx, expr::get_dataptr(cx, lhs));
- let lhs_len = Load(cx, expr::get_meta(cx, lhs));
- let rhs_data = Load(cx, expr::get_dataptr(cx, rhs));
- let rhs_len = Load(cx, expr::get_meta(cx, rhs));
- compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
- }
- ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty {
- ty::TyUint(ast::UintTy::U8) => {
- // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item,
- // which calls memcmp().
- let pat_len = val_ty(rhs).element_type().array_length();
- let ty_str_slice = cx.tcx().mk_static_str();
-
- let rhs_data = GEPi(cx, rhs, &[0, 0]);
- let rhs_len = C_uint(cx.ccx(), pat_len);
-
- let lhs_data;
- let lhs_len;
- if val_ty(lhs) == val_ty(rhs) {
- // Both the discriminant and the pattern are thin pointers
- lhs_data = GEPi(cx, lhs, &[0, 0]);
- lhs_len = C_uint(cx.ccx(), pat_len);
- } else {
- // The discriminant is a fat pointer
- let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to();
- let lhs_str = PointerCast(cx, lhs, llty_str_slice);
- lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str));
- lhs_len = Load(cx, expr::get_meta(cx, lhs_str));
- }
-
- compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
- },
- _ => bug!("only byte strings supported in compare_values"),
- },
- _ => bug!("only string and byte strings supported in compare_values"),
- },
- _ => bug!("only scalars, byte strings, and strings supported in compare_values"),
- }
-}
-
-/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
-fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- bindings_map: &BindingsMap<'tcx>,
- cs: Option<cleanup::ScopeId>)
- -> Block<'blk, 'tcx> {
- for (&name, &binding_info) in bindings_map {
- let (llval, aliases_other_state) = match binding_info.trmode {
- // By value mut binding for a copy type: load from the ptr
- // into the matched value and copy to our alloca
- TrByCopy(llbinding) |
- TrByMoveIntoCopy(llbinding) => {
- let llval = Load(bcx, binding_info.llmatch);
- let lvalue = match binding_info.trmode {
- TrByCopy(..) =>
- Lvalue::new("_match::insert_lllocals"),
- TrByMoveIntoCopy(..) => {
- // match_input moves from the input into a
- // separate stack slot.
- //
- // E.g. consider moving the value `D(A)` out
- // of the tuple `(D(A), D(B))` and into the
- // local variable `x` via the pattern `(x,_)`,
- // leaving the remainder of the tuple `(_,
- // D(B))` still to be dropped in the future.
- //
- // Thus, here we must zero the place that we
- // are moving *from*, because we do not yet
- // track drop flags for a fragmented parent
- // match input expression.
- //
- // Longer term we will be able to map the move
- // into `(x, _)` up to the parent path that
- // owns the whole tuple, and mark the
- // corresponding stack-local drop-flag
- // tracking the first component of the tuple.
- let hint_kind = HintKind::ZeroAndMaintain;
- Lvalue::new_with_hint("_match::insert_lllocals (match_input)",
- bcx, binding_info.id, hint_kind)
- }
- _ => bug!(),
- };
- let datum = Datum::new(llval, binding_info.ty, lvalue);
- call_lifetime_start(bcx, llbinding);
- bcx = datum.store_to(bcx, llbinding);
- if let Some(cs) = cs {
- bcx.fcx.schedule_lifetime_end(cs, llbinding);
- }
-
- (llbinding, false)
- },
-
- // By value move bindings: load from the ptr into the matched value
- TrByMoveRef => (Load(bcx, binding_info.llmatch), true),
-
- // By ref binding: use the ptr into the matched value
- TrByRef => (binding_info.llmatch, true),
- };
-
-
- // A local that aliases some other state must be zeroed, since
- // the other state (e.g. some parent data that we matched
- // into) will still have its subcomponents (such as this
- // local) destructed at the end of the parent's scope. Longer
- // term, we will properly map such parents to the set of
- // unique drop flags for its fragments.
- let hint_kind = if aliases_other_state {
- HintKind::ZeroAndMaintain
- } else {
- HintKind::DontZeroJustUse
- };
- let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)",
- bcx,
- binding_info.id,
- hint_kind);
- let datum = Datum::new(llval, binding_info.ty, lvalue);
- if let Some(cs) = cs {
- let opt_datum = lvalue.dropflag_hint(bcx);
- bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
- bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
- }
-
- debug!("binding {} to {:?}", binding_info.id, Value(llval));
- bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
- debuginfo::create_match_binding_metadata(bcx, name, binding_info);
- }
- bcx
-}
-
-fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- guard_expr: &hir::Expr,
- data: &ArmData<'p, 'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- has_genuine_default: bool)
- -> Block<'blk, 'tcx> {
- debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})",
- bcx.to_str(), guard_expr, m, vals);
- let _indenter = indenter();
-
- let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
-
- let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr));
- let val = val.to_llbool(bcx);
-
- for (_, &binding_info) in &data.bindings_map {
- if let Some(llbinding) = binding_info.trmode.alloca_if_copy() {
- call_lifetime_end(bcx, llbinding)
- }
- }
-
- for (_, &binding_info) in &data.bindings_map {
- bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id);
- }
-
- with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
- for (_, &binding_info) in &data.bindings_map {
- call_lifetime_end(bcx, binding_info.llmatch);
- }
- match chk {
- // If the default arm is the only one left, move on to the next
- // condition explicitly rather than (possibly) falling back to
- // the default arm.
- &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => {
- chk.handle_fail(bcx);
- }
- _ => {
- compile_submatch(bcx, m, vals, chk, has_genuine_default);
- }
- };
- bcx
- })
-}
-
-fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- has_genuine_default: bool) {
- debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])",
- bcx.to_str(), m, vals);
- let _indenter = indenter();
- let _icx = push_ctxt("match::compile_submatch");
- let mut bcx = bcx;
- if m.is_empty() {
- if chk.is_fallible() {
- chk.handle_fail(bcx);
- }
- return;
- }
-
- let tcx = bcx.tcx();
- let def_map = &tcx.def_map;
- match pick_column_to_specialize(def_map, m) {
- Some(col) => {
- let val = vals[col];
- if has_nested_bindings(m, col) {
- let expanded = expand_nested_bindings(bcx, m, col, val);
- compile_submatch_continue(bcx,
- &expanded[..],
- vals,
- chk,
- col,
- val,
- has_genuine_default)
- } else {
- compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default)
- }
- }
- None => {
- let data = &m[0].data;
- for &(ref name, ref value_ptr) in &m[0].bound_ptrs {
- let binfo = *data.bindings_map.get(name).unwrap();
- call_lifetime_start(bcx, binfo.llmatch);
- if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) {
- expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch);
- }
- else {
- Store(bcx, *value_ptr, binfo.llmatch);
- }
- }
- match data.arm.guard {
- Some(ref guard_expr) => {
- bcx = compile_guard(bcx,
- &guard_expr,
- m[0].data,
- &m[1..m.len()],
- vals,
- chk,
- has_genuine_default);
- }
- _ => ()
- }
- Br(bcx, data.bodycx.llbb, DebugLoc::None);
- }
- }
-}
-
-fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- col: usize,
- val: MatchInput,
- has_genuine_default: bool) {
- let fcx = bcx.fcx;
- let tcx = bcx.tcx();
-
- let mut vals_left = vals[0..col].to_vec();
- vals_left.extend_from_slice(&vals[col + 1..]);
- let ccx = bcx.fcx.ccx;
-
- // Find a real id (we're adding placeholder wildcard patterns, but
- // each column is guaranteed to have at least one real pattern)
- let pat_id = m.iter().map(|br| br.pats[col].id)
- .find(|&id| id != DUMMY_NODE_ID)
- .unwrap_or(DUMMY_NODE_ID);
-
- let left_ty = if pat_id == DUMMY_NODE_ID {
- tcx.mk_nil()
- } else {
- node_id_type(bcx, pat_id)
- };
-
- let mcx = check_match::MatchCheckCtxt {
- tcx: bcx.tcx(),
- param_env: bcx.tcx().empty_parameter_environment(),
- };
- let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
- let repr = adt::represent_type(bcx.ccx(), left_ty);
- let arg_count = adt::num_args(&repr, Disr(0));
- let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) {
- (arg_count, val.val)
- } else {
- // For an unsized ADT (i.e. DST struct), we need to treat
- // the last field specially: instead of simply passing a
- // ValueRef pointing to that field, as with all the others,
- // we skip it and instead construct a 'fat ptr' below.
- (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
- };
- let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
- // By definition, these are all sized
- adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix)
- ).collect();
-
- match left_ty.sty {
- ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => {
- // The last field is technically unsized but
- // since we can only ever match that field behind
- // a reference we construct a fat ptr here.
- let unsized_ty = def.struct_variant().fields.last().map(|field| {
- monomorphize::field_ty(bcx.tcx(), substs, field)
- }).unwrap();
- let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr");
-
- let meta = Load(bcx, expr::get_meta(bcx, val.val));
- let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta);
-
- let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count);
- Store(bcx, data, expr::get_dataptr(bcx, scratch));
- Store(bcx, meta, expr::get_meta(bcx, scratch));
- field_vals.push(scratch);
- }
- _ => {}
- }
- Some(field_vals)
- } else if any_uniq_pat(m, col) || any_region_pat(m, col) {
- let ptr = if type_is_fat_ptr(bcx.tcx(), left_ty) {
- val.val
- } else {
- Load(bcx, val.val)
- };
- Some(vec!(ptr))
- } else {
- match left_ty.sty {
- ty::TyArray(_, n) => {
- let args = extract_vec_elems(bcx, left_ty, n, 0, val);
- Some(args.vals)
- }
- _ => None
- }
- };
- match adt_vals {
- Some(field_vals) => {
- let pats = enter_match(bcx, m, col, val, |pats|
- check_match::specialize(&mcx, pats,
- &Constructor::Single, col,
- field_vals.len())
- );
- let mut vals: Vec<_> = field_vals.into_iter()
- .map(|v|MatchInput::from_val(v))
- .collect();
- vals.extend_from_slice(&vals_left);
- compile_submatch(bcx, &pats, &vals, chk, has_genuine_default);
- return;
- }
- _ => ()
- }
-
- // Decide what kind of branch we need
- let opts = get_branches(bcx, m, col);
- debug!("options={:?}", opts);
- let mut kind = NoBranch;
- let mut test_val = val.val;
- debug!("test_val={:?}", Value(test_val));
- if !opts.is_empty() {
- match opts[0] {
- ConstantValue(..) | ConstantRange(..) => {
- test_val = load_if_immediate(bcx, val.val, left_ty);
- kind = if left_ty.is_integral() {
- Switch
- } else {
- Compare
- };
- }
- Variant(_, ref repr, _, _) => {
- let (the_kind, val_opt) = adt::trans_switch(bcx, &repr,
- val.val, true);
- kind = the_kind;
- if let Some(tval) = val_opt { test_val = tval; }
- }
- SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => {
- let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty);
- test_val = len;
- kind = Switch;
- }
- }
- }
- for o in &opts {
- match *o {
- ConstantRange(..) => { kind = Compare; break },
- SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break },
- _ => ()
- }
- }
- let else_cx = match kind {
- NoBranch | Single => bcx,
- _ => bcx.fcx.new_temp_block("match_else")
- };
- let sw = if kind == Switch {
- build::Switch(bcx, test_val, else_cx.llbb, opts.len())
- } else {
- C_int(ccx, 0) // Placeholder for when not using a switch
- };
-
- let defaults = enter_default(else_cx, m, col, val);
- let exhaustive = chk.is_infallible() && defaults.is_empty();
- let len = opts.len();
-
- if exhaustive && kind == Switch {
- build::Unreachable(else_cx);
- }
-
- // Compile subtrees for each option
- for (i, opt) in opts.iter().enumerate() {
- // In some cases of range and vector pattern matching, we need to
- // override the failure case so that instead of failing, it proceeds
- // to try more matching. branch_chk, then, is the proper failure case
- // for the current conditional branch.
- let mut branch_chk = None;
- let mut opt_cx = else_cx;
- let debug_loc = opt.debug_loc();
-
- if kind == Switch || !exhaustive || i + 1 < len {
- opt_cx = bcx.fcx.new_temp_block("match_case");
- match kind {
- Single => Br(bcx, opt_cx.llbb, debug_loc),
- Switch => {
- match opt.trans(bcx) {
- SingleResult(r) => {
- AddCase(sw, r.val, opt_cx.llbb);
- bcx = r.bcx;
- }
- _ => {
- bug!(
- "in compile_submatch, expected \
- opt.trans() to return a SingleResult")
- }
- }
- }
- Compare | CompareSliceLength => {
- let t = if kind == Compare {
- left_ty
- } else {
- tcx.types.usize // vector length
- };
- let Result { bcx: after_cx, val: matches } = {
- match opt.trans(bcx) {
- SingleResult(Result { bcx, val }) => {
- compare_values(bcx, test_val, val, t, debug_loc)
- }
- RangeResult(Result { val: vbegin, .. },
- Result { bcx, val: vend }) => {
- let llge = compare_scalar_types(bcx, test_val, vbegin,
- t, hir::BiGe, debug_loc);
- let llle = compare_scalar_types(bcx, test_val, vend,
- t, hir::BiLe, debug_loc);
- Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
- }
- LowerBound(Result { bcx, val }) => {
- Result::new(bcx, compare_scalar_types(bcx, test_val,
- val, t, hir::BiGe,
- debug_loc))
- }
- }
- };
- bcx = fcx.new_temp_block("compare_next");
-
- // If none of the sub-cases match, and the current condition
- // is guarded or has multiple patterns, move on to the next
- // condition, if there is any, rather than falling back to
- // the default.
- let guarded = m[i].data.arm.guard.is_some();
- let multi_pats = m[i].pats.len() > 1;
- if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
- branch_chk = Some(JumpToBasicBlock(bcx.llbb));
- }
- CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc);
- }
- _ => ()
- }
- } else if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb, debug_loc);
- }
-
- let mut size = 0;
- let mut unpacked = Vec::new();
- match *opt {
- Variant(disr_val, ref repr, _, _) => {
- let ExtractedBlock {vals: argvals, bcx: new_bcx} =
- extract_variant_args(opt_cx, &repr, disr_val, val);
- size = argvals.len();
- unpacked = argvals;
- opt_cx = new_bcx;
- }
- SliceLengthEqual(len, _) => {
- let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
- size = args.vals.len();
- unpacked = args.vals.clone();
- opt_cx = args.bcx;
- }
- SliceLengthGreaterOrEqual(before, after, _) => {
- let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
- size = args.vals.len();
- unpacked = args.vals.clone();
- opt_cx = args.bcx;
- }
- ConstantValue(..) | ConstantRange(..) => ()
- }
- let opt_ms = enter_opt(opt_cx, pat_id, m, opt, col, size, val);
- let mut opt_vals: Vec<_> = unpacked.into_iter()
- .map(|v|MatchInput::from_val(v))
- .collect();
- opt_vals.extend_from_slice(&vals_left[..]);
- compile_submatch(opt_cx,
- &opt_ms[..],
- &opt_vals[..],
- branch_chk.as_ref().unwrap_or(chk),
- has_genuine_default);
- }
-
- // Compile the fall-through case, if any
- if !exhaustive && kind != Single {
- if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb, DebugLoc::None);
- }
- match chk {
- // If there is only one default arm left, move on to the next
- // condition explicitly rather than (eventually) falling back to
- // the last default arm.
- &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => {
- chk.handle_fail(else_cx);
- }
- _ => {
- compile_submatch(else_cx,
- &defaults[..],
- &vals_left[..],
- chk,
- has_genuine_default);
- }
- }
- }
-}
-
-pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- match_expr: &hir::Expr,
- discr_expr: &hir::Expr,
- arms: &[hir::Arm],
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::trans_match");
- trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
-}
-
-/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
-fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool {
- let (vid, field) = match discr.node {
- hir::ExprPath(..) => match bcx.tcx().expect_def(discr.id) {
- Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None),
- _ => return false
- },
- hir::ExprField(ref base, field) => {
- let vid = match bcx.tcx().expect_def_or_none(base.id) {
- Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
- _ => return false
- };
- (vid, Some(mc::NamedField(field.node)))
- },
- hir::ExprTupField(ref base, field) => {
- let vid = match bcx.tcx().expect_def_or_none(base.id) {
- Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
- _ => return false
- };
- (vid, Some(mc::PositionalField(field.node)))
- },
- _ => return false
- };
-
- let mut rc = ReassignmentChecker {
- node: vid,
- field: field,
- reassigned: false
- };
- bcx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| {
- let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
- visitor.walk_expr(body);
- });
- rc.reassigned
-}
-
-struct ReassignmentChecker {
- node: ast::NodeId,
- field: Option<mc::FieldName>,
- reassigned: bool
-}
-
-// Determine if the expression we're matching on is reassigned to within
-// the body of the match's arm.
-// We only care for the `mutate` callback since this check only matters
-// for cases where the matched value is moved.
-impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker {
- fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {}
- fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {}
- fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {}
- fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region,
- _: ty::BorrowKind, _: euv::LoanCause) {}
- fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {}
-
- fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) {
- let cmt_id = |cmt: &mc::cmt| match cmt.cat {
- Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, ..}, ..}) |
- Categorization::Local(vid) => Some(vid),
- Categorization::Interior(ref base_cmt, mc::InteriorField(_)) => Some(base_cmt.id),
- _ => None
- };
- match cmt.cat {
- Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
- Categorization::Local(vid) => self.reassigned |= self.node == vid,
- ref cat => {
- let mut cat = cat;
- while let &Categorization::Interior(ref base_cmt, mc::InteriorField(field)) = cat {
- if let Some(vid) = cmt_id(base_cmt) {
- if self.node == vid && (self.field.is_none() || self.field == Some(field)) {
- self.reassigned = true;
- return;
- }
- }
- cat = &base_cmt.cat;
- }
- }
- }
- }
-}
-
-fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat,
- discr: &hir::Expr, body: &hir::Expr)
- -> BindingsMap<'tcx> {
- // Create the bindings map, which is a mapping from each binding name
- // to an alloca() that will be the value for that local variable.
- // Note that we use the names because each binding will have many ids
- // from the various alternatives.
- let ccx = bcx.ccx();
- let reassigned = is_discr_reassigned(bcx, discr, body);
- let mut bindings_map = FnvHashMap();
- pat_bindings(&pat, |bm, p_id, span, path1| {
- let name = path1.node;
- let variable_ty = node_id_type(bcx, p_id);
- let llvariable_ty = type_of::type_of(ccx, variable_ty);
- let tcx = bcx.tcx();
- let param_env = tcx.empty_parameter_environment();
-
- let llmatch;
- let trmode;
- let moves_by_default = variable_ty.moves_by_default(tcx, ¶m_env, span);
- match bm {
- hir::BindByValue(_) if !moves_by_default || reassigned =>
- {
- llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch");
- let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name));
- trmode = if moves_by_default {
- TrByMoveIntoCopy(llcopy)
- } else {
- TrByCopy(llcopy)
- };
- }
- hir::BindByValue(_) => {
- // in this case, the final type of the variable will be T,
- // but during matching we need to store a *T as explained
- // above
- llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name));
- trmode = TrByMoveRef;
- }
- hir::BindByRef(_) => {
- llmatch = alloca(bcx, llvariable_ty, &bcx.name(name));
- trmode = TrByRef;
- }
- };
- bindings_map.insert(name, BindingInfo {
- llmatch: llmatch,
- trmode: trmode,
- id: p_id,
- span: span,
- ty: variable_ty
- });
- });
- return bindings_map;
-}
-
-fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
- match_id: ast::NodeId,
- discr_expr: &hir::Expr,
- arms: &[hir::Arm],
- dest: Dest) -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::trans_match_inner");
- let fcx = scope_cx.fcx;
- let mut bcx = scope_cx;
- let tcx = bcx.tcx();
-
- let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr,
- "match"));
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let t = node_id_type(bcx, discr_expr.id);
- let chk = if t.is_uninhabited(tcx) {
- Unreachable
- } else {
- Infallible
- };
-
- let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData {
- bodycx: fcx.new_id_block("case_body", arm.body.id),
- arm: arm,
- bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body)
- }).collect();
-
- let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo {
- Some(FnvHashMap())
- } else {
- None
- };
-
- let arm_pats: Vec<Vec<P<hir::Pat>>> = {
- let mut static_inliner = StaticInliner::new(scope_cx.tcx(),
- pat_renaming_map.as_mut());
- arm_datas.iter().map(|arm_data| {
- arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect()
- }).collect()
- };
-
- let mut matches = Vec::new();
- for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) {
- matches.extend(pats.iter().map(|p| Match {
- pats: vec![&p],
- data: arm_data,
- bound_ptrs: Vec::new(),
- pat_renaming_map: pat_renaming_map.as_ref()
- }));
- }
-
- // `compile_submatch` works one column of arm patterns a time and
- // then peels that column off. So as we progress, it may become
- // impossible to tell whether we have a genuine default arm, i.e.
- // `_ => foo` or not. Sometimes it is important to know that in order
- // to decide whether moving on to the next condition or falling back
- // to the default arm.
- let has_default = arms.last().map_or(false, |arm| {
- arm.pats.len() == 1
- && arm.pats.last().unwrap().node == PatKind::Wild
- });
-
- compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
-
- let mut arm_cxs = Vec::new();
- for arm_data in &arm_datas {
- let mut bcx = arm_data.bodycx;
-
- // insert bindings into the lllocals map and add cleanups
- let cs = fcx.push_custom_cleanup_scope();
- bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
- bcx = expr::trans_into(bcx, &arm_data.arm.body, dest);
- bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs);
- arm_cxs.push(bcx);
- }
-
- bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]);
- return bcx;
-}
-
-/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
-/// <opt_init_expr>`.
-pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- local: &hir::Local)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::store_local");
- let mut bcx = bcx;
- let tcx = bcx.tcx();
- let pat = &local.pat;
-
- fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- pat: &hir::Pat)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("create_dummy_locals");
- // create dummy memory for the variables if we have no
- // value to store into them immediately
- let tcx = bcx.tcx();
- pat_bindings(pat, |_, p_id, _, path1| {
- let scope = cleanup::var_scope(tcx, p_id);
- bcx = mk_binding_alloca(
- bcx, p_id, path1.node, scope, (),
- "_match::store_local::create_dummy_locals",
- |(), bcx, Datum { val: llval, ty, kind }| {
- // Dummy-locals start out uninitialized, so set their
- // drop-flag hints (if any) to "moved."
- if let Some(hint) = kind.dropflag_hint(bcx) {
- let moved_hint = adt::DTOR_MOVED_HINT;
- debug!("store moved_hint={} for hint={:?}, uninitialized dummy",
- moved_hint, hint);
- Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value());
- }
-
- if kind.drop_flag_info.must_zero() {
- // if no drop-flag hint, or the hint requires
- // we maintain the embedded drop-flag, then
- // mark embedded drop-flag(s) as moved
- // (i.e. "already dropped").
- drop_done_fill_mem(bcx, llval, ty);
- }
- bcx
- });
- });
- bcx
- }
-
- match local.init {
- Some(ref init_expr) => {
- // Optimize the "let x = expr" case. This just writes
- // the result of evaluating `expr` directly into the alloca
- // for `x`. Often the general path results in similar or the
- // same code post-optimization, but not always. In particular,
- // in unsafe code, you can have expressions like
- //
- // let x = intrinsics::uninit();
- //
- // In such cases, the more general path is unsafe, because
- // it assumes it is matching against a valid value.
- if let Some(name) = simple_name(pat) {
- let var_scope = cleanup::var_scope(tcx, local.id);
- return mk_binding_alloca(
- bcx, pat.id, name, var_scope, (),
- "_match::store_local",
- |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr,
- expr::SaveIn(v)));
- }
-
- // General path.
- let init_datum =
- unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let"));
- if bcx.sess().asm_comments() {
- add_comment(bcx, "creating zeroable ref llval");
- }
- let var_scope = cleanup::var_scope(tcx, local.id);
- bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope)
- }
- None => {
- create_dummy_locals(bcx, pat)
- }
- }
-}
-
-fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
- p_id: ast::NodeId,
- name: ast::Name,
- cleanup_scope: cleanup::ScopeId,
- arg: A,
- caller_name: &'static str,
- populate: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>,
-{
- let var_ty = node_id_type(bcx, p_id);
-
- // Allocate memory on stack for the binding.
- let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
- let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
- let datum = Datum::new(llval, var_ty, lvalue);
-
- debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}",
- cleanup_scope, Value(llval), var_ty);
-
- // Subtle: be sure that we *populate* the memory *before*
- // we schedule the cleanup.
- call_lifetime_start(bcx, llval);
- let bcx = populate(arg, bcx, datum);
- bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
- bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx));
-
- // Now that memory is initialized and has cleanup scheduled,
- // insert datum into the local variable map.
- bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
- bcx
-}
-
-/// A simple version of the pattern matching code that only handles
-/// irrefutable patterns. This is used in let/argument patterns,
-/// not in match statements. Unifying this code with the code above
-/// sounds nice, but in practice it produces very inefficient code,
-/// since the match code is so much more general. In most cases,
-/// LLVM is able to optimize the code, but it causes longer compile
-/// times and makes the generated code nigh impossible to read.
-///
-/// # Arguments
-/// - bcx: starting basic block context
-/// - pat: the irrefutable pattern being matched.
-/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- pat: &hir::Pat,
- val: MatchInput,
- cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx> {
- debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})",
- bcx.to_str(), pat, val);
-
- if bcx.sess().asm_comments() {
- add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
- pat));
- }
-
- let _indenter = indenter();
-
- let _icx = push_ctxt("match::bind_irrefutable_pat");
- let mut bcx = bcx;
- let tcx = bcx.tcx();
- let ccx = bcx.ccx();
- match pat.node {
- PatKind::Binding(pat_binding_mode, ref path1, ref inner) => {
- // Allocate the stack slot where the value of this
- // binding will live and place it into the appropriate
- // map.
- bcx = mk_binding_alloca(bcx, pat.id, path1.node, cleanup_scope, (),
- "_match::bind_irrefutable_pat",
- |(), bcx, Datum { val: llval, ty, kind: _ }| {
- match pat_binding_mode {
- hir::BindByValue(_) => {
- // By value binding: move the value that `val`
- // points at into the binding's stack slot.
- let d = val.to_datum(ty);
- d.store_to(bcx, llval)
- }
-
- hir::BindByRef(_) => {
- // By ref binding: the value of the variable
- // is the pointer `val` itself or fat pointer referenced by `val`
- if type_is_fat_ptr(bcx.tcx(), ty) {
- expr::copy_fat_ptr(bcx, val.val, llval);
- }
- else {
- Store(bcx, val.val, llval);
- }
-
- bcx
- }
- }
- });
-
- if let Some(ref inner_pat) = *inner {
- bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope);
- }
- }
- PatKind::TupleStruct(_, ref sub_pats, ddpos) => {
- match bcx.tcx().expect_def(pat.id) {
- Def::Variant(enum_id, var_id) => {
- let repr = adt::represent_node(bcx, pat.id);
- let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id);
- let args = extract_variant_args(bcx,
- &repr,
- Disr::from(vinfo.disr_val),
- val);
- for (i, subpat) in sub_pats.iter()
- .enumerate_and_adjust(vinfo.fields.len(), ddpos) {
- bcx = bind_irrefutable_pat(
- bcx,
- subpat,
- MatchInput::from_val(args.vals[i]),
- cleanup_scope);
- }
- }
- Def::Struct(..) => {
- let expected_len = match *ccx.tcx().pat_ty(&pat) {
- ty::TyS{sty: ty::TyStruct(adt_def, _), ..} => {
- adt_def.struct_variant().fields.len()
- }
- ref ty => {
- span_bug!(pat.span, "tuple struct pattern unexpected type {:?}", ty);
- }
- };
-
- let repr = adt::represent_node(bcx, pat.id);
- let val = adt::MaybeSizedValue::sized(val.val);
- for (i, elem) in sub_pats.iter().enumerate_and_adjust(expected_len, ddpos) {
- let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
- bcx = bind_irrefutable_pat(
- bcx,
- &elem,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- _ => {
- // Nothing to do here.
- }
- }
- }
- PatKind::Struct(_, ref fields, _) => {
- let tcx = bcx.tcx();
- let pat_ty = node_id_type(bcx, pat.id);
- let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
- let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id);
-
- let val = if type_is_sized(tcx, pat_ty) {
- adt::MaybeSizedValue::sized(val.val)
- } else {
- let data = Load(bcx, expr::get_dataptr(bcx, val.val));
- let meta = Load(bcx, expr::get_meta(bcx, val.val));
- adt::MaybeSizedValue::unsized_(data, meta)
- };
-
- for f in fields {
- let name = f.node.name;
- let field_idx = pat_v.field_index(name);
- let mut fldptr = adt::trans_field_ptr(
- bcx,
- &pat_repr,
- val,
- pat_v.discr,
- field_idx);
-
- let fty = pat_v.fields[field_idx].1;
- // If it's not sized, then construct a fat pointer instead of
- // a regular one
- if !type_is_sized(tcx, fty) {
- let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
- debug!("Creating fat pointer {:?}", Value(scratch));
- Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
- Store(bcx, val.meta, expr::get_meta(bcx, scratch));
- fldptr = scratch;
- }
- bcx = bind_irrefutable_pat(bcx,
- &f.node.pat,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- PatKind::Tuple(ref elems, ddpos) => {
- match tcx.node_id_to_type(pat.id).sty {
- ty::TyTuple(ref tys) => {
- let repr = adt::represent_node(bcx, pat.id);
- let val = adt::MaybeSizedValue::sized(val.val);
- for (i, elem) in elems.iter().enumerate_and_adjust(tys.len(), ddpos) {
- let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
- bcx = bind_irrefutable_pat(
- bcx,
- &elem,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty),
- }
- }
- PatKind::Box(ref inner) => {
- let pat_ty = node_id_type(bcx, inner.id);
- // Pass along DSTs as fat pointers.
- let val = if type_is_fat_ptr(tcx, pat_ty) {
- // We need to check for this, as the pattern could be binding
- // a fat pointer by-value.
- if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node {
- val.val
- } else {
- Load(bcx, val.val)
- }
- } else if type_is_sized(tcx, pat_ty) {
- Load(bcx, val.val)
- } else {
- val.val
- };
- bcx = bind_irrefutable_pat(
- bcx, &inner, MatchInput::from_val(val), cleanup_scope);
- }
- PatKind::Ref(ref inner, _) => {
- let pat_ty = node_id_type(bcx, inner.id);
- // Pass along DSTs as fat pointers.
- let val = if type_is_fat_ptr(tcx, pat_ty) {
- // We need to check for this, as the pattern could be binding
- // a fat pointer by-value.
- if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node {
- val.val
- } else {
- Load(bcx, val.val)
- }
- } else if type_is_sized(tcx, pat_ty) {
- Load(bcx, val.val)
- } else {
- val.val
- };
- bcx = bind_irrefutable_pat(
- bcx,
- &inner,
- MatchInput::from_val(val),
- cleanup_scope);
- }
- PatKind::Vec(ref before, ref slice, ref after) => {
- let pat_ty = node_id_type(bcx, pat.id);
- let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
- match slice {
- &Some(_) => {
- extracted.vals.insert(
- before.len(),
- bind_subslice_pat(bcx, pat.id, val, before.len(), after.len())
- );
- }
- &None => ()
- }
- bcx = before
- .iter()
- .chain(slice.iter())
- .chain(after.iter())
- .zip(extracted.vals)
- .fold(bcx, |bcx, (inner, elem)| {
- bind_irrefutable_pat(
- bcx,
- &inner,
- MatchInput::from_val(elem),
- cleanup_scope)
- });
- }
- PatKind::Path(..) | PatKind::Wild |
- PatKind::Lit(..) | PatKind::Range(..) => ()
- }
- return bcx;
-}
use syntax::ast;
use syntax::attr;
use syntax::attr::IntType;
-use _match;
use abi::FAT_PTR_ADDR;
-use base::InitAlloca;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use datum;
use debuginfo::DebugLoc;
use glue;
use machine;
use type_of;
use value::Value;
-type Hint = attr::ReprAttr;
-
-// Representation of the context surrounding an unsized type. I want
-// to be able to track the drop flags that are injected by trans.
-#[derive(Clone, Copy, PartialEq, Debug)]
-pub struct TypeContext {
- prefix: Type,
- needs_drop_flag: bool,
+#[derive(Copy, Clone, PartialEq)]
+pub enum BranchKind {
+ Switch,
+ Single
}
-impl TypeContext {
- pub fn prefix(&self) -> Type { self.prefix }
- pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag }
-
- fn direct(t: Type) -> TypeContext {
- TypeContext { prefix: t, needs_drop_flag: false }
- }
- fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext {
- TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
- }
-}
+type Hint = attr::ReprAttr;
/// Representations.
#[derive(Eq, PartialEq, Debug)]
/// C-like enums; basically an int.
CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
/// Single-case variants, and structs/tuples/records.
- ///
- /// Structs with destructors need a dynamic destroyedness flag to
- /// avoid running the destructor too many times; this is included
- /// in the `Struct` if present.
- /// (The flag if nonzero, represents the initialization value to use;
- /// if zero, then use no flag at all.)
- Univariant(Struct<'tcx>, u8),
+ Univariant(Struct<'tcx>),
/// General-case enums: for each case there is a struct, and they
/// all start with a field for the discriminant.
- ///
- /// Types with destructors need a dynamic destroyedness flag to
- /// avoid running the destructor too many times; the last argument
- /// indicates whether such a flag is present.
- /// (The flag, if nonzero, represents the initialization value to use;
- /// if zero, then use no flag at all.)
- General(IntType, Vec<Struct<'tcx>>, u8),
+ General(IntType, Vec<Struct<'tcx>>),
/// Two cases distinguished by a nullable pointer: the case with discriminant
/// `nndiscr` must have single field which is known to be nonnull due to its type.
/// The other case is known to be zero sized. Hence we represent the enum
}
}
-/// Convenience for `represent_type`. There should probably be more or
-/// these, for places in trans where the `Ty` isn't directly
-/// available.
-pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- node: ast::NodeId) -> Rc<Repr<'tcx>> {
- represent_type(bcx.ccx(), node_id_type(bcx, node))
-}
-
/// Decides how to represent a given type.
pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>)
repr
}
-const fn repeat_u8_as_u32(val: u8) -> u32 {
- (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
-}
-
-const fn repeat_u8_as_u64(val: u8) -> u64 {
- (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
-}
-
-/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
-/// "we do not know whether the destructor has run or not; check the
-/// drop-flag embedded in the value itself."
-pub const DTOR_NEEDED_HINT: u8 = 0x3d;
-
-/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
-/// definitely been moved; you do not need to run its destructor."
-///
-/// (However, for now, such values may still end up being explicitly
-/// zeroed by the generated code; this is the distinction between
-/// `datum::DropFlagInfo::ZeroAndMaintain` versus
-/// `datum::DropFlagInfo::DontZeroJustUse`.)
-pub const DTOR_MOVED_HINT: u8 = 0x2d;
-
-pub const DTOR_NEEDED: u8 = 0xd4;
-#[allow(dead_code)]
-pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED);
-
-pub const DTOR_DONE: u8 = 0x1d;
-#[allow(dead_code)]
-pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE);
-
-fn dtor_to_init_u8(dtor: bool) -> u8 {
- if dtor { DTOR_NEEDED } else { 0 }
-}
-
-pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; }
-impl<'a, 'tcx> GetDtorType<'tcx> for TyCtxt<'a, 'tcx, 'tcx> {
- fn dtor_type(self) -> Ty<'tcx> { self.types.u8 }
-}
-
-fn dtor_active(flag: u8) -> bool {
- flag != 0
-}
-
fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Repr<'tcx> {
match t.sty {
ty::TyTuple(ref elems) => {
- Univariant(mk_struct(cx, &elems[..], false, t), 0)
+ Univariant(mk_struct(cx, &elems[..], false, t))
}
ty::TyStruct(def, substs) => {
- let mut ftys = def.struct_variant().fields.iter().map(|field| {
+ let ftys = def.struct_variant().fields.iter().map(|field| {
monomorphize::field_ty(cx.tcx(), substs, field)
}).collect::<Vec<_>>();
let packed = cx.tcx().lookup_packed(def.did);
- // FIXME(16758) don't add a drop flag to unsized structs, as it
- // won't actually be in the location we say it is because it'll be after
- // the unsized field. Several other pieces of code assume that the unsized
- // field is definitely the last one.
- let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t);
- if dtor {
- ftys.push(cx.tcx().dtor_type());
- }
- Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
+ Univariant(mk_struct(cx, &ftys[..], packed, t))
}
ty::TyClosure(_, ref substs) => {
- Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
+ Univariant(mk_struct(cx, &substs.upvar_tys, false, t))
}
ty::TyEnum(def, substs) => {
let cases = get_cases(cx.tcx(), def, substs);
let hint = *cx.tcx().lookup_repr_hints(def.did).get(0)
.unwrap_or(&attr::ReprAny);
- let dtor = def.dtor_kind().has_drop_flag();
-
if cases.is_empty() {
// Uninhabitable; represent as unit
// (Typechecking will reject discriminant-sizing attrs.)
assert_eq!(hint, attr::ReprAny);
- let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
- return Univariant(mk_struct(cx, &ftys[..], false, t),
- dtor_to_init_u8(dtor));
+ return Univariant(mk_struct(cx, &[], false, t));
}
- if !dtor && cases.iter().all(|c| c.tys.is_empty()) {
+ if cases.iter().all(|c| c.tys.is_empty()) {
// All bodies empty -> intlike
let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect();
let bounds = IntBounds {
if cases.len() == 1 && hint == attr::ReprAny {
// Equivalent to a struct/tuple/newtype.
- let mut ftys = cases[0].tys.clone();
- if dtor { ftys.push(cx.tcx().dtor_type()); }
- return Univariant(mk_struct(cx, &ftys[..], false, t),
- dtor_to_init_u8(dtor));
+ return Univariant(mk_struct(cx, &cases[0].tys, false, t));
}
- if !dtor && cases.len() == 2 && hint == attr::ReprAny {
+ if cases.len() == 2 && hint == attr::ReprAny {
// Nullable pointer optimization
let mut discr = 0;
while discr < 2 {
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
ftys.extend_from_slice(&c.tys);
- if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys, false, t)
}).collect();
let fields : Vec<_> = cases.iter().map(|c| {
let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
ftys.extend_from_slice(&c.tys);
- if dtor { ftys.push(cx.tcx().dtor_type()); }
mk_struct(cx, &ftys[..], false, t)
}).collect();
ensure_enum_fits_in_address_space(cx, &fields[..], t);
- General(ity, fields, dtor_to_init_u8(dtor))
+ General(ity, fields)
}
_ => bug!("adt::represent_type called on non-ADT type: {}", t)
}
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
- let c = generic_type_of(cx, r, None, false, false, false);
- assert!(!c.needs_drop_flag);
- c.prefix
+ generic_type_of(cx, r, None, false, false)
}
// are going to get the wrong type (it will not include the unsized parts of it).
pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, dst: bool) -> Type {
- let c = generic_type_of(cx, r, None, true, dst, false);
- assert!(!c.needs_drop_flag);
- c.prefix
-}
-pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- r: &Repr<'tcx>, dst: bool) -> TypeContext {
- generic_type_of(cx, r, None, true, dst, true)
+ generic_type_of(cx, r, None, true, dst)
}
+
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, name: &str) -> Type {
- let c = generic_type_of(cx, r, Some(name), false, false, false);
- assert!(!c.needs_drop_flag);
- c.prefix
+ generic_type_of(cx, r, Some(name), false, false)
}
+
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
r: &Repr<'tcx>, llty: &mut Type) {
match *r {
CEnum(..) | General(..) | RawNullablePointer { .. } => { }
- Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
+ Univariant(ref st) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
llty.set_struct_body(&struct_llfields(cx, st, false, false),
st.packed)
}
r: &Repr<'tcx>,
name: Option<&str>,
sizing: bool,
- dst: bool,
- delay_drop_flag: bool) -> TypeContext {
- debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}",
- r, name, sizing, dst, delay_drop_flag);
+ dst: bool) -> Type {
+ debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {}",
+ r, name, sizing, dst);
match *r {
- CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)),
+ CEnum(ity, _, _) => ll_inttype(cx, ity),
RawNullablePointer { nnty, .. } =>
- TypeContext::direct(type_of::sizing_type_of(cx, nnty)),
+ type_of::sizing_type_of(cx, nnty),
StructWrappedNullablePointer { nonnull: ref st, .. } => {
match name {
None => {
- TypeContext::direct(
- Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
- st.packed))
+ Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
+ st.packed)
}
Some(name) => {
assert_eq!(sizing, false);
- TypeContext::direct(Type::named_struct(cx, name))
+ Type::named_struct(cx, name)
}
}
}
- Univariant(ref st, dtor_needed) => {
- let dtor_needed = dtor_needed != 0;
+ Univariant(ref st) => {
match name {
None => {
- let mut fields = struct_llfields(cx, st, sizing, dst);
- if delay_drop_flag && dtor_needed {
- fields.pop();
- }
- TypeContext::may_need_drop_flag(
- Type::struct_(cx, &fields,
- st.packed),
- delay_drop_flag && dtor_needed)
+ let fields = struct_llfields(cx, st, sizing, dst);
+ Type::struct_(cx, &fields, st.packed)
}
Some(name) => {
// Hypothesis: named_struct's can never need a
// drop flag. (... needs validation.)
assert_eq!(sizing, false);
- TypeContext::direct(Type::named_struct(cx, name))
+ Type::named_struct(cx, name)
}
}
}
- General(ity, ref sts, dtor_needed) => {
- let dtor_needed = dtor_needed != 0;
+ General(ity, ref sts) => {
// We need a representation that has:
// * The alignment of the most-aligned field
// * The size of the largest variant (rounded up to that alignment)
};
assert_eq!(machine::llalign_of_min(cx, fill_ty), align);
assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
- let mut fields: Vec<Type> =
+ let fields: Vec<Type> =
[discr_ty,
Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size),
fill_ty].iter().cloned().collect();
- if delay_drop_flag && dtor_needed {
- fields.pop();
- }
match name {
None => {
- TypeContext::may_need_drop_flag(
- Type::struct_(cx, &fields[..], false),
- delay_drop_flag && dtor_needed)
+ Type::struct_(cx, &fields[..], false)
}
Some(name) => {
let mut llty = Type::named_struct(cx, name);
llty.set_struct_body(&fields[..], false);
- TypeContext::may_need_drop_flag(
- llty,
- delay_drop_flag && dtor_needed)
+ llty
}
}
}
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
-///
-/// This should ideally be less tightly tied to `_match`.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>,
scrutinee: ValueRef,
range_assert: bool)
- -> (_match::BranchKind, Option<ValueRef>) {
+ -> (BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
- (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
- range_assert)))
+ (BranchKind::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, range_assert)))
}
Univariant(..) => {
// N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
- (_match::Single, None)
+ (BranchKind::Single, None)
}
}
}
pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
match *r {
CEnum(ity, _, _) => ity.is_signed(),
- General(ity, _, _) => ity.is_signed(),
+ General(ity, _) => ity.is_signed(),
Univariant(..) => false,
RawNullablePointer { .. } => false,
StructWrappedNullablePointer { .. } => false,
CEnum(ity, min, max) => {
load_discr(bcx, ity, scrutinee, min, max, range_assert)
}
- General(ity, ref cases, _) => {
+ General(ity, ref cases) => {
let ptr = StructGEP(bcx, scrutinee, 0);
load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1),
range_assert)
CEnum(ity, _, _) => {
C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
}
- General(ity, _, _) => {
+ General(ity, _) => {
C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true)
}
Univariant(..) => {
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
val);
}
- General(ity, ref cases, dtor) => {
- if dtor_active(dtor) {
- let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
- cases[discr.0 as usize].fields.len() - 2);
- Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
- }
+ General(ity, _) => {
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
StructGEP(bcx, val, 0));
}
- Univariant(ref st, dtor) => {
+ Univariant(_) => {
assert_eq!(discr, Disr(0));
- if dtor_active(dtor) {
- Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED),
- StructGEP(bcx, val, st.fields.len() - 1));
- }
}
RawNullablePointer { nndiscr, nnty, ..} => {
if discr != nndiscr {
}
}
-/// The number of fields in a given case; for use when obtaining this
-/// information from the type or definition is less convenient.
-pub fn num_args(r: &Repr, discr: Disr) -> usize {
- match *r {
- CEnum(..) => 0,
- Univariant(ref st, dtor) => {
- assert_eq!(discr, Disr(0));
- st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
- }
- General(_, ref cases, dtor) => {
- cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
- }
- RawNullablePointer { nndiscr, ref nullfields, .. } => {
- if discr == nndiscr { 1 } else { nullfields.len() }
- }
- StructWrappedNullablePointer { ref nonnull, nndiscr,
- ref nullfields, .. } => {
- if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
- }
- }
-}
-
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
CEnum(..) => {
bug!("element access in C-like enum")
}
- Univariant(ref st, _dtor) => {
+ Univariant(ref st) => {
assert_eq!(discr, Disr(0));
struct_field_ptr(bcx, st, val, ix, false)
}
- General(_, ref cases, _) => {
+ General(_, ref cases) => {
struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true)
}
RawNullablePointer { nndiscr, ref nullfields, .. } |
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
}
-pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- r: &Repr<'tcx>,
- value: ValueRef,
- mut f: F)
- -> Block<'blk, 'tcx> where
- F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
- let fcx = bcx.fcx;
- match *r {
- Univariant(ref st, _) => {
- f(bcx, st, value)
- }
- General(ity, ref cases, _) => {
- let ccx = bcx.ccx();
-
- // See the comments in trans/base.rs for more information (inside
- // iter_structural_ty), but the gist here is that if the enum's
- // discriminant is *not* in the range that we're expecting (in which
- // case we'll take the fall-through branch on the switch
- // instruction) then we can't just optimize this to an Unreachable
- // block.
- //
- // Currently we still have filling drop, so this means that the drop
- // glue for enums may be called when the enum has been paved over
- // with the "I've been dropped" value. In this case the default
- // branch of the switch instruction will actually be taken at
- // runtime, so the basic block isn't actually unreachable, so we
- // need to make it do something with defined behavior. In this case
- // we just return early from the function.
- //
- // Note that this is also why the `trans_get_discr` below has
- // `false` to indicate that loading the discriminant should
- // not have a range assert.
- let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
- RetVoid(ret_void_cx, DebugLoc::None);
-
- let discr_val = trans_get_discr(bcx, r, value, None, false);
- let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
- let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
-
- for (discr, case) in cases.iter().enumerate() {
- let mut variant_cx = fcx.new_temp_block(
- &format!("enum-variant-iter-{}", &discr.to_string())
- );
- let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
- AddCase(llswitch, rhs_val, variant_cx.llbb);
-
- let fields = case.fields.iter().map(|&ty|
- type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], case.packed);
- let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
-
- variant_cx = f(variant_cx, case, variant_value);
- Br(variant_cx, bcx_next.llbb, DebugLoc::None);
- }
-
- bcx_next
- }
- _ => bug!()
- }
-}
-
-/// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- r: &Repr<'tcx>,
- val: ValueRef)
- -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
-{
- let tcx = bcx.tcx();
- let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
- match *r {
- Univariant(ref st, dtor) if dtor_active(dtor) => {
- let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1);
- datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
- }
- General(_, _, dtor) if dtor_active(dtor) => {
- let fcx = bcx.fcx;
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
- let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
- bcx, tcx.dtor_type(), "drop_flag",
- InitAlloca::Uninit("drop flag itself has no dtor"),
- cleanup::CustomScope(custom_cleanup_scope), |bcx, _| {
- debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
- tcx.dtor_type());
- bcx
- }
- ));
- bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
- let ptr = struct_field_ptr(&variant_cx.build(), st,
- MaybeSizedValue::sized(value),
- (st.fields.len() - 1), false);
- datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
- .store_to(variant_cx, scratch.val)
- });
- let expr_datum = scratch.to_expr_datum();
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
- datum::DatumBlock::new(bcx, expr_datum)
- }
- _ => bug!("tried to get drop flag of non-droppable type")
- }
-}
-
/// Construct a constant value, suitable for initializing a
/// GlobalVariable, given a case and constant values for its fields.
/// Note that this may have a different LLVM type (and different
assert_discr_in_range(ity, min, max, discr);
C_integral(ll_inttype(ccx, ity), discr.0, true)
}
- General(ity, ref cases, _) => {
+ General(ity, ref cases) => {
let case = &cases[discr.0 as usize];
let (max_sz, _) = union_size_and_align(&cases[..]);
let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true);
contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]);
C_struct(ccx, &contents[..], false)
}
- Univariant(ref st, _dro) => {
+ Univariant(ref st) => {
assert_eq!(discr, Disr(0));
let contents = build_const_struct(ccx, st, vals);
C_struct(ccx, &contents[..], st.packed)
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
-/// Get the discriminant of a constant value.
-pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr {
- match *r {
- CEnum(ity, _, _) => {
- match ity {
- attr::SignedInt(..) => Disr(const_to_int(val) as u64),
- attr::UnsignedInt(..) => Disr(const_to_uint(val)),
- }
- }
- General(ity, _, _) => {
- match ity {
- attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64),
- attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0])))
- }
- }
- Univariant(..) => Disr(0),
- RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
- bug!("const discrim access of non c-like enum")
- }
- }
-}
-
/// Extract a field of a constant value, as appropriate for its
/// representation.
///
use base;
use build::*;
use common::*;
-use datum::{Datum, Lvalue};
use type_of;
use type_::Type;
-use rustc::hir as ast;
+use rustc::hir;
+use rustc::ty::Ty;
+
use std::ffi::CString;
use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ia: &ast::InlineAsm,
- outputs: Vec<Datum<'tcx, Lvalue>>,
+ ia: &hir::InlineAsm,
+ outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>) {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
- for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() {
+ for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
- Some(base::load_ty(bcx, out_datum.val, out_datum.ty))
+ Some(base::load_ty(bcx, val, ty))
} else {
None
};
if out.is_indirect {
indirect_outputs.push(val.unwrap());
} else {
- output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
+ output_types.push(type_of::type_of(bcx.ccx(), ty));
}
}
if !indirect_outputs.is_empty() {
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
- for (i, (_, datum)) in outputs.enumerate() {
+ for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
- Store(bcx, v, datum.val);
+ Store(bcx, v, val);
}
// Store expn_id in a metadata node so we can map LLVM errors
use util::common::time;
use util::fs::fix_windows_verbatim_for_gcc;
use rustc::dep_graph::DepNode;
-use rustc::ty::TyCtxt;
+use rustc::hir::svh::Svh;
use rustc_back::tempdir::TempDir;
+use rustc_incremental::IncrementalHashesMap;
-use rustc_incremental::SvhCalculate;
use std::ascii;
use std::char;
use std::env;
}
-pub fn build_link_meta<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- name: &str)
- -> LinkMeta {
+pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap,
+ name: &str)
+ -> LinkMeta {
let r = LinkMeta {
crate_name: name.to_owned(),
- crate_hash: tcx.calculate_krate_hash(),
+ crate_hash: Svh::new(incremental_hashes_map[&DepNode::Krate]),
};
info!("{:?}", r);
return r;
return
}
+ let mut arg = OsString::new();
let path = tmpdir.join("list");
- let prefix = if self.sess.target.target.options.is_like_osx {
- "_"
- } else {
- ""
- };
- let res = (|| -> io::Result<()> {
- let mut f = BufWriter::new(File::create(&path)?);
- for sym in &self.info.cdylib_exports {
- writeln!(f, "{}{}", prefix, sym)?;
+
+ if self.sess.target.target.options.is_like_solaris {
+ let res = (|| -> io::Result<()> {
+ let mut f = BufWriter::new(File::create(&path)?);
+ writeln!(f, "{{\n global:")?;
+ for sym in &self.info.cdylib_exports {
+ writeln!(f, " {};", sym)?;
+ }
+ writeln!(f, "\n local:\n *;\n}};")?;
+ Ok(())
+ })();
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write version script: {}", e));
}
- Ok(())
- })();
- if let Err(e) = res {
- self.sess.fatal(&format!("failed to write lib.def file: {}", e));
- }
- let mut arg = OsString::new();
- if self.sess.target.target.options.is_like_osx {
- arg.push("-Wl,-exported_symbols_list,");
+
+ arg.push("-Wl,-M,");
+ arg.push(&path);
} else {
- arg.push("-Wl,--retain-symbols-file=");
+ let prefix = if self.sess.target.target.options.is_like_osx {
+ "_"
+ } else {
+ ""
+ };
+ let res = (|| -> io::Result<()> {
+ let mut f = BufWriter::new(File::create(&path)?);
+ for sym in &self.info.cdylib_exports {
+ writeln!(f, "{}{}", prefix, sym)?;
+ }
+ Ok(())
+ })();
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ }
+ if self.sess.target.target.options.is_like_osx {
+ arg.push("-Wl,-exported_symbols_list,");
+ } else {
+ arg.push("-Wl,--retain-symbols-file=");
+ }
+ arg.push(&path);
}
- arg.push(&path);
+
self.cmd.arg(arg);
}
}
use rustc::middle::{cstore, weak_lang_items};
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
-use rustc::ty::{self, TyCtxt, TypeFoldable};
+use rustc::ty::{Ty, TyCtxt, TypeFoldable};
use rustc::ty::item_path::{self, ItemPathBuffer, RootMode};
+use rustc::ty::subst::Substs;
use rustc::hir::map::definitions::{DefPath, DefPathData};
use syntax::attr;
// parameters substituted; this is
// included in the hash as a kind of
// safeguard.
- item_type: ty::Ty<'tcx>,
+ item_type: Ty<'tcx>,
// values for generic type parameters,
// if any.
- parameters: &[ty::Ty<'tcx>])
+ substs: Option<&Substs<'tcx>>)
-> String {
debug!("get_symbol_hash(def_path={:?}, parameters={:?})",
- def_path, parameters);
+ def_path, substs);
let tcx = scx.tcx();
hash_state.input(&encoded_item_type[..]);
// also include any type parameters (for generic items)
- for t in parameters {
- assert!(!t.has_erasable_regions());
- assert!(!t.needs_subst());
- let encoded_type = tcx.sess.cstore.encode_type(tcx, t, def_id_to_string);
- hash_state.input(&encoded_type[..]);
+ if let Some(substs) = substs {
+ for t in substs.types() {
+ assert!(!t.has_erasable_regions());
+ assert!(!t.needs_subst());
+ let encoded_type = tcx.sess.cstore.encode_type(tcx, t, def_id_to_string);
+ hash_state.input(&encoded_type[..]);
+ }
}
return format!("h{}", truncated_hash_result(&mut *hash_state));
// and should not matter anyhow.
let instance_ty = scx.tcx().erase_regions(&instance_ty.ty);
- let hash = get_symbol_hash(scx, &def_path, instance_ty, &substs.types);
+ let hash = get_symbol_hash(scx, &def_path, instance_ty, Some(substs));
let mut buffer = SymbolPathBuffer {
names: Vec::with_capacity(def_path.data.len())
}
pub fn exported_name_from_type_and_prefix<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
- t: ty::Ty<'tcx>,
+ t: Ty<'tcx>,
prefix: &str)
-> String {
let empty_def_path = DefPath {
data: vec![],
krate: cstore::LOCAL_CRATE,
};
- let hash = get_symbol_hash(scx, &empty_def_path, t, &[]);
+ let hash = get_symbol_hash(scx, &empty_def_path, t, None);
let path = [token::intern_and_get_ident(prefix)];
mangle(path.iter().cloned(), Some(&hash[..]))
}
/// Only symbols that are invisible outside their compilation unit should use a
/// name generated by this function.
pub fn internal_name_from_type_and_suffix<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- t: ty::Ty<'tcx>,
+ t: Ty<'tcx>,
suffix: &str)
-> String {
let path = [token::intern(&t.to_string()).as_str(),
data: vec![],
krate: cstore::LOCAL_CRATE,
};
- let hash = get_symbol_hash(ccx.shared(), &def_path, t, &[]);
+ let hash = get_symbol_hash(ccx.shared(), &def_path, t, None);
mangle(path.iter().cloned(), Some(&hash[..]))
}
use assert_module_sources;
use back::link;
use back::linker::LinkerInfo;
-use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
+use llvm::{Linkage, ValueRef, Vector, get_param};
use llvm;
-use rustc::cfg;
use rustc::hir::def_id::DefId;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
-use rustc::hir::pat_util::simple_name;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::hir::map as hir_map;
use rustc::util::common::time;
use rustc::mir::mir_map::MirMap;
-use rustc_data_structures::graph::OUTGOING;
-use session::config::{self, NoDebugInfo, FullDebugInfo};
+use session::config::{self, NoDebugInfo};
+use rustc_incremental::IncrementalHashesMap;
use session::Session;
-use _match;
use abi::{self, Abi, FnType};
use adt;
use attributes;
use build::*;
use builder::{Builder, noname};
-use callee::{Callee, CallArgs, ArgExprs, ArgVals};
-use cleanup::{self, CleanupMethods, DropHint};
-use closure;
-use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
+use callee::Callee;
+use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
-use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
-use common::{Result, NodeIdAndSpan, VariantInfo};
-use common::{node_id_type, fulfill_obligation};
-use common::{type_is_immediate, type_is_zero_size, val_ty};
+use common::{CrateContext, FunctionContext};
+use common::Result;
+use common::fulfill_obligation;
+use common::{type_is_zero_size, val_ty};
use common;
use consts;
use context::{SharedCrateContext, CrateContextList};
-use controlflow;
-use datum;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
+use debuginfo::{self, DebugLoc};
use declare;
-use expr;
-use glue;
-use inline;
use machine;
use machine::{llalign_of_min, llsize_of};
use meth;
use symbol_map::SymbolMap;
use symbol_names_test;
use trans_item::TransItem;
-use tvec;
use type_::Type;
use type_of;
use value::Value;
use Disr;
-use util::common::indenter;
use util::sha2::Sha256;
-use util::nodemap::{NodeMap, NodeSet, FnvHashSet};
+use util::nodemap::{NodeSet, FnvHashMap, FnvHashSet};
use arena::TypedArena;
use libc::c_uint;
use std::ffi::{CStr, CString};
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
-use std::collections::HashMap;
use std::ptr;
use std::rc::Rc;
use std::str;
-use std::{i8, i16, i32, i64};
+use std::i32;
use syntax_pos::{Span, DUMMY_SP};
-use syntax::parse::token::InternedString;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
-use rustc::hir::intravisit::{self, Visitor};
use rustc::hir;
use syntax::ast;
}
}
-pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
- *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
+pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+ StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+ StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
// Allocate space:
let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
let r = Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&[size, align]), None);
+ .call(bcx, debug_loc, &[size, align], None);
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
}
-// Iterates through the elements of a structural type.
-pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
- av: ValueRef,
- t: Ty<'tcx>,
- mut f: F)
- -> Block<'blk, 'tcx>
- where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
-{
- let _icx = push_ctxt("iter_structural_ty");
-
- fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
- repr: &adt::Repr<'tcx>,
- av: adt::MaybeSizedValue,
- variant: ty::VariantDef<'tcx>,
- substs: &Substs<'tcx>,
- f: &mut F)
- -> Block<'blk, 'tcx>
- where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
- {
- let _icx = push_ctxt("iter_variant");
- let tcx = cx.tcx();
- let mut cx = cx;
-
- for (i, field) in variant.fields.iter().enumerate() {
- let arg = monomorphize::field_ty(tcx, substs, field);
- cx = f(cx,
- adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
- arg);
- }
- return cx;
- }
-
- let value = if common::type_is_sized(cx.tcx(), t) {
- adt::MaybeSizedValue::sized(av)
- } else {
- let data = Load(cx, expr::get_dataptr(cx, av));
- let info = Load(cx, expr::get_meta(cx, av));
- adt::MaybeSizedValue::unsized_(data, info)
- };
-
- let mut cx = cx;
- match t.sty {
- ty::TyStruct(..) => {
- let repr = adt::represent_type(cx.ccx(), t);
- let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
- for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
-
- let val = if common::type_is_sized(cx.tcx(), field_ty) {
- llfld_a
- } else {
- let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
- Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
- Store(cx, value.meta, expr::get_meta(cx, scratch.val));
- scratch.val
- };
- cx = f(cx, val, field_ty);
- }
- }
- ty::TyClosure(_, ref substs) => {
- let repr = adt::represent_type(cx.ccx(), t);
- for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
- let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
- cx = f(cx, llupvar, upvar_ty);
- }
- }
- ty::TyArray(_, n) => {
- let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
- let unit_ty = t.sequence_element_type(cx.tcx());
- cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
- }
- ty::TySlice(_) | ty::TyStr => {
- let unit_ty = t.sequence_element_type(cx.tcx());
- cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
- }
- ty::TyTuple(ref args) => {
- let repr = adt::represent_type(cx.ccx(), t);
- for (i, arg) in args.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
- cx = f(cx, llfld_a, *arg);
- }
- }
- ty::TyEnum(en, substs) => {
- let fcx = cx.fcx;
- let ccx = fcx.ccx;
-
- let repr = adt::represent_type(ccx, t);
- let n_variants = en.variants.len();
-
- // NB: we must hit the discriminant first so that structural
- // comparison know not to proceed when the discriminants differ.
-
- match adt::trans_switch(cx, &repr, av, false) {
- (_match::Single, None) => {
- if n_variants != 0 {
- assert!(n_variants == 1);
- cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
- &en.variants[0], substs, &mut f);
- }
- }
- (_match::Switch, Some(lldiscrim_a)) => {
- cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
-
- // Create a fall-through basic block for the "else" case of
- // the switch instruction we're about to generate. Note that
- // we do **not** use an Unreachable instruction here, even
- // though most of the time this basic block will never be hit.
- //
- // When an enum is dropped it's contents are currently
- // overwritten to DTOR_DONE, which means the discriminant
- // could have changed value to something not within the actual
- // range of the discriminant. Currently this function is only
- // used for drop glue so in this case we just return quickly
- // from the outer function, and any other use case will only
- // call this for an already-valid enum in which case the `ret
- // void` will never be hit.
- let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
- RetVoid(ret_void_cx, DebugLoc::None);
- let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
- let next_cx = fcx.new_temp_block("enum-iter-next");
-
- for variant in &en.variants {
- let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
- &variant.disr_val
- .to_string()));
- let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
- AddCase(llswitch, case_val, variant_cx.llbb);
- let variant_cx = iter_variant(variant_cx,
- &repr,
- value,
- variant,
- substs,
- &mut f);
- Br(variant_cx, next_cx.llbb, DebugLoc::None);
- }
- cx = next_cx;
- }
- _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
- }
- }
- _ => {
- cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
- }
- }
- return cx;
-}
-
-
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
let src_repr = adt::represent_type(bcx.ccx(), src_ty);
let src_fields = match &*src_repr {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
+ &adt::Repr::Univariant(ref s) => &s.fields,
_ => bug!("struct has non-univariant repr"),
};
let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
let dst_fields = match &*dst_repr {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
+ &adt::Repr::Univariant(ref s) => &s.fields,
_ => bug!("struct has non-univariant repr"),
};
-> CustomCoerceUnsized {
let trait_ref = ty::Binder(ty::TraitRef {
def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
- substs: Substs::new_trait(scx.tcx(), vec![target_ty], vec![], source_ty)
+ substs: Substs::new_trait(scx.tcx(), source_ty, &[target_ty])
});
match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
}
}
-pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- val_t: Ty<'tcx>)
- -> (Type, u64) {
- match val_t.sty {
- ty::TyInt(t) => {
- let llty = Type::int_from_ty(cx.ccx(), t);
- let min = match t {
- ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
- ast::IntTy::Is => i64::MIN as u64,
- ast::IntTy::I8 => i8::MIN as u64,
- ast::IntTy::I16 => i16::MIN as u64,
- ast::IntTy::I32 => i32::MIN as u64,
- ast::IntTy::I64 => i64::MIN as u64,
- };
- (llty, min)
- }
- _ => bug!(),
- }
-}
-
-pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- divrem: hir::BinOp,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- use rustc_const_math::{ConstMathErr, Op};
-
- let (zero_err, overflow_err) = if divrem.node == hir::BiDiv {
- (ConstMathErr::DivisionByZero, ConstMathErr::Overflow(Op::Div))
- } else {
- (ConstMathErr::RemainderByZero, ConstMathErr::Overflow(Op::Rem))
- };
- let debug_loc = call_info.debug_loc();
-
- let (is_zero, is_signed) = match rhs_t.sty {
- ty::TyInt(t) => {
- let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
- (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
- }
- ty::TyUint(t) => {
- let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
- (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
- }
- ty::TyStruct(def, _) if def.is_simd() => {
- let mut res = C_bool(cx.ccx(), false);
- for i in 0..rhs_t.simd_size(cx.tcx()) {
- res = Or(cx,
- res,
- IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
- debug_loc);
- }
- (res, false)
- }
- _ => {
- bug!("fail-if-zero on unexpected type: {}", rhs_t);
- }
- };
- let bcx = with_cond(cx, is_zero, |bcx| {
- controlflow::trans_fail(bcx, call_info, InternedString::new(zero_err.description()))
- });
-
- // To quote LLVM's documentation for the sdiv instruction:
- //
- // Division by zero leads to undefined behavior. Overflow also leads
- // to undefined behavior; this is a rare case, but can occur, for
- // example, by doing a 32-bit division of -2147483648 by -1.
- //
- // In order to avoid undefined behavior, we perform runtime checks for
- // signed division/remainder which would trigger overflow. For unsigned
- // integers, no action beyond checking for zero need be taken.
- if is_signed {
- let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
- let minus_one = ICmp(bcx,
- llvm::IntEQ,
- rhs,
- C_integral(llty, !0, false),
- debug_loc);
- with_cond(bcx, minus_one, |bcx| {
- let is_min = ICmp(bcx,
- llvm::IntEQ,
- lhs,
- C_integral(llty, min, true),
- debug_loc);
- with_cond(bcx, is_min, |bcx| {
- controlflow::trans_fail(bcx, call_info,
- InternedString::new(overflow_err.description()))
- })
- })
- } else {
- bcx
- }
-}
-
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llfn: ValueRef,
llargs: &[ValueRef],
return (C_null(Type::i8(bcx.ccx())), bcx);
}
- match bcx.opt_node_id {
- None => {
- debug!("invoke at ???");
- }
- Some(id) => {
- debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
- }
- }
-
if need_invoke(bcx) {
debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
for &llarg in llargs {
debug!("arg: {:?}", Value(llarg));
}
- let normal_bcx = bcx.fcx.new_temp_block("normal-return");
+ let normal_bcx = bcx.fcx.new_block("normal-return");
let landing_pad = bcx.fcx.get_landing_pad();
let llresult = Invoke(bcx,
}
}
-pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
- let _icx = push_ctxt("load_if_immediate");
- if type_is_immediate(cx.ccx(), t) {
- return load_ty(cx, v, t);
- }
- return v;
-}
-
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
if common::type_is_fat_ptr(cx.tcx(), t) {
Store(cx,
ExtractValue(cx, v, abi::FAT_PTR_ADDR),
- expr::get_dataptr(cx, dst));
+ get_dataptr(cx, dst));
Store(cx,
ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
- expr::get_meta(cx, dst));
+ get_meta(cx, dst));
} else {
Store(cx, from_immediate(cx, v), dst);
}
dst: ValueRef,
_ty: Ty<'tcx>) {
// FIXME: emit metadata
- Store(cx, data, expr::get_dataptr(cx, dst));
- Store(cx, extra, expr::get_meta(cx, dst));
+ Store(cx, data, get_dataptr(cx, dst));
+ Store(cx, extra, get_meta(cx, dst));
}
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
// FIXME: emit metadata
- (Load(cx, expr::get_dataptr(cx, src)),
- Load(cx, expr::get_meta(cx, src)))
+ (Load(cx, get_dataptr(cx, src)),
+ Load(cx, get_meta(cx, src)))
}
pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
}
}
-pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
- debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
- let _indenter = indenter();
- let _icx = push_ctxt("init_local");
- _match::store_local(bcx, local)
-}
-
-pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
- llbb: BasicBlockRef)
- -> Block<'blk, 'tcx> {
- common::BlockS::new(llbb, None, fcx)
-}
-
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
{
}
let fcx = bcx.fcx;
- let next_cx = fcx.new_temp_block("next");
- let cond_cx = fcx.new_temp_block("cond");
+ let next_cx = fcx.new_block("next");
+ let cond_cx = fcx.new_block("cond");
CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
let after_cx = f(cond_cx);
if !after_cx.terminated.get() {
} else {
let exc_ptr = ExtractValue(bcx, lpval, 0);
bcx.fcx.eh_unwind_resume()
- .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None);
+ .call(bcx, DebugLoc::None, &[exc_ptr], None);
}
}
}
}
-pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
- if cx.unreachable.get() {
- return;
- }
- let _icx = push_ctxt("drop_done_fill_mem");
- let bcx = cx;
- memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
-}
-
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
if cx.unreachable.get() {
return;
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}
-
-/// In general, when we create an scratch value in an alloca, the
-/// creator may not know if the block (that initializes the scratch
-/// with the desired value) actually dominates the cleanup associated
-/// with the scratch value.
-///
-/// To deal with this, when we do an alloca (at the *start* of whole
-/// function body), we optionally can also set the associated
-/// dropped-flag state of the alloca to "dropped."
-#[derive(Copy, Clone, Debug)]
-pub enum InitAlloca {
- /// Indicates that the state should have its associated drop flag
- /// set to "dropped" at the point of allocation.
- Dropped,
- /// Indicates the value of the associated drop flag is irrelevant.
- /// The embedded string literal is a programmer provided argument
- /// for why. This is a safeguard forcing compiler devs to
- /// document; it might be a good idea to also emit this as a
- /// comment with the alloca itself when emitting LLVM output.ll.
- Uninit(&'static str),
-}
-
-
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
+ ty: Ty<'tcx>,
name: &str) -> ValueRef {
- // pnkfelix: I do not know why alloc_ty meets the assumptions for
- // passing Uninit, but it was never needed (even back when we had
- // the original boolean `zero` flag on `lvalue_scratch_datum`).
- alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name)
-}
-
-/// This variant of `fn alloc_ty` does not necessarily assume that the
-/// alloca should be created with no initial value. Instead the caller
-/// controls that assumption via the `init` flag.
-///
-/// Note that if the alloca *is* initialized via `init`, then we will
-/// also inject an `llvm.lifetime.start` before that initialization
-/// occurs, and thus callers should not call_lifetime_start
-/// themselves. But if `init` says "uninitialized", then callers are
-/// in charge of choosing where to call_lifetime_start and
-/// subsequently populate the alloca.
-///
-/// (See related discussion on PR #30823.)
-pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
- init: InitAlloca,
- name: &str) -> ValueRef {
- let _icx = push_ctxt("alloc_ty");
- let ccx = bcx.ccx();
- let ty = type_of::type_of(ccx, t);
- assert!(!t.has_param_types());
- match init {
- InitAlloca::Dropped => alloca_dropped(bcx, t, name),
- InitAlloca::Uninit(_) => alloca(bcx, ty, name),
- }
-}
-
-pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
- let _icx = push_ctxt("alloca_dropped");
- let llty = type_of::type_of(cx.ccx(), ty);
- if cx.unreachable.get() {
- unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); }
- }
- let p = alloca(cx, llty, name);
- let b = cx.fcx.ccx.builder();
- b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
-
- // This is just like `call_lifetime_start` (but latter expects a
- // Block, which we do not have for `alloca_insert_pt`).
- core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
- let ptr = b.pointercast(p, Type::i8p(ccx));
- b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
- });
- memfill(&b, p, ty, adt::DTOR_DONE);
- p
+ assert!(!ty.has_param_types());
+ alloca(bcx, type_of::type_of(bcx.ccx(), ty), name)
}
pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
Alloca(cx, ty, name)
}
-pub fn set_value_name(val: ValueRef, name: &str) {
- unsafe {
- let name = CString::new(name).unwrap();
- llvm::LLVMSetValueName(val, name.as_ptr());
- }
-}
-
-struct FindNestedReturn {
- found: bool,
-}
-
-impl FindNestedReturn {
- fn new() -> FindNestedReturn {
- FindNestedReturn {
- found: false,
- }
- }
-}
-
-impl<'v> Visitor<'v> for FindNestedReturn {
- fn visit_expr(&mut self, e: &hir::Expr) {
- match e.node {
- hir::ExprRet(..) => {
- self.found = true;
- }
- _ => intravisit::walk_expr(self, e),
- }
- }
-}
-
-fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- id: ast::NodeId)
- -> (ast::NodeId, Option<cfg::CFG>) {
- let blk = match tcx.map.find(id) {
- Some(hir_map::NodeItem(i)) => {
- match i.node {
- hir::ItemFn(_, _, _, _, _, ref blk) => {
- blk
- }
- _ => bug!("unexpected item variant in has_nested_returns"),
- }
- }
- Some(hir_map::NodeTraitItem(trait_item)) => {
- match trait_item.node {
- hir::MethodTraitItem(_, Some(ref body)) => body,
- _ => {
- bug!("unexpected variant: trait item other than a provided method in \
- has_nested_returns")
- }
- }
- }
- Some(hir_map::NodeImplItem(impl_item)) => {
- match impl_item.node {
- hir::ImplItemKind::Method(_, ref body) => body,
- _ => {
- bug!("unexpected variant: non-method impl item in has_nested_returns")
- }
- }
- }
- Some(hir_map::NodeExpr(e)) => {
- match e.node {
- hir::ExprClosure(_, _, ref blk, _) => blk,
- _ => bug!("unexpected expr variant in has_nested_returns"),
- }
- }
- Some(hir_map::NodeVariant(..)) |
- Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),
-
- // glue, shims, etc
- None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
-
- _ => bug!("unexpected variant in has_nested_returns: {}",
- tcx.node_path_str(id)),
- };
-
- (blk.id, Some(cfg::CFG::new(tcx, blk)))
-}
-
-// Checks for the presence of "nested returns" in a function.
-// Nested returns are when the inner expression of a return expression
-// (the 'expr' in 'return expr') contains a return expression. Only cases
-// where the outer return is actually reachable are considered. Implicit
-// returns from the end of blocks are considered as well.
-//
-// This check is needed to handle the case where the inner expression is
-// part of a larger expression that may have already partially-filled the
-// return slot alloca. This can cause errors related to clean-up due to
-// the clobbering of the existing value in the return slot.
-fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
- for index in cfg.graph.depth_traverse(cfg.entry, OUTGOING) {
- let n = cfg.graph.node_data(index);
- match tcx.map.find(n.id()) {
- Some(hir_map::NodeExpr(ex)) => {
- if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
- let mut visitor = FindNestedReturn::new();
- intravisit::walk_expr(&mut visitor, &ret_expr);
- if visitor.found {
- return true;
- }
- }
- }
- Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
- let mut visitor = FindNestedReturn::new();
- walk_list!(&mut visitor, visit_expr, &blk.expr);
- if visitor.found {
- return true;
- }
- }
- _ => {}
- }
- }
-
- return false;
-}
-
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
/// Create a function context for the given function.
/// Beware that you must call `fcx.init` or `fcx.bind_args`
pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
llfndecl: ValueRef,
fn_ty: FnType,
- definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi, ast::NodeId)>,
+ definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>,
block_arena: &'blk TypedArena<common::BlockS<'blk, 'tcx>>)
-> FunctionContext<'blk, 'tcx> {
- let (param_substs, def_id, inlined_id) = match definition {
- Some((instance, _, _, inlined_id)) => {
+ let (param_substs, def_id) = match definition {
+ Some((instance, _, _)) => {
common::validate_substs(instance.substs);
- (instance.substs, Some(instance.def), Some(inlined_id))
+ (instance.substs, Some(instance.def))
}
- None => (Substs::empty(ccx.tcx()), None, None)
+ None => (Substs::empty(ccx.tcx()), None)
};
let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id));
debug!("FunctionContext::new({})",
definition.map_or(String::new(), |d| d.0.to_string()));
- let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id));
- let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg {
- has_nested_returns(ccx.tcx(), cfg, blk_id)
- } else {
- false
- };
-
- let check_attrs = |attrs: &[ast::Attribute]| {
- let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
- let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
- (default_to_mir ^ attrs.iter().any(|item| item.check_name(invert)),
- attrs.iter().any(|item| item.check_name("no_debug")))
- };
-
- let (use_mir, no_debug) = if let Some(id) = local_id {
- check_attrs(ccx.tcx().map.attrs(id))
+ let no_debug = if let Some(id) = local_id {
+ ccx.tcx().map.attrs(id)
+ .iter().any(|item| item.check_name("no_debug"))
} else if let Some(def_id) = def_id {
- check_attrs(&ccx.sess().cstore.item_attrs(def_id))
+ ccx.sess().cstore.item_attrs(def_id)
+ .iter().any(|item| item.check_name("no_debug"))
} else {
- check_attrs(&[])
+ false
};
- let mir = if use_mir {
- def_id.and_then(|id| ccx.get_mir(id))
- } else {
- None
- };
+ let mir = def_id.and_then(|id| ccx.get_mir(id));
- let debug_context = if let (false, Some(definition)) = (no_debug, definition) {
- let (instance, sig, abi, _) = definition;
- debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl)
+ let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) =
+ (no_debug, definition, &mir) {
+ debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir)
} else {
debuginfo::empty_function_debug_context(ccx)
};
FunctionContext {
- needs_ret_allocas: nested_returns && mir.is_none(),
mir: mir,
llfn: llfndecl,
llretslotptr: Cell::new(None),
param_env: ccx.tcx().empty_parameter_environment(),
alloca_insert_pt: Cell::new(None),
- llreturn: Cell::new(None),
landingpad_alloca: Cell::new(None),
- lllocals: RefCell::new(NodeMap()),
- llupvars: RefCell::new(NodeMap()),
- lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
fn_ty: fn_ty,
param_substs: param_substs,
- span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)),
+ span: None,
block_arena: block_arena,
lpad_arena: TypedArena::new(),
ccx: ccx,
debug_context: debug_context,
scopes: RefCell::new(Vec::new()),
- cfg: cfg.and_then(|(_, cfg)| cfg)
}
}
/// Performs setup on a newly created function, creating the entry
/// scope block and allocating space for the return pointer.
- pub fn init(&'blk self, skip_retptr: bool, fn_did: Option<DefId>)
- -> Block<'blk, 'tcx> {
- let entry_bcx = self.new_temp_block("entry-block");
+ pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
+ let entry_bcx = self.new_block("entry-block");
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in FunctionContext::cleanup.
// which will hold the pointer to the right alloca which has the
// final ret value
let llty = self.fn_ty.ret.memory_ty(self.ccx);
- let slot = if self.needs_ret_allocas {
- // Let's create the stack slot
- let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr");
-
- // and if we're using an out pointer, then store that in our newly made slot
- if self.fn_ty.ret.is_indirect() {
- let outptr = get_param(self.llfn, 0);
-
- let b = self.ccx.builder();
- b.position_before(self.alloca_insert_pt.get().unwrap());
- b.store(outptr, slot);
- }
-
- slot
+        // There is no longer any indirection through a retslot pointer:
+        // either use the caller-provided out pointer (indirect return)
+        // or allocate a single retslot in the entry block.
+ let slot = if self.fn_ty.ret.is_indirect() {
+ get_param(self.llfn, 0)
} else {
- // But if there are no nested returns, we skip the indirection
- // and have a single retslot
- if self.fn_ty.ret.is_indirect() {
- get_param(self.llfn, 0)
- } else {
- AllocaFcx(self, llty, "sret_slot")
- }
+ AllocaFcx(self, llty, "sret_slot")
};
self.llretslotptr.set(Some(slot));
}
- // Create the drop-flag hints for every unfragmented path in the function.
- let tcx = self.ccx.tcx();
- let tables = tcx.tables.borrow();
- let mut hints = self.lldropflag_hints.borrow_mut();
- let fragment_infos = tcx.fragment_infos.borrow();
-
- // Intern table for drop-flag hint datums.
- let mut seen = HashMap::new();
-
- let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did));
- if let Some(fragment_infos) = fragment_infos {
- for &info in fragment_infos {
-
- let make_datum = |id| {
- let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT);
- let llname = &format!("dropflag_hint_{}", id);
- debug!("adding hint {}", llname);
- let ty = tcx.types.u8;
- let ptr = alloc_ty(entry_bcx, ty, llname);
- Store(entry_bcx, init_val, ptr);
- let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init");
- datum::Datum::new(ptr, ty, flag)
- };
-
- let (var, datum) = match info {
- ty::FragmentInfo::Moved { var, .. } |
- ty::FragmentInfo::Assigned { var, .. } => {
- let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| {
- let ty = tables.node_types[&var];
- if self.type_needs_drop(ty) {
- let datum = make_datum(var);
- seen.insert(var, Some(datum.clone()));
- Some(datum)
- } else {
- // No drop call needed, so we don't need a dropflag hint
- None
- }
- });
- if let Some(datum) = opt_datum {
- (var, datum)
- } else {
- continue
- }
- }
- };
- match info {
- ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
- debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
- hints.insert(expr_id, DropHint::new(var, datum));
- }
- ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
- debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
- hints.insert(expr_id, DropHint::new(var, datum));
- }
- }
- }
- }
-
entry_bcx
}
- /// Creates lvalue datums for each of the incoming function arguments,
- /// matches all argument patterns against them to produce bindings,
- /// and returns the entry block (see FunctionContext::init).
- fn bind_args(&'blk self,
- args: &[hir::Arg],
- abi: Abi,
- id: ast::NodeId,
- closure_env: closure::ClosureEnv,
- arg_scope: cleanup::CustomScopeIndex)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("FunctionContext::bind_args");
- let fn_did = self.ccx.tcx().map.local_def_id(id);
- let mut bcx = self.init(false, Some(fn_did));
- let arg_scope_id = cleanup::CustomScope(arg_scope);
-
- let mut idx = 0;
- let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize;
-
- let has_tupled_arg = match closure_env {
- closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
- closure::ClosureEnv::Closure(..) => {
- closure_env.load(bcx, arg_scope_id);
- let env_arg = &self.fn_ty.args[idx];
- idx += 1;
- if env_arg.pad.is_some() {
- llarg_idx += 1;
- }
- if !env_arg.is_ignore() {
- llarg_idx += 1;
- }
- false
- }
- };
- let tupled_arg_id = if has_tupled_arg {
- args[args.len() - 1].id
- } else {
- ast::DUMMY_NODE_ID
- };
-
- // Return an array wrapping the ValueRefs that we get from `get_param` for
- // each argument into datums.
- //
- // For certain mode/type combinations, the raw llarg values are passed
- // by value. However, within the fn body itself, we want to always
- // have all locals and arguments be by-ref so that we can cancel the
- // cleanup and for better interaction with LLVM's debug info. So, if
- // the argument would be passed by value, we store it into an alloca.
- // This alloca should be optimized away by LLVM's mem-to-reg pass in
- // the event it's not truly needed.
- let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor");
- for hir_arg in args {
- let arg_ty = node_id_type(bcx, hir_arg.id);
- let arg_datum = if hir_arg.id != tupled_arg_id {
- let arg = &self.fn_ty.args[idx];
- idx += 1;
- if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
- // Don't copy an indirect argument to an alloca, the caller
- // already put it in a temporary alloca and gave it up, unless
- // we emit extra-debug-info, which requires local allocas :(.
- let llarg = get_param(self.llfn, llarg_idx as c_uint);
- llarg_idx += 1;
- self.schedule_lifetime_end(arg_scope_id, llarg);
- self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
-
- datum::Datum::new(llarg,
- arg_ty,
- datum::Lvalue::new("FunctionContext::bind_args"))
- } else {
- unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
- uninit_reason,
- arg_scope_id, |bcx, dst| {
- debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty);
- let b = &bcx.build();
- if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
- let meta = &self.fn_ty.args[idx];
- idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
- } else {
- arg.store_fn_arg(b, &mut llarg_idx, dst);
- }
- bcx
- }))
- }
- } else {
- // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
- let tupled_arg_tys = match arg_ty.sty {
- ty::TyTuple(ref tys) => tys,
- _ => bug!("last argument of `rust-call` fn isn't a tuple?!")
- };
-
- unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx,
- arg_ty,
- "tupled_args",
- uninit_reason,
- arg_scope_id,
- |bcx, llval| {
- debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty);
- for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
- let dst = StructGEP(bcx, llval, j);
- let arg = &self.fn_ty.args[idx];
- idx += 1;
- let b = &bcx.build();
- if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
- let meta = &self.fn_ty.args[idx];
- idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
- } else {
- arg.store_fn_arg(b, &mut llarg_idx, dst);
- }
- }
- bcx
- }))
- };
-
- let pat = &hir_arg.pat;
- bcx = if let Some(name) = simple_name(pat) {
- // Generate nicer LLVM for the common case of fn a pattern
- // like `x: T`
- set_value_name(arg_datum.val, &bcx.name(name));
- self.lllocals.borrow_mut().insert(pat.id, arg_datum);
- bcx
- } else {
- // General path. Copy out the values that are used in the
- // pattern.
- _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
- };
- debuginfo::create_argument_metadata(bcx, hir_arg);
- }
-
- bcx
- }
-
/// Ties up the llstaticallocas -> llloadenv -> lltop edges,
/// and builds the return block.
- pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>,
+ pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
ret_debug_loc: DebugLoc) {
let _icx = push_ctxt("FunctionContext::finish");
- let ret_cx = match self.llreturn.get() {
- Some(llreturn) => {
- if !last_bcx.terminated.get() {
- Br(last_bcx, llreturn, DebugLoc::None);
- }
- raw_block(self, llreturn)
- }
- None => last_bcx,
- };
-
self.build_return_block(ret_cx, ret_debug_loc);
DebugLoc::None.apply(self);
ret_debug_location: DebugLoc) {
if self.llretslotptr.get().is_none() ||
ret_cx.unreachable.get() ||
- (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) {
+ self.fn_ty.ret.is_indirect() {
return RetVoid(ret_cx, ret_debug_location);
}
- let retslot = if self.needs_ret_allocas {
- Load(ret_cx, self.llretslotptr.get().unwrap())
- } else {
- self.llretslotptr.get().unwrap()
- };
+ let retslot = self.llretslotptr.get().unwrap();
let retptr = Value(retslot);
let llty = self.fn_ty.ret.original_ty;
match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) {
///
/// If the function closes over its environment a closure will be returned.
pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- decl: &hir::FnDecl,
- body: &hir::Block,
llfndecl: ValueRef,
instance: Instance<'tcx>,
- inlined_id: ast::NodeId,
sig: &ty::FnSig<'tcx>,
- abi: Abi,
- closure_env: closure::ClosureEnv) {
+ abi: Abi) {
ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
fcx = FunctionContext::new(ccx,
llfndecl,
fn_ty,
- Some((instance, sig, abi, inlined_id)),
+ Some((instance, sig, abi)),
&arena);
- if fcx.mir.is_some() {
- return mir::trans_mir(&fcx);
+ if fcx.mir.is_none() {
+ bug!("attempted translation of `{}` w/o MIR", instance);
}
- debuginfo::fill_scope_map_for_function(&fcx, decl, body, inlined_id);
-
- // cleanup scope for the incoming arguments
- let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(
- ccx, inlined_id, body.span, true);
- let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
-
- // Set up arguments to the function.
- debug!("trans_closure: function: {:?}", Value(fcx.llfn));
- let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope);
-
- // Up until here, IR instructions for this function have explicitly not been annotated with
- // source code location, so we don't step into call setup code. From here on, source location
- // emitting should be enabled.
- debuginfo::start_emitting_source_locations(&fcx);
-
- let dest = if fcx.fn_ty.ret.is_ignore() {
- expr::Ignore
- } else {
- expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot"))
- };
-
- // This call to trans_block is the place where we bridge between
- // translation calls that don't have a return value (trans_crate,
- // trans_mod, trans_item, et cetera) and those that do
- // (trans_block, trans_expr, et cetera).
- let mut bcx = controlflow::trans_block(bcx, body, dest);
-
- match dest {
- expr::SaveIn(slot) if fcx.needs_ret_allocas => {
- Store(bcx, slot, fcx.llretslotptr.get().unwrap());
- }
- _ => {}
- }
-
- match fcx.llreturn.get() {
- Some(_) => {
- Br(bcx, fcx.return_exit_block(), DebugLoc::None);
- fcx.pop_custom_cleanup_scope(arg_scope);
- }
- None => {
- // Microoptimization writ large: avoid creating a separate
- // llreturn basic block
- bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
- }
- };
-
- // Put return block after all other blocks.
- // This somewhat improves single-stepping experience in debugger.
- unsafe {
- let llreturn = fcx.llreturn.get();
- if let Some(llreturn) = llreturn {
- llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
- }
- }
-
- // Insert the mandatory first few basic blocks before lltop.
- fcx.finish(bcx, fn_cleanup_debug_loc.debug_loc());
+ mir::trans_mir(&fcx);
}
pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
- let local_instance = inline::maybe_inline_instance(ccx, instance);
-
- let fn_node_id = ccx.tcx().map.as_local_node_id(local_instance.def).unwrap();
-
- let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(fn_node_id));
+ let _s = StatRecorder::new(ccx, ccx.tcx().item_path_str(instance.def));
debug!("trans_instance(instance={:?})", instance);
let _icx = push_ctxt("trans_instance");
- let item = ccx.tcx().map.find(fn_node_id).unwrap();
-
let fn_ty = ccx.tcx().lookup_item_type(instance.def).ty;
let fn_ty = ccx.tcx().erase_regions(&fn_ty);
let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &fn_ty);
let sig = ccx.tcx().normalize_associated_type(&sig);
let abi = fn_ty.fn_abi();
- let lldecl = match ccx.instances().borrow().get(&local_instance) {
+ let lldecl = match ccx.instances().borrow().get(&instance) {
Some(&val) => val,
None => bug!("Instance `{:?}` not already declared", instance)
};
- match item {
- hir_map::NodeItem(&hir::Item {
- node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
- }) |
- hir_map::NodeTraitItem(&hir::TraitItem {
- node: hir::MethodTraitItem(
- hir::MethodSig { ref decl, .. }, Some(ref body)), ..
- }) |
- hir_map::NodeImplItem(&hir::ImplItem {
- node: hir::ImplItemKind::Method(
- hir::MethodSig { ref decl, .. }, ref body), ..
- }) => {
- trans_closure(ccx, decl, body, lldecl, instance,
- fn_node_id, &sig, abi, closure::ClosureEnv::NotClosure);
- }
- _ => bug!("Instance is a {:?}?", item)
- }
-}
-
-pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- ctor_ty: Ty<'tcx>,
- disr: Disr,
- args: CallArgs,
- dest: expr::Dest,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
-
- let ccx = bcx.fcx.ccx;
-
- let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
- let sig = ccx.tcx().normalize_associated_type(&sig);
- let result_ty = sig.output;
-
- // Get location to store the result. If the user does not care about
- // the result, just make a stack slot
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => {
- if !type_is_zero_size(ccx, result_ty) {
- let llresult = alloc_ty(bcx, result_ty, "constructor_result");
- call_lifetime_start(bcx, llresult);
- llresult
- } else {
- C_undef(type_of::type_of(ccx, result_ty).ptr_to())
- }
- }
- };
-
- if !type_is_zero_size(ccx, result_ty) {
- match args {
- ArgExprs(exprs) => {
- let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
- bcx = expr::trans_adt(bcx,
- result_ty,
- disr,
- &fields[..],
- None,
- expr::SaveIn(llresult),
- debug_loc);
- }
- _ => bug!("expected expr as arguments for variant/struct tuple constructor"),
- }
- } else {
- // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
- // contents, there could be side-effects we need from them.
- match args {
- ArgExprs(exprs) => {
- for expr in exprs {
- bcx = expr::trans_into(bcx, expr, expr::Ignore);
- }
- }
- _ => (),
- }
- }
-
- // If the caller doesn't care about the result
- // drop the temporary we made
- let bcx = match dest {
- expr::SaveIn(_) => bcx,
- expr::Ignore => {
- let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
- if !type_is_zero_size(ccx, result_ty) {
- call_lifetime_end(bcx, llresult);
- }
- bcx
- }
- };
-
- Result::new(bcx, llresult)
+ trans_closure(ccx, lldecl, instance, &sig, abi);
}
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- ctor_id: ast::NodeId,
+ def_id: DefId,
+ substs: &'tcx Substs<'tcx>,
disr: Disr,
- param_substs: &'tcx Substs<'tcx>,
llfndecl: ValueRef) {
- let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
- let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
+ attributes::inline(llfndecl, attributes::InlineAttr::Hint);
+ attributes::set_frame_pointer_elimination(ccx, llfndecl);
+
+ let ctor_ty = ccx.tcx().lookup_item_type(def_id).ty;
+ let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), substs, &ctor_ty);
let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
let sig = ccx.tcx().normalize_associated_type(&sig);
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena);
- let bcx = fcx.init(false, None);
-
- assert!(!fcx.needs_ret_allocas);
+ let bcx = fcx.init(false);
if !fcx.fn_ty.ret.is_ignore() {
- let dest = fcx.get_ret_slot(bcx, "eret_slot");
+ let dest = fcx.llretslotptr.get().unwrap();
let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
let repr = adt::represent_type(ccx, sig.output);
let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
let meta = &fcx.fn_ty.args[arg_idx];
arg_idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr));
+ arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr));
+ meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr));
} else {
arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
}
return;
}
- let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx).val;
+ let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx);
let et = ccx.sess().entry_type.get().unwrap();
match et {
Err(s) => ccx.sess().fatal(&s)
};
let empty_substs = Substs::empty(ccx.tcx());
- let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val;
+ let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx);
let args = {
let opaque_rust_main =
llvm::LLVMBuildPointerCast(bld,
pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
mir_map: &MirMap<'tcx>,
- analysis: ty::CrateAnalysis)
+ analysis: ty::CrateAnalysis,
+ incremental_hashes_map: &IncrementalHashesMap)
-> CrateTranslation {
let _task = tcx.dep_graph.in_task(DepNode::TransCrate);
tcx.sess.opts.debug_assertions
};
- let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
- v
- } else {
- tcx.sess.opts.debug_assertions
- };
-
- let link_meta = link::build_link_meta(tcx, name);
+ let link_meta = link::build_link_meta(incremental_hashes_map, name);
let shared_ccx = SharedCrateContext::new(tcx,
&mir_map,
Sha256::new(),
link_meta.clone(),
reachable,
- check_overflow,
- check_dropflag);
+ check_overflow);
// Translate the metadata.
let metadata = time(tcx.sess.time_passes(), "write metadata", || {
write_metadata(&shared_ccx, shared_ccx.reachable())
println!("n_null_glues: {}", stats.n_null_glues.get());
println!("n_real_glues: {}", stats.n_real_glues.get());
- println!("n_fallback_instantiations: {}", stats.n_fallback_instantiations.get());
-
println!("n_fns: {}", stats.n_fns.get());
- println!("n_monos: {}", stats.n_monos.get());
println!("n_inlines: {}", stats.n_inlines.get());
println!("n_closures: {}", stats.n_closures.get());
println!("fn stats:");
}
if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
- let mut item_to_cgus = HashMap::new();
+ let mut item_to_cgus = FnvHashMap();
for cgu in &codegen_units {
for (&trans_item, &linkage) in cgu.items() {
//! closure.
pub use self::CalleeData::*;
-pub use self::CallArgs::*;
use arena::TypedArena;
use back::symbol_names;
use llvm::{self, ValueRef, get_params};
-use middle::cstore::LOCAL_CRATE;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::traits;
-use rustc::hir::map as hir_map;
use abi::{Abi, FnType};
-use adt;
use attributes;
use base;
use base::*;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use closure;
-use common::{self, Block, Result, CrateContext, FunctionContext, C_undef};
+use common::{self, Block, Result, CrateContext, FunctionContext};
use consts;
-use datum::*;
use debuginfo::DebugLoc;
use declare;
-use expr;
-use glue;
-use inline;
-use intrinsic;
-use machine::llalign_of_min;
use meth;
use monomorphize::{self, Instance};
use trans_item::TransItem;
-use type_::Type;
use type_of;
-use value::Value;
use Disr;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::hir;
use syntax_pos::DUMMY_SP;
-use errors;
-use syntax::ptr::P;
#[derive(Debug)]
pub enum CalleeData {
impl<'tcx> Callee<'tcx> {
/// Function pointer.
- pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> {
+ pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> {
Callee {
- data: Fn(datum.val),
- ty: datum.ty
+ data: Fn(llfn),
+ ty: ty
}
}
return Callee::trait_method(ccx, trait_id, def_id, substs);
}
- let maybe_node_id = inline::get_local_instance(ccx, def_id)
- .and_then(|def_id| tcx.map.as_local_node_id(def_id));
- let maybe_ast_node = maybe_node_id.and_then(|node_id| {
- tcx.map.find(node_id)
- });
-
- let data = match maybe_ast_node {
- Some(hir_map::NodeStructCtor(_)) => {
- NamedTupleConstructor(Disr(0))
- }
- Some(hir_map::NodeVariant(_)) => {
- let vinfo = common::inlined_variant_def(ccx, maybe_node_id.unwrap());
- NamedTupleConstructor(Disr::from(vinfo.disr_val))
+ let fn_ty = def_ty(tcx, def_id, substs);
+ if let ty::TyFnDef(_, _, f) = fn_ty.sty {
+ if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic {
+ return Callee {
+ data: Intrinsic,
+ ty: fn_ty
+ };
}
- Some(hir_map::NodeForeignItem(fi)) if {
- let abi = tcx.map.get_foreign_abi(fi.id);
- abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic
- } => Intrinsic,
-
- _ => return Callee::ptr(get_fn(ccx, def_id, substs))
- };
+ }
- Callee {
- data: data,
- ty: def_ty(tcx, def_id, substs)
+ // FIXME(eddyb) Detect ADT constructors more efficiently.
+ if let Some(adt_def) = fn_ty.fn_ret().skip_binder().ty_adt_def() {
+ if let Some(v) = adt_def.variants.iter().find(|v| def_id == v.did) {
+ return Callee {
+ data: NamedTupleConstructor(Disr::from(v.disr_val)),
+ ty: fn_ty
+ };
+ }
}
+
+ let (llfn, ty) = get_fn(ccx, def_id, substs);
+ Callee::ptr(llfn, ty)
}
/// Trait method, which has to be resolved to an impl method.
// That is because default methods have the same ID as the
// trait method used to look up the impl method that ended
// up here, so calling Callee::def would infinitely recurse.
- Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs))
+ let (llfn, ty) = get_fn(ccx, mth.method.def_id, mth.substs);
+ Callee::ptr(llfn, ty)
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
trait_closure_kind);
let method_ty = def_ty(tcx, def_id, substs);
- let fn_ptr_ty = match method_ty.sty {
- ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty),
- _ => bug!("expected fn item type, found {}",
- method_ty)
- };
- Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+ Callee::ptr(llfn, method_ty)
}
traits::VtableFnPointer(vtable_fn_pointer) => {
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, vtable_fn_pointer.fn_ty);
let method_ty = def_ty(tcx, def_id, substs);
- let fn_ptr_ty = match method_ty.sty {
- ty::TyFnDef(_, _, fty) => tcx.mk_fn_ptr(fty),
- _ => bug!("expected fn item type, found {}",
- method_ty)
- };
- Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+ Callee::ptr(llfn, method_ty)
}
traits::VtableObject(ref data) => {
Callee {
/// function.
pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
- args: CallArgs<'a, 'tcx>,
- dest: Option<expr::Dest>)
+ args: &[ValueRef],
+ dest: Option<ValueRef>)
-> Result<'blk, 'tcx> {
trans_call_inner(bcx, debug_loc, self, args, dest)
}
/// Turn the callee into a function pointer.
- pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
- -> Datum<'tcx, Rvalue> {
- let fn_ptr_ty = match self.ty.sty {
- ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f),
- _ => self.ty
- };
+ pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
match self.data {
- Fn(llfn) => {
- immediate_rvalue(llfn, fn_ptr_ty)
- }
+ Fn(llfn) => llfn,
Virtual(idx) => {
- let llfn = meth::trans_object_shim(ccx, self.ty, idx);
- immediate_rvalue(llfn, fn_ptr_ty)
+ meth::trans_object_shim(ccx, self.ty, idx)
}
- NamedTupleConstructor(_) => match self.ty.sty {
+ NamedTupleConstructor(disr) => match self.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
- return get_fn(ccx, def_id, substs);
+ let instance = Instance::new(def_id, substs);
+ if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
+ return llfn;
+ }
+
+ let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
+ TransItem::Fn(instance));
+ assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance)));
+ let lldecl = declare::define_internal_fn(ccx, &sym, self.ty);
+ base::trans_ctor_shim(ccx, def_id, substs, disr, lldecl);
+ ccx.instances().borrow_mut().insert(instance, lldecl);
+
+ lldecl
}
_ => bug!("expected fn item type, found {}", self.ty)
},
let llfnpointer = match bare_fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
// Function definitions have to be turned into a pointer.
- let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
+ let llfn = Callee::def(ccx, def_id, substs).reify(ccx);
if !is_by_ref {
// A by-value fn item is ignored, so the shim has
// the same signature as the original function.
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
+ let mut bcx = fcx.init(false);
let llargs = get_params(fcx.llfn);
}
});
- assert!(!fcx.needs_ret_allocas);
-
- let dest = fcx.llretslotptr.get().map(|_|
- expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
- );
+ let dest = fcx.llretslotptr.get();
let callee = Callee {
data: Fn(llfnpointer),
ty: bare_fn_ty
};
- bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
+ bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
- -> Datum<'tcx, Rvalue> {
+ -> (ValueRef, Ty<'tcx>) {
let tcx = ccx.tcx();
debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs);
- assert!(!substs.types.needs_infer());
- assert!(!substs.types.has_escaping_regions());
-
- // Check whether this fn has an inlined copy and, if so, redirect
- // def_id to the local id of the inlined copy.
- let def_id = inline::maybe_instantiate_inline(ccx, def_id);
-
- fn is_named_tuple_constructor(tcx: TyCtxt, def_id: DefId) -> bool {
- let node_id = match tcx.map.as_local_node_id(def_id) {
- Some(n) => n,
- None => { return false; }
- };
- let map_node = errors::expect(
- &tcx.sess.diagnostic(),
- tcx.map.find(node_id),
- || "local item should be in ast map".to_string());
-
- match map_node {
- hir_map::NodeVariant(v) => {
- v.node.data.is_tuple()
- }
- hir_map::NodeStructCtor(_) => true,
- _ => false
- }
- }
- let must_monomorphise =
- !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id);
-
- debug!("get_fn({:?}) must_monomorphise: {}",
- def_id, must_monomorphise);
-
- // Create a monomorphic version of generic functions
- if must_monomorphise {
- // Should be either intra-crate or inlined.
- assert_eq!(def_id.krate, LOCAL_CRATE);
-
- let substs = tcx.normalize_associated_type(&substs);
- let (val, fn_ty) = monomorphize::monomorphic_fn(ccx, def_id, substs);
- let fn_ptr_ty = match fn_ty.sty {
- ty::TyFnDef(_, _, fty) => {
- // Create a fn pointer with the substituted signature.
- tcx.mk_fn_ptr(fty)
- }
- _ => bug!("expected fn item type, found {}", fn_ty)
- };
- assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val));
- return immediate_rvalue(val, fn_ptr_ty);
- }
+ assert!(!substs.needs_infer());
+ assert!(!substs.has_escaping_regions());
+ assert!(!substs.has_param_types());
- // Find the actual function pointer.
- let ty = ccx.tcx().lookup_item_type(def_id).ty;
- let fn_ptr_ty = match ty.sty {
- ty::TyFnDef(_, _, ref fty) => {
- // Create a fn pointer with the normalized signature.
- tcx.mk_fn_ptr(tcx.normalize_associated_type(fty))
- }
- _ => bug!("expected fn item type, found {}", ty)
- };
+ let substs = tcx.normalize_associated_type(&substs);
+ let instance = Instance::new(def_id, substs);
+ let item_ty = ccx.tcx().lookup_item_type(def_id).ty;
+ let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), substs, &item_ty);
- let instance = Instance::mono(ccx.shared(), def_id);
if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
- return immediate_rvalue(llfn, fn_ptr_ty);
+ return (llfn, fn_ty);
}
- let local_id = ccx.tcx().map.as_local_node_id(def_id);
- let local_item = match local_id.and_then(|id| tcx.map.find(id)) {
- Some(hir_map::NodeItem(&hir::Item {
- span, node: hir::ItemFn(..), ..
- })) |
- Some(hir_map::NodeTraitItem(&hir::TraitItem {
- span, node: hir::MethodTraitItem(_, Some(_)), ..
- })) |
- Some(hir_map::NodeImplItem(&hir::ImplItem {
- span, node: hir::ImplItemKind::Method(..), ..
- })) => {
- Some(span)
- }
- _ => None
- };
+ let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
+ TransItem::Fn(instance));
+ debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym);
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
- let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
- TransItem::Fn(instance));
-
- let llptrty = type_of::type_of(ccx, fn_ptr_ty);
- let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) {
- if let Some(span) = local_item {
- if declare::get_defined_value(ccx, &sym).is_some() {
- ccx.sess().span_fatal(span,
- &format!("symbol `{}` is already defined", &sym));
- }
+ let fn_ptr_ty = match fn_ty.sty {
+ ty::TyFnDef(_, _, fty) => {
+ // Create a fn pointer with the substituted signature.
+ tcx.mk_fn_ptr(fty)
}
+ _ => bug!("expected fn item type, found {}", fn_ty)
+ };
+ let llptrty = type_of::type_of(ccx, fn_ptr_ty);
+ let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) {
if common::val_ty(llfn) != llptrty {
- if local_item.is_some() {
- bug!("symbol `{}` previously declared as {:?}, now wanted as {:?}",
- sym, Value(llfn), llptrty);
- }
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
consts::ptrcast(llfn, llptrty)
} else {
llfn
}
} else {
- let llfn = declare::declare_fn(ccx, &sym, ty);
+ let llfn = declare::declare_fn(ccx, &sym, fn_ty);
assert_eq!(common::val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
let attrs = ccx.tcx().get_attrs(def_id);
attributes::from_fn_attrs(ccx, &attrs, llfn);
- if local_item.is_some() {
+
+ let is_local_def = ccx.shared().translation_items().borrow()
+ .contains(&TransItem::Fn(instance));
+ if is_local_def {
// FIXME(eddyb) Doubt all extern fn should allow unwinding.
attributes::unwind(llfn, true);
+ unsafe {
+ llvm::LLVMSetLinkage(llfn, llvm::ExternalLinkage);
+ }
}
llfn
ccx.instances().borrow_mut().insert(instance, llfn);
- immediate_rvalue(llfn, fn_ptr_ty)
+ (llfn, fn_ty)
}
// ______________________________________________________________________
// Translating calls
-fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
callee: Callee<'tcx>,
- args: CallArgs<'a, 'tcx>,
- dest: Option<expr::Dest>)
+ args: &[ValueRef],
+ opt_llretslot: Option<ValueRef>)
-> Result<'blk, 'tcx> {
// Introduce a temporary cleanup scope that will contain cleanups
// for the arguments while they are being evaluated. The purpose
let fcx = bcx.fcx;
let ccx = fcx.ccx;
- let abi = callee.ty.fn_abi();
- let sig = callee.ty.fn_sig();
- let output = bcx.tcx().erase_late_bound_regions(&sig.output());
- let output = bcx.tcx().normalize_associated_type(&output);
-
- let extra_args = match args {
- ArgExprs(args) if abi != Abi::RustCall => {
- args[sig.0.inputs.len()..].iter().map(|expr| {
- common::expr_ty_adjusted(bcx, expr)
- }).collect()
- }
- _ => vec![]
- };
- let fn_ty = callee.direct_fn_type(ccx, &extra_args);
+ let fn_ret = callee.ty.fn_ret();
+ let fn_ty = callee.direct_fn_type(ccx, &[]);
let mut callee = match callee.data {
- Intrinsic => {
- assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
- assert!(dest.is_some());
-
- return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty,
- args, dest.unwrap(),
- debug_loc);
- }
- NamedTupleConstructor(disr) => {
- assert!(dest.is_some());
-
- return base::trans_named_tuple_constructor(bcx,
- callee.ty,
- disr,
- args,
- dest.unwrap(),
- debug_loc);
+ NamedTupleConstructor(_) | Intrinsic => {
+ bug!("{:?} calls should not go through Callee::call", callee);
}
f => f
};
- // Generate a location to store the result. If the user does
- // not care about the result, just make a stack slot.
- let opt_llretslot = dest.and_then(|dest| match dest {
- expr::SaveIn(dst) => Some(dst),
- expr::Ignore => {
- let needs_drop = || bcx.fcx.type_needs_drop(output);
- if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() {
- // Push the out-pointer if we use an out-pointer for this
- // return type, otherwise push "undef".
- if fn_ty.ret.is_ignore() {
- Some(C_undef(fn_ty.ret.original_ty.ptr_to()))
- } else {
- let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret");
- call_lifetime_start(bcx, llresult);
- Some(llresult)
- }
- } else {
- None
- }
- }
- });
-
// If there no destination, return must be direct, with no cast.
if opt_llretslot.is_none() {
assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
llargs.push(llretslot);
}
- let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
- bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
- cleanup::CustomScope(arg_cleanup_scope));
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
+ match callee {
+ Virtual(idx) => {
+ llargs.push(args[0]);
+
+ let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
+ let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+ callee = Fn(PointerCast(bcx, fn_ptr, llty));
+ llargs.extend_from_slice(&args[2..]);
+ }
+ _ => llargs.extend_from_slice(args)
+ }
let llfn = match callee {
Fn(f) => f,
_ => bug!("expected fn pointer callee, found {:?}", callee)
};
- let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
+ let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
}
}
- fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
-
- // If the caller doesn't care about the result of this fn call,
- // drop the temporary slot we made.
- match (dest, opt_llretslot) {
- (Some(expr::Ignore), Some(llretslot)) => {
- // drop the value if it is not being saved.
- bcx = glue::drop_ty(bcx, llretslot, output, debug_loc);
- call_lifetime_end(bcx, llretslot);
- }
- _ => {}
- }
-
- // FIXME(canndrew): This is_never should really be an is_uninhabited
- if output.is_never() {
+ if fn_ret.0.is_never() {
Unreachable(bcx);
}
Result::new(bcx, llret)
}
-
-pub enum CallArgs<'a, 'tcx> {
- /// Supply value of arguments as a list of expressions that must be
- /// translated. This is used in the common case of `foo(bar, qux)`.
- ArgExprs(&'a [P<hir::Expr>]),
-
- /// Supply value of arguments as a list of LLVM value refs; frequently
- /// used with lang items and so forth, when the argument is an internal
- /// value.
- ArgVals(&'a [ValueRef]),
-
- /// For overloaded operators: `(lhs, Option(rhs))`.
- /// `lhs` is the left-hand-side and `rhs` is the datum
- /// of the right-hand-side argument (if any).
- ArgOverloadedOp(Datum<'tcx, Expr>, Option<Datum<'tcx, Expr>>),
-
- /// Supply value of arguments as a list of expressions that must be
- /// translated, for overloaded call operators.
- ArgOverloadedCall(Vec<&'a hir::Expr>),
-}
-
-fn trans_args_under_call_abi<'blk, 'tcx>(
- mut bcx: Block<'blk, 'tcx>,
- arg_exprs: &[P<hir::Expr>],
- callee: &mut CalleeData,
- fn_ty: &FnType,
- llargs: &mut Vec<ValueRef>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx>
-{
- let mut arg_idx = 0;
-
- // Translate the `self` argument first.
- let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
-
- // Now untuple the rest of the arguments.
- let tuple_expr = &arg_exprs[1];
- let tuple_type = common::node_id_type(bcx, tuple_expr.id);
-
- match tuple_type.sty {
- ty::TyTuple(ref field_types) => {
- let tuple_datum = unpack_datum!(bcx,
- expr::trans(bcx, &tuple_expr));
- let tuple_lvalue_datum =
- unpack_datum!(bcx,
- tuple_datum.to_lvalue_datum(bcx,
- "args",
- tuple_expr.id));
- let repr = adt::represent_type(bcx.ccx(), tuple_type);
- let repr_ptr = &repr;
- for (i, field_type) in field_types.iter().enumerate() {
- let arg_datum = tuple_lvalue_datum.get_element(
- bcx,
- field_type,
- |srcval| {
- adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
- }).to_expr_datum();
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- _ => {
- span_bug!(tuple_expr.span,
- "argument to `.call()` wasn't a tuple?!")
- }
- };
-
- bcx
-}
-
-pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- abi: Abi,
- fn_ty: &FnType,
- callee: &mut CalleeData,
- args: CallArgs<'a, 'tcx>,
- llargs: &mut Vec<ValueRef>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx> {
- debug!("trans_args(abi={})", abi);
-
- let _icx = push_ctxt("trans_args");
-
- let mut bcx = bcx;
- let mut arg_idx = 0;
-
- // First we figure out the caller's view of the types of the arguments.
- // This will be needed if this is a generic call, because the callee has
- // to cast her view of the arguments to the caller's view.
- match args {
- ArgExprs(arg_exprs) => {
- if abi == Abi::RustCall {
- // This is only used for direct calls to the `call`,
- // `call_mut` or `call_once` functions.
- return trans_args_under_call_abi(bcx,
- arg_exprs, callee, fn_ty,
- llargs,
- arg_cleanup_scope)
- }
-
- for arg_expr in arg_exprs {
- let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgOverloadedCall(arg_exprs) => {
- for expr in arg_exprs {
- let arg_datum =
- unpack_datum!(bcx, expr::trans(bcx, expr));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgOverloadedOp(lhs, rhs) => {
- bcx = trans_arg_datum(bcx, lhs,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
-
- if let Some(rhs) = rhs {
- bcx = trans_arg_datum(bcx, rhs,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgVals(vs) => {
- match *callee {
- Virtual(idx) => {
- llargs.push(vs[0]);
-
- let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx);
- let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
- *callee = Fn(PointerCast(bcx, fn_ptr, llty));
- llargs.extend_from_slice(&vs[2..]);
- }
- _ => llargs.extend_from_slice(vs)
- }
- }
- }
-
- bcx
-}
-
-fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- arg_datum: Datum<'tcx, Expr>,
- callee: &mut CalleeData,
- fn_ty: &FnType,
- next_idx: &mut usize,
- arg_cleanup_scope: cleanup::ScopeId,
- llargs: &mut Vec<ValueRef>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_arg_datum");
- let mut bcx = bcx;
-
- debug!("trans_arg_datum({:?})", arg_datum);
-
- let arg = &fn_ty.args[*next_idx];
- *next_idx += 1;
-
- // Fill padding with undef value, where applicable.
- if let Some(ty) = arg.pad {
- llargs.push(C_undef(ty));
- }
-
- // Determine whether we want a by-ref datum even if not appropriate.
- let want_by_ref = arg.is_indirect() || arg.cast.is_some();
-
- let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty);
- let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) {
- (true, arg_datum.val)
- } else {
- // Make this an rvalue, since we are going to be
- // passing ownership.
- let arg_datum = unpack_datum!(
- bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
-
- // Now that arg_datum is owned, get it into the appropriate
- // mode (ref vs value).
- let arg_datum = unpack_datum!(bcx, if want_by_ref {
- arg_datum.to_ref_datum(bcx)
- } else {
- arg_datum.to_appropriate_datum(bcx)
- });
-
- // Technically, ownership of val passes to the callee.
- // However, we must cleanup should we panic before the
- // callee is actually invoked.
- (arg_datum.kind.is_by_ref(),
- arg_datum.add_clean(bcx.fcx, arg_cleanup_scope))
- };
-
- if arg.is_ignore() {
- return bcx;
- }
-
- debug!("--- trans_arg_datum passing {:?}", Value(val));
-
- if fat_ptr {
- // Fat pointers should be passed without any transformations.
- assert!(!arg.is_indirect() && arg.cast.is_none());
- llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
-
- let info_arg = &fn_ty.args[*next_idx];
- *next_idx += 1;
- assert!(!info_arg.is_indirect() && info_arg.cast.is_none());
- let info = Load(bcx, expr::get_meta(bcx, val));
-
- if let Virtual(idx) = *callee {
- // We have to grab the fn pointer from the vtable when
- // handling the first argument, ensure that here.
- assert_eq!(*next_idx, 2);
- assert!(info_arg.is_ignore());
- let fn_ptr = meth::get_virtual_method(bcx, info, idx);
- let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
- *callee = Fn(PointerCast(bcx, fn_ptr, llty));
- } else {
- assert!(!info_arg.is_ignore());
- llargs.push(info);
- }
- return bcx;
- }
-
- let mut val = val;
- if by_ref && !arg.is_indirect() {
- // Have to load the argument, maybe while casting it.
- if arg.original_ty == Type::i1(bcx.ccx()) {
- // We store bools as i8 so we need to truncate to i1.
- val = LoadRangeAssert(bcx, val, 0, 2, llvm::False);
- val = Trunc(bcx, val, arg.original_ty);
- } else if let Some(ty) = arg.cast {
- val = Load(bcx, PointerCast(bcx, val, ty.ptr_to()));
- if !bcx.unreachable.get() {
- let llalign = llalign_of_min(bcx.ccx(), arg.ty);
- unsafe {
- llvm::LLVMSetAlignment(val, llalign);
- }
- }
- } else {
- val = Load(bcx, val);
- }
- }
-
- llargs.push(val);
- bcx
-}
//! code for `expr` itself is responsible for freeing any other byproducts
//! that may be in play.
-pub use self::ScopeId::*;
-pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
-pub use self::Heap::*;
use llvm::{BasicBlockRef, ValueRef};
use base;
use build;
use common;
-use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
-use datum::{Datum, Lvalue};
-use debuginfo::{DebugLoc, ToDebugLoc};
+use common::{Block, FunctionContext, LandingPad};
+use debuginfo::{DebugLoc};
use glue;
-use middle::region;
use type_::Type;
use value::Value;
-use rustc::ty::{Ty, TyCtxt};
-
-use std::fmt;
-use syntax::ast;
-
-pub struct CleanupScope<'blk, 'tcx: 'blk> {
- // The id of this cleanup scope. If the id is None,
- // this is a *temporary scope* that is pushed during trans to
- // cleanup miscellaneous garbage that trans may generate whose
- // lifetime is a subset of some expression. See module doc for
- // more details.
- kind: CleanupScopeKind<'blk, 'tcx>,
+use rustc::ty::Ty;
+pub struct CleanupScope<'tcx> {
// Cleanups to run upon scope exit.
- cleanups: Vec<CleanupObj<'tcx>>,
+ cleanups: Vec<DropValue<'tcx>>,
// The debug location any drop calls generated for this scope will be
// associated with.
index: usize
}
-pub const EXIT_BREAK: usize = 0;
-pub const EXIT_LOOP: usize = 1;
-pub const EXIT_MAX: usize = 2;
-
-pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
- CustomScopeKind,
- AstScopeKind(ast::NodeId),
- LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
-}
-
-impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- CustomScopeKind => write!(f, "CustomScopeKind"),
- AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
- LoopScopeKind(nid, ref blks) => {
- write!(f, "LoopScopeKind({}, [", nid)?;
- for blk in blks {
- write!(f, "{:p}, ", blk)?;
- }
- write!(f, "])")
- }
- }
- }
-}
-
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
UnwindExit(UnwindKind),
- ReturnExit,
- LoopExit(ast::NodeId, usize)
}
#[derive(Copy, Clone, Debug)]
last_cleanup: usize,
}
-pub trait Cleanup<'tcx> {
- fn must_unwind(&self) -> bool;
- fn is_lifetime_end(&self) -> bool;
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx>;
-}
-
-pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
-
-#[derive(Copy, Clone, Debug)]
-pub enum ScopeId {
- AstScope(ast::NodeId),
- CustomScope(CustomScopeIndex)
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct DropHint<K>(pub ast::NodeId, pub K);
-
-pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
-pub type DropHintValue = DropHint<ValueRef>;
-
-impl<K> DropHint<K> {
- pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
-}
-
-impl DropHint<ValueRef> {
- pub fn value(&self) -> ValueRef { self.1 }
-}
-
-pub trait DropHintMethods {
- type ValueKind;
- fn to_value(&self) -> Self::ValueKind;
-}
-impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
- type ValueKind = DropHintValue;
- fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
-}
-
-impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
- /// Invoked when we start to trans the code contained within a new cleanup scope.
- fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
- debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(debug_loc.id));
-
- // FIXME(#2202) -- currently closure bodies have a parent
- // region, which messes up the assertion below, since there
- // are no cleanup scopes on the stack at the start of
- // trans'ing a closure body. I think though that this should
- // eventually be fixed by closure bodies not having a parent
- // region, though that's a touch unclear, and it might also be
- // better just to narrow this assertion more (i.e., by
- // excluding id's that correspond to closure bodies only). For
- // now we just say that if there is already an AST scope on the stack,
- // this new AST scope had better be its immediate child.
- let top_scope = self.top_ast_scope();
- let region_maps = &self.ccx.tcx().region_maps;
- if top_scope.is_some() {
- assert!((region_maps
- .opt_encl_scope(region_maps.node_extent(debug_loc.id))
- .map(|s|s.node_id(region_maps)) == top_scope)
- ||
- (region_maps
- .opt_encl_scope(region_maps.lookup_code_extent(
- region::CodeExtentData::DestructionScope(debug_loc.id)))
- .map(|s|s.node_id(region_maps)) == top_scope));
- }
-
- self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
- debug_loc.debug_loc()));
- }
-
- fn push_loop_cleanup_scope(&self,
- id: ast::NodeId,
- exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
- debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(id));
- assert_eq!(Some(id), self.top_ast_scope());
-
- // Just copy the debuginfo source location from the enclosing scope
- let debug_loc = self.scopes
- .borrow()
- .last()
- .unwrap()
- .debug_loc;
-
- self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
- }
-
- fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
+impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+ pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
.map(|opt_scope| opt_scope.debug_loc)
.unwrap_or(DebugLoc::None);
- self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
- CustomScopeIndex { index: index }
- }
-
- fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeIdAndSpan)
- -> CustomScopeIndex {
- let index = self.scopes_len();
- debug!("push_custom_cleanup_scope(): {}", index);
-
- self.push_scope(CleanupScope::new(CustomScopeKind,
- debug_loc.debug_loc()));
+ self.push_scope(CleanupScope::new(debug_loc));
CustomScopeIndex { index: index }
}
- /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
- /// stack, and generates the code to do its cleanups for normal exit.
- fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- cleanup_scope: ast::NodeId)
- -> Block<'blk, 'tcx> {
- debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
-
- assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
-
- let scope = self.pop_scope();
- self.trans_scope_cleanups(bcx, &scope)
- }
-
- /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
- /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
- /// branching to a block generated by `normal_exit_block`.
- fn pop_loop_cleanup_scope(&self,
- cleanup_scope: ast::NodeId) {
- debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
-
- assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
-
- let _ = self.pop_scope();
- }
-
/// Removes the top cleanup scope from the stack without executing its cleanups. The top
/// cleanup scope must be the temporary scope `custom_scope`.
- fn pop_custom_cleanup_scope(&self,
- custom_scope: CustomScopeIndex) {
+ pub fn pop_custom_cleanup_scope(&self,
+ custom_scope: CustomScopeIndex) {
debug!("pop_custom_cleanup_scope({})", custom_scope.index);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
let _ = self.pop_scope();
/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
- fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- custom_scope: CustomScopeIndex)
- -> Block<'blk, 'tcx> {
+ pub fn pop_and_trans_custom_cleanup_scope(&self,
+ bcx: Block<'blk, 'tcx>,
+ custom_scope: CustomScopeIndex)
+ -> Block<'blk, 'tcx> {
debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
self.trans_scope_cleanups(bcx, &scope)
}
- /// Returns the id of the top-most loop scope
- fn top_loop_scope(&self) -> ast::NodeId {
- for scope in self.scopes.borrow().iter().rev() {
- if let LoopScopeKind(id, _) = scope.kind {
- return id;
- }
- }
- bug!("no loop scope found");
- }
-
- /// Returns a block to branch to which will perform all pending cleanups and
- /// then break/continue (depending on `exit`) out of the loop with id
- /// `cleanup_scope`
- fn normal_exit_block(&'blk self,
- cleanup_scope: ast::NodeId,
- exit: usize) -> BasicBlockRef {
- self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
- }
-
- /// Returns a block to branch to which will perform all pending cleanups and
- /// then return from this function
- fn return_exit_block(&'blk self) -> BasicBlockRef {
- self.trans_cleanups_to_exit_scope(ReturnExit)
- }
-
- fn schedule_lifetime_end(&self,
- cleanup_scope: ScopeId,
- val: ValueRef) {
- let drop = box LifetimeEnd {
- ptr: val,
- };
-
- debug!("schedule_lifetime_end({:?}, val={:?})",
- cleanup_scope, Value(val));
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of
/// `ty`
- fn schedule_drop_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>) {
+ pub fn schedule_drop_mem(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
- let drop_hint = drop_hint.map(|hint|hint.to_value());
- let drop = box DropValue {
+ let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: false,
- drop_hint: drop_hint,
};
- debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
- fn schedule_drop_and_fill_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>) {
- if !self.type_needs_drop(ty) { return; }
-
- let drop_hint = drop_hint.map(|datum|datum.to_value());
- let drop = box DropValue {
- is_immediate: false,
- val: val,
- ty: ty,
- fill_on_drop: true,
- skip_dtor: false,
- drop_hint: drop_hint,
- };
-
- debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?},
- fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
- cleanup_scope,
- Value(val),
- ty,
- drop.fill_on_drop,
- drop.skip_dtor,
- drop_hint.is_some());
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ self.schedule_clean(cleanup_scope, drop);
}
/// Issue #23611: Schedules a (deep) drop of the contents of
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
- fn schedule_drop_adt_contents(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
+ pub fn schedule_drop_adt_contents(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }
- let drop = box DropValue {
+ let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: true,
- drop_hint: None,
};
- debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a (deep) drop of `val`, which is an instance of `ty`
- fn schedule_drop_immediate(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
+ pub fn schedule_drop_immediate(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
- let drop = Box::new(DropValue {
+ let drop = DropValue {
is_immediate: true,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: false,
- drop_hint: None,
- });
+ };
- debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- /// Schedules a call to `free(val)`. Note that this is a shallow operation.
- fn schedule_free_value(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>) {
- let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
-
- debug!("schedule_free_value({:?}, val={:?}, heap={:?})",
- cleanup_scope, Value(val), heap);
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- fn schedule_clean(&self,
- cleanup_scope: ScopeId,
- cleanup: CleanupObj<'tcx>) {
- match cleanup_scope {
- AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
- CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
- }
- }
-
- /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
- /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
- /// scope.
- fn schedule_clean_in_ast_scope(&self,
- cleanup_scope: ast::NodeId,
- cleanup: CleanupObj<'tcx>) {
- debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
- cleanup_scope);
-
- for scope in self.scopes.borrow_mut().iter_mut().rev() {
- if scope.kind.is_ast_with_id(cleanup_scope) {
- scope.cleanups.push(cleanup);
- scope.cached_landing_pad = None;
- return;
- } else {
- // will be adding a cleanup to some enclosing scope
- scope.clear_cached_exits();
- }
- }
-
- bug!("no cleanup scope {} found",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
+ self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
- fn schedule_clean_in_custom_scope(&self,
- custom_scope: CustomScopeIndex,
- cleanup: CleanupObj<'tcx>) {
+ fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) {
debug!("schedule_clean_in_custom_scope(custom_scope={})",
custom_scope.index);
}
/// Returns true if there are pending cleanups that should execute on panic.
- fn needs_invoke(&self) -> bool {
+ pub fn needs_invoke(&self) -> bool {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block
/// will run the panic cleanups and eventually resume the exception that
/// caused the landing pad to be run.
- fn get_landing_pad(&'blk self) -> BasicBlockRef {
+ pub fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");
debug!("get_landing_pad");
return llbb;
}
-}
-
-impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
- /// Returns the id of the current top-most AST scope, if any.
- fn top_ast_scope(&self) -> Option<ast::NodeId> {
- for scope in self.scopes.borrow().iter().rev() {
- match scope.kind {
- CustomScopeKind | LoopScopeKind(..) => {}
- AstScopeKind(i) => {
- return Some(i);
- }
- }
- }
- None
- }
-
- fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
- self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
- }
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
self.is_valid_custom_scope(custom_scope) &&
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
let scopes = self.scopes.borrow();
- custom_scope.index < scopes.len() &&
- (*scopes)[custom_scope.index].kind.is_temp()
+ custom_scope.index < scopes.len()
}
/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
bcx: Block<'blk, 'tcx>,
- scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
+ scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> {
let mut bcx = bcx;
if !bcx.unreachable.get() {
self.scopes.borrow().len()
}
- fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
+ fn push_scope(&self, scope: CleanupScope<'tcx>) {
self.scopes.borrow_mut().push(scope)
}
- fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
+ fn pop_scope(&self) -> CleanupScope<'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
self.scopes.borrow_mut().pop().unwrap()
}
- fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
+ fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R {
f(self.scopes.borrow().last().unwrap())
}
UnwindExit(val) => {
// Generate a block that will resume unwinding to the
// calling function
- let bcx = self.new_block("resume", None);
+ let bcx = self.new_block("resume");
match val {
UnwindKind::LandingPad => {
let addr = self.landingpad_alloca.get()
prev_llbb = bcx.llbb;
break;
}
-
- ReturnExit => {
- prev_llbb = self.get_llreturn();
- break
- }
-
- LoopExit(id, _) => {
- bug!("cannot exit from scope {}, not in scope", id);
- }
}
}
skip = last_cleanup;
break;
}
-
- // If we are searching for a loop exit,
- // and this scope is that loop, then stop popping and set
- // `prev_llbb` to the appropriate exit block from the loop.
- let scope = popped_scopes.last().unwrap();
- match label {
- UnwindExit(..) | ReturnExit => { }
- LoopExit(id, exit) => {
- if let Some(exit) = scope.kind.early_exit_block(id, exit) {
- prev_llbb = exit;
- break
- }
- }
- }
}
debug!("trans_cleanups_to_exit_scope: popped {} scopes",
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
- let bcx_in = self.new_block(&name[..], None);
+ let bcx_in = self.new_block(&name[..]);
let exit_label = label.start(bcx_in);
let mut bcx_out = bcx_in;
let len = scope.cleanups.len();
Some(llbb) => return llbb,
None => {
let name = last_scope.block_name("unwind");
- pad_bcx = self.new_block(&name[..], None);
+ pad_bcx = self.new_block(&name[..]);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
}
}
-impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
- fn new(kind: CleanupScopeKind<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> CleanupScope<'blk, 'tcx> {
+impl<'tcx> CleanupScope<'tcx> {
+ fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> {
CleanupScope {
- kind: kind,
debug_loc: debug_loc,
cleanups: vec!(),
cached_early_exits: vec!(),
}
}
- fn clear_cached_exits(&mut self) {
- self.cached_early_exits = vec!();
- self.cached_landing_pad = None;
- }
-
fn cached_early_exit(&self,
label: EarlyExitLabel)
-> Option<(BasicBlockRef, usize)> {
/// True if this scope has cleanups that need unwinding
fn needs_invoke(&self) -> bool {
-
self.cached_landing_pad.is_some() ||
- self.cleanups.iter().any(|c| c.must_unwind())
+ !self.cleanups.is_empty()
}
/// Returns a suitable name to use for the basic block that handles this cleanup scope
fn block_name(&self, prefix: &str) -> String {
- match self.kind {
- CustomScopeKind => format!("{}_custom_", prefix),
- AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
- LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
- }
- }
-
- /// Manipulate cleanup scope for call arguments. Conceptually, each
- /// argument to a call is an lvalue, and performing the call moves each
- /// of the arguments into a new rvalue (which gets cleaned up by the
- /// callee). As an optimization, instead of actually performing all of
- /// those moves, trans just manipulates the cleanup scope to obtain the
- /// same effect.
- pub fn drop_non_lifetime_clean(&mut self) {
- self.cleanups.retain(|c| c.is_lifetime_end());
- self.clear_cached_exits();
- }
-}
-
-impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
- fn is_temp(&self) -> bool {
- match *self {
- CustomScopeKind => true,
- LoopScopeKind(..) | AstScopeKind(..) => false,
- }
- }
-
- fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
- match *self {
- CustomScopeKind | LoopScopeKind(..) => false,
- AstScopeKind(i) => i == id
- }
- }
-
- fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
- match *self {
- CustomScopeKind | AstScopeKind(..) => false,
- LoopScopeKind(i, _) => i == id
- }
- }
-
- /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
- fn early_exit_block(&self,
- id: ast::NodeId,
- exit: usize) -> Option<BasicBlockRef> {
- match *self {
- LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
- _ => None,
- }
+ format!("{}_custom_", prefix)
}
}
bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
*self
}
- label => label,
}
}
}
is_immediate: bool,
val: ValueRef,
ty: Ty<'tcx>,
- fill_on_drop: bool,
skip_dtor: bool,
- drop_hint: Option<DropHintValue>,
}
-impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
- fn must_unwind(&self) -> bool {
- true
- }
-
- fn is_lifetime_end(&self) -> bool {
- false
- }
-
+impl<'tcx> DropValue<'tcx> {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
- glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
+ glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
- if self.fill_on_drop {
- base::drop_done_fill_mem(bcx, self.val, self.ty);
- }
bcx
}
}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Heap {
- HeapExchange
-}
-
-#[derive(Copy, Clone)]
-pub struct FreeValue<'tcx> {
- ptr: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>
-}
-
-impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
- fn must_unwind(&self) -> bool {
- true
- }
-
- fn is_lifetime_end(&self) -> bool {
- false
- }
-
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx> {
- match self.heap {
- HeapExchange => {
- glue::trans_exchange_free_ty(bcx,
- self.ptr,
- self.content_ty,
- debug_loc)
- }
- }
- }
-}
-
-#[derive(Copy, Clone)]
-pub struct LifetimeEnd {
- ptr: ValueRef,
-}
-
-impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
- fn must_unwind(&self) -> bool {
- false
- }
-
- fn is_lifetime_end(&self) -> bool {
- true
- }
-
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx> {
- debug_loc.apply(bcx.fcx);
- base::call_lifetime_end(bcx, self.ptr);
- bcx
- }
-}
-
-pub fn temporary_scope(tcx: TyCtxt,
- id: ast::NodeId)
- -> ScopeId {
- match tcx.region_maps.temporary_scope(id) {
- Some(scope) => {
- let r = AstScope(scope.node_id(&tcx.region_maps));
- debug!("temporary_scope({}) = {:?}", id, r);
- r
- }
- None => {
- bug!("no temporary scope available for expr {}", id)
- }
- }
-}
-
-pub fn var_scope(tcx: TyCtxt,
- id: ast::NodeId)
- -> ScopeId {
- let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
- debug!("var_scope({}) = {:?}", id, r);
- r
-}
-
-///////////////////////////////////////////////////////////////////////////
-// These traits just exist to put the methods into this file.
-
-pub trait CleanupMethods<'blk, 'tcx> {
- fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
- fn push_loop_cleanup_scope(&self,
- id: ast::NodeId,
- exits: [Block<'blk, 'tcx>; EXIT_MAX]);
- fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
- fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeIdAndSpan)
- -> CustomScopeIndex;
- fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- cleanup_scope: ast::NodeId)
- -> Block<'blk, 'tcx>;
- fn pop_loop_cleanup_scope(&self,
- cleanup_scope: ast::NodeId);
- fn pop_custom_cleanup_scope(&self,
- custom_scope: CustomScopeIndex);
- fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- custom_scope: CustomScopeIndex)
- -> Block<'blk, 'tcx>;
- fn top_loop_scope(&self) -> ast::NodeId;
- fn normal_exit_block(&'blk self,
- cleanup_scope: ast::NodeId,
- exit: usize) -> BasicBlockRef;
- fn return_exit_block(&'blk self) -> BasicBlockRef;
- fn schedule_lifetime_end(&self,
- cleanup_scope: ScopeId,
- val: ValueRef);
- fn schedule_drop_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>);
- fn schedule_drop_and_fill_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>);
- fn schedule_drop_adt_contents(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>);
- fn schedule_drop_immediate(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>);
- fn schedule_free_value(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>);
- fn schedule_clean(&self,
- cleanup_scope: ScopeId,
- cleanup: CleanupObj<'tcx>);
- fn schedule_clean_in_ast_scope(&self,
- cleanup_scope: ast::NodeId,
- cleanup: CleanupObj<'tcx>);
- fn schedule_clean_in_custom_scope(&self,
- custom_scope: CustomScopeIndex,
- cleanup: CleanupObj<'tcx>);
- fn needs_invoke(&self) -> bool;
- fn get_landing_pad(&'blk self) -> BasicBlockRef;
-}
-
-trait CleanupHelperMethods<'blk, 'tcx> {
- fn top_ast_scope(&self) -> Option<ast::NodeId>;
- fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
- fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
- fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
- fn trans_scope_cleanups(&self,
- bcx: Block<'blk, 'tcx>,
- scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
- fn trans_cleanups_to_exit_scope(&'blk self,
- label: EarlyExitLabel)
- -> BasicBlockRef;
- fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
- fn scopes_len(&self) -> usize;
- fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
- fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
- fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
-}
use arena::TypedArena;
use back::symbol_names;
-use llvm::{self, ValueRef, get_param, get_params};
+use llvm::{self, ValueRef, get_params};
use rustc::hir::def_id::DefId;
use abi::{Abi, FnType};
-use adt;
use attributes;
use base::*;
-use build::*;
-use callee::{self, ArgVals, Callee};
-use cleanup::{CleanupMethods, CustomScope, ScopeId};
+use callee::{self, Callee};
use common::*;
-use datum::{ByRef, Datum, lvalue_scratch_datum};
-use datum::{rvalue_scratch_datum, Rvalue};
-use debuginfo::{self, DebugLoc};
+use debuginfo::{DebugLoc};
use declare;
-use expr;
use monomorphize::{Instance};
use value::Value;
-use Disr;
use rustc::ty::{self, Ty, TyCtxt};
-use session::config::FullDebugInfo;
-
-use syntax::ast;
use rustc::hir;
-use libc::c_uint;
-
-fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- closure_def_id: DefId,
- arg_scope_id: ScopeId,
- id: ast::NodeId) {
- let _icx = push_ctxt("closure::load_closure_environment");
- let kind = kind_for_closure(bcx.ccx(), closure_def_id);
-
- let env_arg = &bcx.fcx.fn_ty.args[0];
- let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
-
- // Special case for small by-value selfs.
- let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
- let closure_ty = node_id_type(bcx, id);
- let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
- env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv);
- llenv
- } else {
- get_param(bcx.fcx.llfn, env_idx as c_uint)
- };
-
- // Store the pointer to closure data in an alloca for debug info because that's what the
- // llvm.dbg.declare intrinsic expects
- let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo {
- let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr");
- Store(bcx, llenv, alloc);
- Some(alloc)
- } else {
- None
- };
-
- bcx.tcx().with_freevars(id, |fv| {
- for (i, freevar) in fv.iter().enumerate() {
- let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
- closure_expr_id: id };
- let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
- let mut upvar_ptr = StructGEP(bcx, llenv, i);
- let captured_by_ref = match upvar_capture {
- ty::UpvarCapture::ByValue => false,
- ty::UpvarCapture::ByRef(..) => {
- upvar_ptr = Load(bcx, upvar_ptr);
- true
- }
- };
- let node_id = freevar.def.var_id();
- bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
-
- if kind == ty::ClosureKind::FnOnce && !captured_by_ref {
- let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
- bcx.fcx.schedule_drop_mem(arg_scope_id,
- upvar_ptr,
- node_id_type(bcx, node_id),
- hint)
- }
-
- if let Some(env_pointer_alloca) = env_pointer_alloca {
- debuginfo::create_captured_var_metadata(
- bcx,
- node_id,
- env_pointer_alloca,
- i,
- captured_by_ref,
- freevar.span);
- }
- }
- })
-}
-
-pub enum ClosureEnv {
- NotClosure,
- Closure(DefId, ast::NodeId),
-}
-
-impl ClosureEnv {
- pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
- if let ClosureEnv::Closure(def_id, id) = self {
- load_closure_environment(bcx, def_id, arg_scope, id);
- }
- }
-}
-
fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
llfn
}
-fn translating_closure_body_via_mir_will_fail(ccx: &CrateContext,
- closure_def_id: DefId)
- -> bool {
- let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
- let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
- let use_mir = default_to_mir ^ ccx.tcx().has_attr(closure_def_id, invert);
-
- !use_mir
-}
-
pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_def_id: DefId,
closure_substs: ty::ClosureSubsts<'tcx>) {
- use syntax::ast::DUMMY_NODE_ID;
- use syntax_pos::DUMMY_SP;
- use syntax::ptr::P;
-
- trans_closure_expr(Dest::Ignore(ccx),
- &hir::FnDecl {
- inputs: P::new(),
- output: hir::Return(P(hir::Ty {
- id: DUMMY_NODE_ID,
- span: DUMMY_SP,
- node: hir::Ty_::TyNever,
- })),
- variadic: false
- },
- &hir::Block {
- stmts: P::new(),
- expr: None,
- id: DUMMY_NODE_ID,
- rules: hir::DefaultBlock,
- span: DUMMY_SP
- },
- DUMMY_NODE_ID,
- closure_def_id,
- closure_substs);
-}
-
-pub enum Dest<'a, 'tcx: 'a> {
- SaveIn(Block<'a, 'tcx>, ValueRef),
- Ignore(&'a CrateContext<'a, 'tcx>)
-}
-
-pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
- decl: &hir::FnDecl,
- body: &hir::Block,
- id: ast::NodeId,
- closure_def_id: DefId, // (*)
- closure_substs: ty::ClosureSubsts<'tcx>)
- -> Option<Block<'a, 'tcx>>
-{
// (*) Note that in the case of inlined functions, the `closure_def_id` will be the
// defid of the closure in its original crate, whereas `id` will be the id of the local
// inlined copy.
- debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})",
- id, closure_def_id, closure_substs);
+ debug!("trans_closure_body_via_mir(closure_def_id={:?}, closure_substs={:?})",
+ closure_def_id, closure_substs);
- let ccx = match dest {
- Dest::SaveIn(bcx, _) => bcx.ccx(),
- Dest::Ignore(ccx) => ccx
- };
let tcx = ccx.tcx();
let _icx = push_ctxt("closure::trans_closure_expr");
};
trans_closure(ccx,
- decl,
- body,
llfn,
Instance::new(closure_def_id, param_substs),
- id,
&sig,
- Abi::RustCall,
- ClosureEnv::Closure(closure_def_id, id));
+ Abi::RustCall);
ccx.instances().borrow_mut().insert(instance, llfn);
}
-
- // Don't hoist this to the top of the function. It's perfectly legitimate
- // to have a zero-size closure (in which case dest will be `Ignore`) and
- // we must still generate the closure body.
- let (mut bcx, dest_addr) = match dest {
- Dest::SaveIn(bcx, p) => (bcx, p),
- Dest::Ignore(_) => {
- debug!("trans_closure_expr() ignoring result");
- return None;
- }
- };
-
- let repr = adt::represent_type(ccx, node_id_type(bcx, id));
-
- // Create the closure.
- tcx.with_freevars(id, |fv| {
- for (i, freevar) in fv.iter().enumerate() {
- let datum = expr::trans_var(bcx, freevar.def);
- let upvar_slot_dest = adt::trans_field_ptr(
- bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
- let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
- closure_expr_id: id };
- match tcx.upvar_capture(upvar_id).unwrap() {
- ty::UpvarCapture::ByValue => {
- bcx = datum.store_to(bcx, upvar_slot_dest);
- }
- ty::UpvarCapture::ByRef(..) => {
- Store(bcx, datum.to_llref(), upvar_slot_dest);
- }
- }
- }
- });
- adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
-
- Some(bcx)
}
pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
if !ccx.sess().target.target.options.allows_weak_linkage &&
!ccx.sess().opts.single_codegen_unit() {
- if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) {
- // If the closure is defined in the local crate, we can always just
- // translate it.
- let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node {
- hir::ExprClosure(_, ref decl, ref body, _) => (decl, body),
- _ => { unreachable!() }
- };
-
- trans_closure_expr(Dest::Ignore(ccx),
- decl,
- body,
- node_id,
- closure_def_id,
- substs);
- } else {
- // If the closure is defined in an upstream crate, we can only
- // translate it if MIR-trans is active.
-
- if translating_closure_body_via_mir_will_fail(ccx, closure_def_id) {
- ccx.sess().fatal("You have run into a known limitation of the \
- MingW toolchain. Either compile with -Zorbit or \
- with -Ccodegen-units=1 to work around it.");
- }
-
- trans_closure_body_via_mir(ccx, closure_def_id, substs);
- }
+ trans_closure_body_via_mir(ccx, closure_def_id, substs);
}
// If the closure is a Fn closure, but a FnOnce is needed (etc),
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
+ let mut bcx = fcx.init(false);
// the first argument (`self`) will be the (by value) closure env.
- let self_scope = fcx.push_custom_cleanup_scope();
- let self_scope_id = CustomScope(self_scope);
let mut llargs = get_params(fcx.llfn);
let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let env_arg = &fcx.fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
- Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
- .add_clean(&fcx, self_scope_id)
+ llargs[self_idx]
} else {
- unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
- InitAlloca::Dropped,
- self_scope_id, |bcx, llval| {
- let mut llarg_idx = self_idx;
- env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval);
- bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
- bcx
- })).val
+ let scratch = alloc_ty(bcx, closure_ty, "self");
+ let mut llarg_idx = self_idx;
+ env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
+ scratch
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
llargs[self_idx] = llenv;
}
- let dest =
- fcx.llretslotptr.get().map(
- |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+ let dest = fcx.llretslotptr.get();
let callee = Callee {
data: callee::Fn(llreffn),
ty: llref_fn_ty
};
- bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx;
+
+ // Call the by-ref closure body with `self` in a cleanup scope,
+ // to drop `self` when the body returns, or in case it unwinds.
+ let self_scope = fcx.push_custom_cleanup_scope();
+ fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
+
+ bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
use rustc::mir::repr as mir;
use rustc::mir::visit as mir_visit;
use rustc::mir::visit::Visitor as MirVisitor;
+use rustc::mir::repr::Location;
use rustc_const_eval as const_eval;
impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
- fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>) {
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
debug!("visiting rvalue {:?}", *rvalue);
match *rvalue {
_ => { /* not interesting */ }
}
- self.super_rvalue(rvalue);
+ self.super_rvalue(rvalue, location);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Lvalue<'tcx>,
- context: mir_visit::LvalueContext) {
+ context: mir_visit::LvalueContext,
+ location: Location) {
debug!("visiting lvalue {:?}", *lvalue);
if let mir_visit::LvalueContext::Drop = context {
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
- self.super_lvalue(lvalue, context);
+ self.super_lvalue(lvalue, context, location);
}
- fn visit_operand(&mut self, operand: &mir::Operand<'tcx>) {
+ fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
debug!("visiting operand {:?}", *operand);
let callee = match *operand {
}
}
- self.super_operand(operand);
+ self.super_operand(operand, location);
fn can_result_in_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
// we would not register drop-glues.
fn visit_terminator_kind(&mut self,
block: mir::BasicBlock,
- kind: &mir::TerminatorKind<'tcx>) {
+ kind: &mir::TerminatorKind<'tcx>,
+ location: Location) {
let tcx = self.scx.tcx();
match *kind {
mir::TerminatorKind::Call {
_ => { /* Nothing to do. */ }
}
- self.super_terminator_kind(block, kind);
+ self.super_terminator_kind(block, kind, location);
fn is_drop_in_place_intrinsic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
.drop_trait()
.unwrap();
- let self_type_substs = Substs::new_trait(scx.tcx(), vec![], vec![], ty);
+ let self_type_substs = Substs::new_trait(scx.tcx(), ty, &[]);
let trait_ref = ty::TraitRef {
def_id: drop_trait_def_id,
// The substitutions we have are on the impl, so we grab
// the method type from the impl to substitute into.
let impl_substs = Substs::for_item(tcx, impl_def_id,
- |_, _| ty::ReErased,
+ |_, _| tcx.mk_region(ty::ReErased),
|_, _| tcx.types.err);
let mth = meth::get_impl_method(tcx,
callee_substs,
use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
-use rustc::cfg;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use callee::Callee;
use cleanup;
use consts;
-use datum;
use debuginfo::{self, DebugLoc};
use declare;
use machine;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::fold::TypeFoldable;
use rustc::hir;
-use util::nodemap::NodeMap;
use arena::TypedArena;
use libc::{c_uint, c_char};
/// Returns true if the type is represented as a pair of immediates.
pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
-> bool {
- let tcx = ccx.tcx();
- let layout = tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| {
- match ty.layout(&infcx) {
- Ok(layout) => layout,
- Err(err) => {
- bug!("type_is_imm_pair: layout for `{:?}` failed: {}",
- ty, err);
- }
- }
- });
-
- match *layout {
+ match *ccx.layout_of(ty) {
Layout::FatPointer { .. } => true,
Layout::Univariant { ref variant, .. } => {
// There must be only 2 fields.
use Disr;
-#[derive(Copy, Clone)]
-pub struct NodeIdAndSpan {
- pub id: ast::NodeId,
- pub span: Span,
-}
-
-pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
- NodeIdAndSpan { id: expr.id, span: expr.span }
-}
-
/// The concrete version of ty::FieldDef. The name is the field index if
/// the field is numeric.
pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
}
}
}
-
- /// Return the variant corresponding to a given node (e.g. expr)
- pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
- Self::from_ty(tcx, ty, Some(tcx.expect_def(id)))
- }
-
- pub fn field_index(&self, name: ast::Name) -> usize {
- self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
- bug!("unknown field `{}`", name)
- })
- }
}
pub struct BuilderRef_res {
}
pub fn validate_substs(substs: &Substs) {
- assert!(!substs.types.needs_infer());
-}
-
-// work around bizarre resolve errors
-type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
-pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
-
-#[derive(Clone, Debug)]
-struct HintEntry<'tcx> {
- // The datum for the dropflag-hint itself; note that many
- // source-level Lvalues will be associated with the same
- // dropflag-hint datum.
- datum: cleanup::DropHintDatum<'tcx>,
-}
-
-pub struct DropFlagHintsMap<'tcx> {
- // Maps NodeId for expressions that read/write unfragmented state
- // to that state's drop-flag "hint." (A stack-local hint
- // indicates either that (1.) it is certain that no-drop is
- // needed, or (2.) inline drop-flag must be consulted.)
- node_map: NodeMap<HintEntry<'tcx>>,
-}
-
-impl<'tcx> DropFlagHintsMap<'tcx> {
- pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
- pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
- pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
- self.node_map.insert(id, HintEntry { datum: datum });
- }
- pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
- self.node_map.get(&id).map(|t|t.datum)
- }
+ assert!(!substs.needs_infer());
}
// Function context. Every LLVM function we create will have one of
// A marker for the place where we want to insert the function's static
// allocas, so that LLVM will coalesce them into a single alloca call.
pub alloca_insert_pt: Cell<Option<ValueRef>>,
- pub llreturn: Cell<Option<BasicBlockRef>>,
-
- // If the function has any nested return's, including something like:
- // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
- // we use a separate alloca for each return
- pub needs_ret_allocas: bool,
// When working with landingpad-based exceptions this value is alloca'd and
// later loaded when using the resume instruction. This ends up being
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,
- // Maps the DefId's for local variables to the allocas created for
- // them in llallocas.
- pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
-
- // Same as above, but for closure upvars
- pub llupvars: RefCell<NodeMap<ValueRef>>,
-
- // Carries info about drop-flags for local bindings (longer term,
- // paths) for the code being compiled.
- pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
-
// Describes the return/argument LLVM types and their ABI handling.
pub fn_ty: FnType,
pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes.
- pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
-
- pub cfg: Option<cfg::CFG>,
+ pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
}
}
- pub fn get_llreturn(&self) -> BasicBlockRef {
- if self.llreturn.get().is_none() {
-
- self.llreturn.set(Some(unsafe {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
- "return\0".as_ptr() as *const _)
- }))
- }
-
- self.llreturn.get().unwrap()
- }
-
- pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
- if self.needs_ret_allocas {
- base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
- } else {
- self.llretslotptr.get().unwrap()
- }
- }
-
pub fn new_block(&'a self,
- name: &str,
- opt_node_id: Option<ast::NodeId>)
+ name: &str)
-> Block<'a, 'tcx> {
unsafe {
let name = CString::new(name).unwrap();
let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
name.as_ptr());
- BlockS::new(llbb, opt_node_id, self)
+ BlockS::new(llbb, self)
}
}
- pub fn new_id_block(&'a self,
- name: &str,
- node_id: ast::NodeId)
- -> Block<'a, 'tcx> {
- self.new_block(name, Some(node_id))
- }
-
- pub fn new_temp_block(&'a self,
- name: &str)
- -> Block<'a, 'tcx> {
- self.new_block(name, None)
- }
-
- pub fn join_blocks(&'a self,
- id: ast::NodeId,
- in_cxs: &[Block<'a, 'tcx>])
- -> Block<'a, 'tcx> {
- let out = self.new_id_block("join", id);
- let mut reachable = false;
- for bcx in in_cxs {
- if !bcx.unreachable.get() {
- build::Br(*bcx, out.llbb, DebugLoc::None);
- reachable = true;
- }
- }
- if !reachable {
- build::Unreachable(out);
- }
- return out;
- }
-
pub fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
let tcx = ccx.tcx();
match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
- Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx).val
+ Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx)
}
_ => {
if let Some(llpersonality) = ccx.eh_personality().get() {
let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
- return Callee::ptr(datum::immediate_rvalue(llfn, ty));
+ return Callee::ptr(llfn, ty);
}
let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
- Callee::ptr(datum::immediate_rvalue(llfn, ty))
+ Callee::ptr(llfn, ty)
}
}
// kind of landing pad its in, otherwise this is none.
pub lpad: Cell<Option<&'blk LandingPad>>,
- // AST node-id associated with this block, if any. Used for
- // debugging purposes only.
- pub opt_node_id: Option<ast::NodeId>,
-
// The function context for the function to which this block is
// attached.
pub fcx: &'blk FunctionContext<'blk, 'tcx>,
impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
pub fn new(llbb: BasicBlockRef,
- opt_node_id: Option<ast::NodeId>,
fcx: &'blk FunctionContext<'blk, 'tcx>)
-> Block<'blk, 'tcx> {
fcx.block_arena.alloc(BlockS {
terminated: Cell::new(false),
unreachable: Cell::new(false),
lpad: Cell::new(None),
- opt_node_id: opt_node_id,
fcx: fcx
})
}
}
}
-pub fn C_floating(s: &str, t: Type) -> ValueRef {
- unsafe {
- let s = CString::new(s).unwrap();
- llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
- }
-}
-
pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
unsafe {
llvm::LLVMConstReal(t.to_ref(), f)
C_integral(Type::i64(ccx), i, false)
}
-pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
- let v = i.as_i64();
-
- let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
-
- if bit_size < 64 {
- // make sure it doesn't overflow
- assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
- }
-
- C_integral(ccx.int_type(), v as u64, true)
-}
-
pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
let v = i.as_u64();
}
}
-pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
- bcx.fcx.monomorphize(&t)
-}
-
-pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
- let tcx = bcx.tcx();
- let t = tcx.node_id_to_type(id);
- monomorphize_type(bcx, t)
-}
-
-pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
- node_id_type(bcx, ex.id)
-}
-
-pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
- monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
-}
-
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
}
}
-/// Return the VariantDef corresponding to an inlined variant node
-pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- inlined_vid: ast::NodeId)
- -> ty::VariantDef<'tcx>
-{
- let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
- debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
- inlined_vid);
- let adt_def = match ctor_ty.sty {
- ty::TyFnDef(_, _, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
- output, ..
- }), ..}) => output,
- _ => ctor_ty
- }.ty_adt_def().unwrap();
- let variant_def_id = if ccx.tcx().map.is_inlined_node_id(inlined_vid) {
- ccx.defid_for_inlined_node(inlined_vid).unwrap()
- } else {
- ccx.tcx().map.local_def_id(inlined_vid)
- };
-
- adt_def.variants
- .iter()
- .find(|v| variant_def_id == v.did)
- .unwrap_or_else(|| {
- bug!("no variant for {:?}::{}", adt_def, inlined_vid)
- })
-}
-
// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e. the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
use llvm;
use llvm::{SetUnnamedAddr};
-use llvm::{InternalLinkage, ValueRef, Bool, True};
-use middle::const_qualif::ConstQualif;
-use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, lookup_const_by_id, ErrKind};
-use rustc_const_eval::{eval_length, report_const_eval_err, note_const_eval_err};
-use rustc::hir::def::Def;
+use llvm::{InternalLinkage, ValueRef, True};
+use rustc_const_eval::ConstEvalErr;
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
-use {abi, adt, closure, debuginfo, expr, machine};
+use {debuginfo, machine};
use base::{self, push_ctxt};
-use callee::Callee;
use trans_item::TransItem;
-use common::{type_is_sized, C_nil, const_get_elt};
-use common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
-use common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
-use common::{type_is_fat_ptr, Field, C_vector, C_array, C_null};
-use datum::{Datum, Lvalue};
+use common::{CrateContext, val_ty};
use declare;
-use monomorphize::{self, Instance};
+use monomorphize::{Instance};
use type_::Type;
use type_of;
-use value::Value;
-use Disr;
-use rustc::ty::subst::Substs;
-use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer};
-use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::cast::{CastTy,IntTy};
-use util::nodemap::NodeMap;
-use rustc_const_math::{ConstInt, ConstUsize, ConstIsize};
+use rustc::ty;
use rustc::hir;
use std::ffi::{CStr, CString};
-use libc::c_uint;
-use syntax::ast::{self, LitKind};
+use syntax::ast;
use syntax::attr::{self, AttrMetaMethods};
use syntax::parse::token;
-use syntax::ptr::P;
-use syntax_pos::Span;
-
-pub type FnArgMap<'a> = Option<&'a NodeMap<ValueRef>>;
-
-pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit)
- -> ValueRef {
- let _icx = push_ctxt("trans_lit");
- debug!("const_lit: {:?}", lit);
- match lit.node {
- LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false),
- LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false),
- LitKind::Int(i, ast::LitIntType::Signed(t)) => {
- C_integral(Type::int_from_ty(cx, t), i, true)
- }
- LitKind::Int(u, ast::LitIntType::Unsigned(t)) => {
- C_integral(Type::uint_from_ty(cx, t), u, false)
- }
- LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
- let lit_int_ty = cx.tcx().node_id_to_type(e.id);
- match lit_int_ty.sty {
- ty::TyInt(t) => {
- C_integral(Type::int_from_ty(cx, t), i as u64, true)
- }
- ty::TyUint(t) => {
- C_integral(Type::uint_from_ty(cx, t), i as u64, false)
- }
- _ => span_bug!(lit.span,
- "integer literal has type {:?} (expected int \
- or usize)",
- lit_int_ty)
- }
- }
- LitKind::Float(ref fs, t) => {
- C_floating(&fs, Type::float_from_ty(cx, t))
- }
- LitKind::FloatUnsuffixed(ref fs) => {
- let lit_float_ty = cx.tcx().node_id_to_type(e.id);
- match lit_float_ty.sty {
- ty::TyFloat(t) => {
- C_floating(&fs, Type::float_from_ty(cx, t))
- }
- _ => {
- span_bug!(lit.span,
- "floating point literal doesn't have the right type");
- }
- }
- }
- LitKind::Bool(b) => C_bool(cx, b),
- LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()),
- LitKind::ByteStr(ref data) => {
- addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str")
- }
- }
-}
pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
unsafe {
gv
}
-/// Deref a constant pointer
-pub fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef {
- let v = match cx.const_unsized().borrow().get(&v) {
- Some(&v) => v,
- None => v
- };
- let d = unsafe { llvm::LLVMGetInitializer(v) };
- if !d.is_null() && t.is_bool() {
- unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) }
- } else {
- d
- }
-}
-
-fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- v: ValueRef,
- ty: Ty<'tcx>)
- -> (ValueRef, Ty<'tcx>) {
- match ty.builtin_deref(true, ty::NoPreference) {
- Some(mt) => {
- if type_is_sized(cx.tcx(), mt.ty) {
- (load_const(cx, v, mt.ty), mt.ty)
- } else {
- // Derefing a fat pointer does not change the representation,
- // just the type to the unsized contents.
- (v, mt.ty)
- }
- }
- None => {
- bug!("unexpected dereferenceable type {:?}", ty)
- }
- }
-}
-
-fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- substs: &'tcx Substs<'tcx>,
- arg_vals: &[ValueRef],
- param_substs: &'tcx Substs<'tcx>,
- trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
- let fn_like = lookup_const_fn_by_id(ccx.tcx(), def_id);
- let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
-
- let body = match fn_like.body().expr {
- Some(ref expr) => expr,
- None => return Ok(C_nil(ccx))
- };
-
- let args = &fn_like.decl().inputs;
- assert_eq!(args.len(), arg_vals.len());
-
- let arg_ids = args.iter().map(|arg| arg.pat.id);
- let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
-
- let substs = ccx.tcx().erase_regions(&substs);
- let substs = monomorphize::apply_param_substs(ccx.tcx(),
- param_substs,
- &substs);
-
- const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
-}
-
-pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- ref_expr: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>)
- -> &'tcx hir::Expr {
- let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
- let substs = ccx.tcx().erase_regions(&substs);
- let substs = monomorphize::apply_param_substs(ccx.tcx(),
- param_substs,
- &substs);
- match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
- Some((ref expr, _ty)) => expr,
- None => {
- span_bug!(ref_expr.span, "constant item not found")
- }
- }
-}
-
-pub enum ConstEvalFailure {
- /// in case the const evaluator failed on something that panic at runtime
- /// as defined in RFC 1229
- Runtime(ConstEvalErr),
- // in case we found a true constant
- Compiletime(ConstEvalErr),
-}
-
-impl ConstEvalFailure {
- fn into_inner(self) -> ConstEvalErr {
- match self {
- Runtime(e) => e,
- Compiletime(e) => e,
- }
- }
-
- pub fn as_inner(&self) -> &ConstEvalErr {
- match self {
- &Runtime(ref e) => e,
- &Compiletime(ref e) => e,
- }
- }
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum TrueConst {
- Yes, No
-}
-
-use self::ConstEvalFailure::*;
-
-fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- ref_expr: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>)
- -> Result<ValueRef, ConstEvalFailure> {
- let expr = get_const_expr(ccx, def_id, ref_expr, param_substs);
- let empty_substs = Substs::empty(ccx.tcx());
- match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) {
- Err(Runtime(err)) => {
- report_const_eval_err(ccx.tcx(), &err, expr.span, "expression").emit();
- Err(Compiletime(err))
- },
- other => other,
- }
-}
-
-pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- expr: &hir::Expr,
- qualif: ConstQualif,
- param_substs: &'tcx Substs<'tcx>,
- trueconst: TrueConst)
- -> Result<ValueRef, ConstEvalFailure> {
- debug!("get_const_expr_as_global: {:?}", expr.id);
- // Special-case constants to cache a common global for all uses.
- if let hir::ExprPath(..) = expr.node {
- // `def` must be its own statement and cannot be in the `match`
- // otherwise the `def_map` will be borrowed for the entire match instead
- // of just to get the `def` value
- match ccx.tcx().expect_def(expr.id) {
- Def::Const(def_id) | Def::AssociatedConst(def_id) => {
- if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
- debug!("get_const_expr_as_global ({:?}): found const {:?}",
- expr.id, def_id);
- return get_const_val(ccx, def_id, expr, param_substs);
- }
- },
- _ => {},
- }
- }
-
- let key = (expr.id, param_substs);
- if let Some(&val) = ccx.const_values().borrow().get(&key) {
- return Ok(val);
- }
- let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
- &ccx.tcx().expr_ty(expr));
- let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
- // Avoid autorefs as they would create global instead of stack
- // references, even when only the latter are correct.
- const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)?
- } else {
- const_expr(ccx, expr, param_substs, None, trueconst)?.0
- };
-
- // boolean SSA values are i1, but they have to be stored in i8 slots,
- // otherwise some LLVM optimization passes don't work as expected
- let val = unsafe {
- if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
- llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
- } else {
- val
- }
- };
-
- let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const");
- ccx.const_values().borrow_mut().insert(key, lvalue);
- Ok(lvalue)
-}
-
-pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- e: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>,
- fn_args: FnArgMap,
- trueconst: TrueConst)
- -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> {
- let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &cx.tcx().expr_ty(e));
- let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)?;
- let mut llconst = llconst;
- let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &cx.tcx().expr_ty_adjusted(e));
- let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
- match opt_adj {
- Some(AdjustNeverToAny(..)) => span_bug!(e.span, "const expression of type ! encountered"),
- Some(AdjustReifyFnPointer) => {
- match ety.sty {
- ty::TyFnDef(def_id, substs, _) => {
- llconst = Callee::def(cx, def_id, substs).reify(cx).val;
- }
- _ => {
- bug!("{} cannot be reified to a fn ptr", ety)
- }
- }
- }
- Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => {
- // purely a type-level thing
- }
- Some(AdjustDerefRef(adj)) => {
- let mut ty = ety;
- // Save the last autoderef in case we can avoid it.
- if adj.autoderefs > 0 {
- for _ in 0..adj.autoderefs-1 {
- let (dv, dt) = const_deref(cx, llconst, ty);
- llconst = dv;
- ty = dt;
- }
- }
-
- if adj.autoref.is_some() {
- if adj.autoderefs == 0 {
- // Don't copy data to do a deref+ref
- // (i.e., skip the last auto-deref).
- llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref");
- ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReErased), ty);
- }
- } else if adj.autoderefs > 0 {
- let (dv, dt) = const_deref(cx, llconst, ty);
- llconst = dv;
-
- // If we derefed a fat pointer then we will have an
- // open type here. So we need to update the type with
- // the one returned from const_deref.
- ety_adjusted = dt;
- }
-
- if let Some(target) = adj.unsize {
- let target = monomorphize::apply_param_substs(cx.tcx(),
- param_substs,
- &target);
-
- let pointee_ty = ty.builtin_deref(true, ty::NoPreference)
- .expect("consts: unsizing got non-pointer type").ty;
- let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
- // Normally, the source is a thin pointer and we are
- // adding extra info to make a fat pointer. The exception
- // is when we are upcasting an existing object fat pointer
- // to use a different vtable. In that case, we want to
- // load out the original data pointer so we can repackage
- // it.
- (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]),
- Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32])))
- } else {
- (llconst, None)
- };
-
- let unsized_ty = target.builtin_deref(true, ty::NoPreference)
- .expect("consts: unsizing got non-pointer target type").ty;
- let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
- let base = ptrcast(base, ptr_ty);
- let info = base::unsized_info(cx, pointee_ty, unsized_ty, old_info);
-
- if old_info.is_none() {
- let prev_const = cx.const_unsized().borrow_mut()
- .insert(base, llconst);
- assert!(prev_const.is_none() || prev_const == Some(llconst));
- }
- assert_eq!(abi::FAT_PTR_ADDR, 0);
- assert_eq!(abi::FAT_PTR_EXTRA, 1);
- llconst = C_struct(cx, &[base, info], false);
- }
- }
- None => {}
- };
-
- let llty = type_of::sizing_type_of(cx, ety_adjusted);
- let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
- let tsize = machine::llsize_of_alloc(cx, llty);
- if csize != tsize {
- cx.sess().abort_if_errors();
- unsafe {
- // FIXME these values could use some context
- llvm::LLVMDumpValue(llconst);
- llvm::LLVMDumpValue(C_undef(llty));
- }
- bug!("const {:?} of type {:?} has size {} instead of {}",
- e, ety_adjusted,
- csize, tsize);
- }
- Ok((llconst, ety_adjusted))
-}
-
-fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
- te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
- // The only kind of unary expression that we check for validity
- // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`).
- if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node {
-
- // An unfortunate special case: we parse e.g. -128 as a
- // negation of the literal 128, which means if we're expecting
- // a i8 (or if it was already suffixed, e.g. `-128_i8`), then
- // 128 will have already overflowed to -128, and so then the
- // constant evaluator thinks we're trying to negate -128.
- //
- // Catch this up front by looking for ExprLit directly,
- // and just accepting it.
- if let hir::ExprLit(_) = inner_e.node { return Ok(()); }
- let cval = match to_const_int(te, t, cx.tcx()) {
- Some(v) => v,
- None => return Ok(()),
- };
- const_err(cx, e.span, (-cval).map_err(ErrKind::Math), trueconst)?;
- }
- Ok(())
-}
-
-pub fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
- match t.sty {
- ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
- ast::IntTy::I8 => {
- assert_eq!(input as i8 as i64, input);
- Some(ConstInt::I8(input as i8))
- },
- ast::IntTy::I16 => {
- assert_eq!(input as i16 as i64, input);
- Some(ConstInt::I16(input as i16))
- },
- ast::IntTy::I32 => {
- assert_eq!(input as i32 as i64, input);
- Some(ConstInt::I32(input as i32))
- },
- ast::IntTy::I64 => {
- Some(ConstInt::I64(input))
- },
- ast::IntTy::Is => {
- ConstIsize::new(input, tcx.sess.target.int_type)
- .ok().map(ConstInt::Isize)
- },
- }),
- ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
- ast::UintTy::U8 => {
- assert_eq!(input as u8 as u64, input);
- Some(ConstInt::U8(input as u8))
- },
- ast::UintTy::U16 => {
- assert_eq!(input as u16 as u64, input);
- Some(ConstInt::U16(input as u16))
- },
- ast::UintTy::U32 => {
- assert_eq!(input as u32 as u64, input);
- Some(ConstInt::U32(input as u32))
- },
- ast::UintTy::U64 => {
- Some(ConstInt::U64(input))
- },
- ast::UintTy::Us => {
- ConstUsize::new(input, tcx.sess.target.uint_type)
- .ok().map(ConstInt::Usize)
- },
- }),
- _ => None,
- }
-}
-
-pub fn const_err<T>(cx: &CrateContext,
- span: Span,
- result: Result<T, ErrKind>,
- trueconst: TrueConst)
- -> Result<T, ConstEvalFailure> {
- match (result, trueconst) {
- (Ok(x), _) => Ok(x),
- (Err(err), TrueConst::Yes) => {
- let err = ConstEvalErr{ span: span, kind: err };
- report_const_eval_err(cx.tcx(), &err, span, "expression").emit();
- Err(Compiletime(err))
- },
- (Err(err), TrueConst::No) => {
- let err = ConstEvalErr{ span: span, kind: err };
- let mut diag = cx.tcx().sess.struct_span_warn(
- span, "this expression will panic at run-time");
- note_const_eval_err(cx.tcx(), &err, span, "expression", &mut diag);
- diag.emit();
- Err(Runtime(err))
- },
- }
-}
-
-fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
- te1: ValueRef, te2: ValueRef,
- trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
- let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { bug!() };
- let (lhs, rhs) = match (to_const_int(te1, t, cx.tcx()), to_const_int(te2, t, cx.tcx())) {
- (Some(v1), Some(v2)) => (v1, v2),
- _ => return Ok(()),
- };
- let result = match b.node {
- hir::BiAdd => lhs + rhs,
- hir::BiSub => lhs - rhs,
- hir::BiMul => lhs * rhs,
- hir::BiDiv => lhs / rhs,
- hir::BiRem => lhs % rhs,
- hir::BiShl => lhs << rhs,
- hir::BiShr => lhs >> rhs,
- _ => return Ok(()),
- };
- const_err(cx, e.span, result.map_err(ErrKind::Math), trueconst)?;
- Ok(())
-}
-
-fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- e: &hir::Expr,
- ety: Ty<'tcx>,
- param_substs: &'tcx Substs<'tcx>,
- fn_args: FnArgMap,
- trueconst: TrueConst)
- -> Result<ValueRef, ConstEvalFailure>
-{
- debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})",
- e,
- ety,
- param_substs);
-
- let map_list = |exprs: &[P<hir::Expr>]| -> Result<Vec<ValueRef>, ConstEvalFailure> {
- exprs.iter()
- .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l))
- .collect::<Vec<Result<ValueRef, ConstEvalFailure>>>()
- .into_iter()
- .collect()
- // this dance is necessary to eagerly run const_expr so all errors are reported
- };
- let _icx = push_ctxt("const_expr");
- Ok(match e.node {
- hir::ExprLit(ref lit) => const_lit(cx, e, &lit),
- hir::ExprBinary(b, ref e1, ref e2) => {
- /* Neither type is bottom, and we expect them to be unified
- * already, so the following is safe. */
- let (te1, ty) = const_expr(cx, &e1, param_substs, fn_args, trueconst)?;
- debug!("const_expr_unadjusted: te1={:?}, ty={:?}",
- Value(te1), ty);
- assert!(!ty.is_simd());
- let is_float = ty.is_fp();
- let signed = ty.is_signed();
-
- let (te2, ty2) = const_expr(cx, &e2, param_substs, fn_args, trueconst)?;
- debug!("const_expr_unadjusted: te2={:?}, ty={:?}",
- Value(te2), ty2);
-
- check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)?;
-
- unsafe { match b.node {
- hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
- hir::BiAdd => llvm::LLVMConstAdd(te1, te2),
-
- hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
- hir::BiSub => llvm::LLVMConstSub(te1, te2),
-
- hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
- hir::BiMul => llvm::LLVMConstMul(te1, te2),
-
- hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
- hir::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2),
- hir::BiDiv => llvm::LLVMConstUDiv(te1, te2),
-
- hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
- hir::BiRem if signed => llvm::LLVMConstSRem(te1, te2),
- hir::BiRem => llvm::LLVMConstURem(te1, te2),
-
- hir::BiAnd => llvm::LLVMConstAnd(te1, te2),
- hir::BiOr => llvm::LLVMConstOr(te1, te2),
- hir::BiBitXor => llvm::LLVMConstXor(te1, te2),
- hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
- hir::BiBitOr => llvm::LLVMConstOr(te1, te2),
- hir::BiShl => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- llvm::LLVMConstShl(te1, te2)
- },
- hir::BiShr => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- if signed { llvm::LLVMConstAShr(te1, te2) }
- else { llvm::LLVMConstLShr(te1, te2) }
- },
- hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => {
- if is_float {
- let cmp = base::bin_op_to_fcmp_predicate(b.node);
- llvm::LLVMConstFCmp(cmp, te1, te2)
- } else {
- let cmp = base::bin_op_to_icmp_predicate(b.node, signed);
- llvm::LLVMConstICmp(cmp, te1, te2)
- }
- },
- } } // unsafe { match b.node {
- },
- hir::ExprUnary(u, ref inner_e) => {
- let (te, ty) = const_expr(cx, &inner_e, param_substs, fn_args, trueconst)?;
-
- check_unary_expr_validity(cx, e, ty, te, trueconst)?;
-
- let is_float = ty.is_fp();
- unsafe { match u {
- hir::UnDeref => const_deref(cx, te, ty).0,
- hir::UnNot => llvm::LLVMConstNot(te),
- hir::UnNeg if is_float => llvm::LLVMConstFNeg(te),
- hir::UnNeg => llvm::LLVMConstNeg(te),
- } }
- },
- hir::ExprField(ref base, field) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let brepr = adt::represent_type(cx, bt);
- let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
- let ix = vinfo.field_index(field.node);
- adt::const_get_field(&brepr, bv, vinfo.discr, ix)
- },
- hir::ExprTupField(ref base, idx) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let brepr = adt::represent_type(cx, bt);
- let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
- adt::const_get_field(&brepr, bv, vinfo.discr, idx.node)
- },
- hir::ExprIndex(ref base, ref index) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let iv = const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)?.0;
- let iv = if let Some(iv) = const_to_opt_uint(iv) {
- iv
- } else {
- span_bug!(index.span, "index is not an integer-constant expression");
- };
- let (arr, len) = match bt.sty {
- ty::TyArray(_, u) => (bv, C_uint(cx, u)),
- ty::TySlice(..) | ty::TyStr => {
- let e1 = const_get_elt(bv, &[0]);
- (load_const(cx, e1, bt), const_get_elt(bv, &[1]))
- },
- ty::TyRef(_, mt) => match mt.ty.sty {
- ty::TyArray(_, u) => {
- (load_const(cx, bv, mt.ty), C_uint(cx, u))
- },
- _ => span_bug!(base.span,
- "index-expr base must be a vector \
- or string type, found {:?}",
- bt),
- },
- _ => span_bug!(base.span,
- "index-expr base must be a vector \
- or string type, found {:?}",
- bt),
- };
-
- let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
- let len = match bt.sty {
- ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
- ty::TyStr => {
- assert!(len > 0);
- len - 1
- },
- _ => len,
- },
- _ => len,
- };
- if iv >= len {
- // FIXME #3170: report this earlier on in the const-eval
- // pass. Reporting here is a bit late.
- const_err(cx, e.span, Err(ErrKind::IndexOutOfBounds {
- len: len,
- index: iv
- }), trueconst)?;
- C_undef(val_ty(arr).element_type())
- } else {
- const_get_elt(arr, &[iv as c_uint])
- }
- },
- hir::ExprCast(ref base, _) => {
- let t_cast = ety;
- let llty = type_of::type_of(cx, t_cast);
- let (v, t_expr) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast);
- if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) {
- return Ok(v);
- }
- if type_is_fat_ptr(cx.tcx(), t_expr) {
- // Fat pointer casts.
- let t_cast_inner =
- t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
- let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
- let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]),
- ptr_ty);
- if type_is_fat_ptr(cx.tcx(), t_cast) {
- let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]);
- return Ok(C_struct(cx, &[addr, info], false))
- } else {
- return Ok(addr);
- }
- }
- unsafe { match (
- CastTy::from_ty(t_expr).expect("bad input type for cast"),
- CastTy::from_ty(t_cast).expect("bad output type for cast"),
- ) {
- (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
- let repr = adt::represent_type(cx, t_expr);
- let discr = adt::const_get_discrim(&repr, v);
- let iv = C_integral(cx.int_type(), discr.0, false);
- let s = adt::is_discr_signed(&repr) as Bool;
- llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
- },
- (CastTy::Int(_), CastTy::Int(_)) => {
- let s = t_expr.is_signed() as Bool;
- llvm::LLVMConstIntCast(v, llty.to_ref(), s)
- },
- (CastTy::Int(_), CastTy::Float) => {
- if t_expr.is_signed() {
- llvm::LLVMConstSIToFP(v, llty.to_ref())
- } else {
- llvm::LLVMConstUIToFP(v, llty.to_ref())
- }
- },
- (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
- (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
- (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
- (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
- | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
- ptrcast(v, llty)
- },
- (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
- (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
- (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
- llvm::LLVMConstPtrToInt(v, llty.to_ref())
- },
- _ => {
- span_bug!(e.span, "bad combination of types for cast")
- },
- } } // unsafe { match ( ... ) {
- },
- hir::ExprAddrOf(hir::MutImmutable, ref sub) => {
- // If this is the address of some static, then we need to return
- // the actual address of the static itself (short circuit the rest
- // of const eval).
- let mut cur = sub;
- loop {
- match cur.node {
- hir::ExprBlock(ref blk) => {
- if let Some(ref sub) = blk.expr {
- cur = sub;
- } else {
- break;
- }
- },
- _ => break,
- }
- }
- if let Some(Def::Static(def_id, _)) = cx.tcx().expect_def_or_none(cur.id) {
- get_static(cx, def_id).val
- } else {
- // If this isn't the address of a static, then keep going through
- // normal constant evaluation.
- let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
- addr_of(cx, v, type_of::align_of(cx, ty), "ref")
- }
- },
- hir::ExprAddrOf(hir::MutMutable, ref sub) => {
- let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
- addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice")
- },
- hir::ExprTup(ref es) => {
- let repr = adt::represent_type(cx, ety);
- let vals = map_list(&es[..])?;
- adt::trans_const(cx, &repr, Disr(0), &vals[..])
- },
- hir::ExprStruct(_, ref fs, ref base_opt) => {
- let repr = adt::represent_type(cx, ety);
-
- let base_val = match *base_opt {
- Some(ref base) => Some(const_expr(
- cx,
- &base,
- param_substs,
- fn_args,
- trueconst,
- )?),
- None => None
- };
-
- let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id);
- let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| {
- match (fs.iter().find(|f| f_name == f.name.node), base_val) {
- (Some(ref f), _) => {
- const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l)
- },
- (_, Some((bv, _))) => Ok(adt::const_get_field(&repr, bv, discr, ix)),
- (_, None) => span_bug!(e.span, "missing struct field"),
- }
- })
- .collect::<Vec<Result<_, ConstEvalFailure>>>()
- .into_iter()
- .collect::<Result<Vec<_>,ConstEvalFailure>>();
- let cs = cs?;
- if ety.is_simd() {
- C_vector(&cs[..])
- } else {
- adt::trans_const(cx, &repr, discr, &cs[..])
- }
- },
- hir::ExprVec(ref es) => {
- let unit_ty = ety.sequence_element_type(cx.tcx());
- let llunitty = type_of::type_of(cx, unit_ty);
- let vs = es.iter()
- .map(|e| const_expr(
- cx,
- &e,
- param_substs,
- fn_args,
- trueconst,
- ).map(|(l, _)| l))
- .collect::<Vec<Result<_, ConstEvalFailure>>>()
- .into_iter()
- .collect::<Result<Vec<_>, ConstEvalFailure>>();
- let vs = vs?;
- // If the vector contains enums, an LLVM array won't work.
- if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
- C_struct(cx, &vs[..], false)
- } else {
- C_array(llunitty, &vs[..])
- }
- },
- hir::ExprRepeat(ref elem, ref count) => {
- let unit_ty = ety.sequence_element_type(cx.tcx());
- let llunitty = type_of::type_of(cx, unit_ty);
- let n = eval_length(cx.tcx(), count, "repeat count").unwrap();
- let unit_val = const_expr(cx, &elem, param_substs, fn_args, trueconst)?.0;
- let vs = vec![unit_val; n];
- if val_ty(unit_val) != llunitty {
- C_struct(cx, &vs[..], false)
- } else {
- C_array(llunitty, &vs[..])
- }
- },
- hir::ExprPath(..) => {
- match cx.tcx().expect_def(e.id) {
- Def::Local(_, id) => {
- if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) {
- val
- } else {
- span_bug!(e.span, "const fn argument not found")
- }
- }
- Def::Fn(..) | Def::Method(..) => C_nil(cx),
- Def::Const(def_id) | Def::AssociatedConst(def_id) => {
- load_const(cx, get_const_val(cx, def_id, e, param_substs)?,
- ety)
- }
- Def::Variant(enum_did, variant_did) => {
- let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
- match vinfo.kind {
- ty::VariantKind::Unit => {
- let repr = adt::represent_type(cx, ety);
- adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[])
- }
- ty::VariantKind::Tuple => C_nil(cx),
- ty::VariantKind::Struct => {
- span_bug!(e.span, "path-expr refers to a dict variant!")
- }
- }
- }
- // Unit struct or ctor.
- Def::Struct(..) => C_null(type_of::type_of(cx, ety)),
- _ => {
- span_bug!(e.span, "expected a const, fn, struct, \
- or variant def")
- }
- }
- },
- hir::ExprCall(ref callee, ref args) => {
- let mut callee = &**callee;
- loop {
- callee = match callee.node {
- hir::ExprBlock(ref block) => match block.expr {
- Some(ref tail) => &tail,
- None => break,
- },
- _ => break,
- };
- }
- let arg_vals = map_list(args)?;
- match cx.tcx().expect_def(callee.id) {
- Def::Fn(did) | Def::Method(did) => {
- const_fn_call(
- cx,
- did,
- cx.tcx().node_id_item_substs(callee.id).substs,
- &arg_vals,
- param_substs,
- trueconst,
- )?
- }
- Def::Struct(..) => {
- if ety.is_simd() {
- C_vector(&arg_vals[..])
- } else {
- let repr = adt::represent_type(cx, ety);
- adt::trans_const(cx, &repr, Disr(0), &arg_vals[..])
- }
- }
- Def::Variant(enum_did, variant_did) => {
- let repr = adt::represent_type(cx, ety);
- let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
- adt::trans_const(cx,
- &repr,
- Disr::from(vinfo.disr_val),
- &arg_vals[..])
- }
- _ => span_bug!(e.span, "expected a struct, variant, or const fn def"),
- }
- },
- hir::ExprMethodCall(_, _, ref args) => {
- let arg_vals = map_list(args)?;
- let method_call = ty::MethodCall::expr(e.id);
- let method = cx.tcx().tables.borrow().method_map[&method_call];
- const_fn_call(cx, method.def_id, method.substs,
- &arg_vals, param_substs, trueconst)?
- },
- hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0,
- hir::ExprBlock(ref block) => {
- match block.expr {
- Some(ref expr) => const_expr(
- cx,
- &expr,
- param_substs,
- fn_args,
- trueconst,
- )?.0,
- None => C_nil(cx),
- }
- },
- hir::ExprClosure(_, ref decl, ref body, _) => {
- match ety.sty {
- ty::TyClosure(def_id, substs) => {
- closure::trans_closure_expr(closure::Dest::Ignore(cx),
- decl,
- body,
- e.id,
- def_id,
- substs);
- }
- _ =>
- span_bug!(
- e.span,
- "bad type for closure expr: {:?}", ety)
- }
- C_null(type_of::type_of(cx, ety))
- },
- _ => span_bug!(e.span,
- "bad constant expression type in consts::const_expr"),
- })
-}
-
-pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
- -> Datum<'tcx, Lvalue> {
- let ty = ccx.tcx().lookup_item_type(def_id).ty;
-
+pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
let instance = Instance::mono(ccx.shared(), def_id);
if let Some(&g) = ccx.instances().borrow().get(&instance) {
- return Datum::new(g, ty, Lvalue::new("static"));
+ return g;
}
+ let ty = ccx.tcx().lookup_item_type(def_id).ty;
let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) {
let llty = type_of::type_of(ccx, ty);
let defined_in_current_codegen_unit = ccx.codegen_unit()
.items()
.contains_key(&TransItem::Static(id));
- if defined_in_current_codegen_unit {
- if declare::get_declared_value(ccx, sym).is_none() {
- span_bug!(span, "trans: Static not properly pre-defined?");
- }
- } else {
- if declare::get_declared_value(ccx, sym).is_some() {
- span_bug!(span, "trans: Conflicting symbol names for static?");
- }
+ assert!(!defined_in_current_codegen_unit);
+
+ if declare::get_declared_value(ccx, sym).is_some() {
+ span_bug!(span, "trans: Conflicting symbol names for static?");
}
let g = declare::define_global(ccx, sym, llty).unwrap();
ccx.instances().borrow_mut().insert(instance, g);
ccx.statics().borrow_mut().insert(g, def_id);
- Datum::new(g, ty, Lvalue::new("static"))
+ g
}
pub fn trans_static(ccx: &CrateContext,
m: hir::Mutability,
- expr: &hir::Expr,
id: ast::NodeId,
attrs: &[ast::Attribute])
-> Result<ValueRef, ConstEvalErr> {
unsafe {
let _icx = push_ctxt("trans_static");
let def_id = ccx.tcx().map.local_def_id(id);
- let datum = get_static(ccx, def_id);
+ let g = get_static(ccx, def_id);
- let check_attrs = |attrs: &[ast::Attribute]| {
- let default_to_mir = ccx.sess().opts.debugging_opts.orbit;
- let invert = if default_to_mir { "rustc_no_mir" } else { "rustc_mir" };
- default_to_mir ^ attrs.iter().any(|item| item.check_name(invert))
- };
- let use_mir = check_attrs(ccx.tcx().map.attrs(id));
-
- let v = if use_mir {
- ::mir::trans_static_initializer(ccx, def_id)
- } else {
- let empty_substs = Substs::empty(ccx.tcx());
- const_expr(ccx, expr, empty_substs, None, TrueConst::Yes)
- .map(|(v, _)| v)
- }.map_err(|e| e.into_inner())?;
+ let v = ::mir::trans_static_initializer(ccx, def_id)?;
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
v
};
- let llty = type_of::type_of(ccx, datum.ty);
+ let ty = ccx.tcx().lookup_item_type(def_id).ty;
+ let llty = type_of::type_of(ccx, ty);
let g = if val_llty == llty {
- datum.val
+ g
} else {
// If we created the global with the wrong type,
// correct the type.
let empty_string = CString::new("").unwrap();
- let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val));
+ let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
- llvm::LLVMSetValueName(datum.val, empty_string.as_ptr());
+ llvm::LLVMSetValueName(g, empty_string.as_ptr());
let new_g = llvm::LLVMRustGetOrInsertGlobal(
ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::trans_crate.)
- ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g));
+ ccx.statics_to_rauw().borrow_mut().push((g, new_g));
new_g
};
- llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty));
+ llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty));
llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if m != hir::MutMutable {
- let tcontents = datum.ty.type_contents(ccx.tcx());
+ let tcontents = ty.type_contents(ccx.tcx());
if !tcontents.interior_unsafe() {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
pub n_glues_created: Cell<usize>,
pub n_null_glues: Cell<usize>,
pub n_real_glues: Cell<usize>,
- pub n_fallback_instantiations: Cell<usize>,
pub n_fns: Cell<usize>,
- pub n_monos: Cell<usize>,
pub n_inlines: Cell<usize>,
pub n_closures: Cell<usize>,
pub n_llvm_insns: Cell<usize>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
stats: Stats,
check_overflow: bool,
- check_drop_flag_for_sanity: bool,
mir_map: &'a MirMap<'tcx>,
mir_cache: RefCell<DepTrackingMap<MirCache<'tcx>>>,
drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, (ValueRef, FnType)>>,
/// Cache instances of monomorphic and polymorphic items
instances: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
- monomorphizing: RefCell<DefIdMap<usize>>,
/// Cache generated vtables
vtables: RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>>,
/// Cache of constant strings,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet,
- check_overflow: bool,
- check_drop_flag_for_sanity: bool)
+ check_overflow: bool)
-> SharedCrateContext<'b, 'tcx> {
let (metadata_llcx, metadata_llmod) = unsafe {
create_context_and_module(&tcx.sess, "metadata")
n_glues_created: Cell::new(0),
n_null_glues: Cell::new(0),
n_real_glues: Cell::new(0),
- n_fallback_instantiations: Cell::new(0),
n_fns: Cell::new(0),
- n_monos: Cell::new(0),
n_inlines: Cell::new(0),
n_closures: Cell::new(0),
n_llvm_insns: Cell::new(0),
fn_stats: RefCell::new(Vec::new()),
},
check_overflow: check_overflow,
- check_drop_flag_for_sanity: check_drop_flag_for_sanity,
use_dll_storage_attrs: use_dll_storage_attrs,
translation_items: RefCell::new(FnvHashSet()),
trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
/// Given the def-id of some item that has no type parameters, make
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> {
- Substs::for_item(self.tcx(), item_def_id, |_, _| ty::ReErased, |_, _| {
+ Substs::for_item(self.tcx(), item_def_id,
+ |_, _| self.tcx().mk_region(ty::ReErased),
+ |_, _| {
bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
})
}
fn_pointer_shims: RefCell::new(FnvHashMap()),
drop_glues: RefCell::new(FnvHashMap()),
instances: RefCell::new(FnvHashMap()),
- monomorphizing: RefCell::new(DefIdMap()),
vtables: RefCell::new(FnvHashMap()),
const_cstr_cache: RefCell::new(FnvHashMap()),
const_unsized: RefCell::new(FnvHashMap()),
&self.local().instances
}
- pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
- &self.local().monomorphizing
- }
-
pub fn vtables<'a>(&'a self) -> &'a RefCell<FnvHashMap<ty::PolyTraitRef<'tcx>, ValueRef>> {
&self.local().vtables
}
TypeOfDepthLock(self.local())
}
- pub fn check_overflow(&self) -> bool {
- self.shared.check_overflow
+ pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout {
+ self.tcx().normalizing_infer_ctxt(traits::Reveal::All).enter(|infcx| {
+ ty.layout(&infcx).unwrap_or_else(|e| {
+ bug!("failed to get layout for `{}`: {}", ty, e);
+ })
+ })
}
- pub fn check_drop_flag_for_sanity(&self) -> bool {
- // This controls whether we emit a conditional llvm.debugtrap
- // guarded on whether the dropflag is one of its (two) valid
- // values.
- self.shared.check_drop_flag_for_sanity
+ pub fn check_overflow(&self) -> bool {
+ self.shared.check_overflow
}
pub fn use_dll_storage_attrs(&self) -> bool {
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use llvm::ValueRef;
-use rustc::hir::def::Def;
-use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
-use rustc::ty::subst::Substs;
-use base::*;
-use basic_block::BasicBlock;
-use build::*;
-use callee::{Callee, ArgVals};
-use cleanup::CleanupMethods;
-use cleanup;
-use common::*;
-use consts;
-use debuginfo;
-use debuginfo::{DebugLoc, ToDebugLoc};
-use expr;
-use machine;
-
-use rustc::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-
-pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- s: &hir::Stmt)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_stmt");
- let fcx = cx.fcx;
- debug!("trans_stmt({:?})", s);
-
- if cx.unreachable.get() {
- return cx;
- }
-
- if cx.sess().asm_comments() {
- add_span_comment(cx, s.span, &format!("{:?}", s));
- }
-
- let mut bcx = cx;
-
- let id = s.node.id();
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- match s.node {
- hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
- bcx = trans_stmt_semi(bcx, &e);
- }
- hir::StmtDecl(ref d, _) => {
- match d.node {
- hir::DeclLocal(ref local) => {
- bcx = init_local(bcx, &local);
- debuginfo::create_local_var_metadata(bcx, &local);
- }
- // Inner items are visited by `trans_item`/`trans_meth`.
- hir::DeclItem(_) => {},
- }
- }
- }
-
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id());
-
- return bcx;
-}
-
-pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_stmt_semi");
-
- if cx.unreachable.get() {
- return cx;
- }
-
- let ty = expr_ty(cx, e);
- if cx.fcx.type_needs_drop(ty) {
- expr::trans_to_lvalue(cx, e, "stmt").bcx
- } else {
- expr::trans_into(cx, e, expr::Ignore)
- }
-}
-
-pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- b: &hir::Block,
- mut dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_block");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- for s in &b.stmts {
- bcx = trans_stmt(bcx, s);
- }
-
- if dest != expr::Ignore {
- let block_ty = node_id_type(bcx, b.id);
-
- if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
- dest = expr::Ignore;
- } else if b.expr.is_some() {
- // If the block has an expression, but that expression isn't reachable,
- // don't save into the destination given, ignore it.
- if let Some(ref cfg) = bcx.fcx.cfg {
- if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
- dest = expr::Ignore;
- }
- }
- }
- }
-
- match b.expr {
- Some(ref e) => {
- if !bcx.unreachable.get() {
- bcx = expr::trans_into(bcx, &e, dest);
- }
- }
- None => {
- assert!(dest == expr::Ignore || bcx.unreachable.get());
- }
- }
-
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
-
- return bcx;
-}
-
-pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- if_id: ast::NodeId,
- cond: &hir::Expr,
- thn: &hir::Block,
- els: Option<&hir::Expr>,
- dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
- bcx.to_str(), if_id, cond, thn.id, dest);
- let _icx = push_ctxt("trans_if");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let mut bcx = bcx;
-
- let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
-
- // Drop branches that are known to be impossible
- if let Some(cv) = const_to_opt_uint(cond_val) {
- if cv == 1 {
- // if true { .. } [else { .. }]
- bcx = trans_block(bcx, &thn, dest);
- DebugLoc::None.apply(bcx.fcx);
- } else {
- if let Some(elexpr) = els {
- bcx = expr::trans_into(bcx, &elexpr, dest);
- DebugLoc::None.apply(bcx.fcx);
- }
- }
-
- return bcx;
- }
-
- let name = format!("then-block-{}-", thn.id);
- let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
- let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
- DebugLoc::None.apply(bcx.fcx);
-
- let cond_source_loc = cond.debug_loc();
-
- let next_bcx;
- match els {
- Some(elexpr) => {
- let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
- let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
- next_bcx = bcx.fcx.join_blocks(if_id,
- &[then_bcx_out, else_bcx_out]);
- CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
- }
-
- None => {
- next_bcx = bcx.fcx.new_id_block("next-block", if_id);
- Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
- CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
- }
- }
-
- // Clear the source location because it is still set to whatever has been translated
- // right before.
- DebugLoc::None.apply(next_bcx.fcx);
-
- next_bcx
-}
-
-pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_expr: &hir::Expr,
- cond: &hir::Expr,
- body: &hir::Block)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_while");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- // bcx
- // |
- // cond_bcx_in <--------+
- // | |
- // cond_bcx_out |
- // | | |
- // | body_bcx_in |
- // cleanup_blk | |
- // | body_bcx_out --+
- // next_bcx_in
-
- let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
- let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
- let body_bcx_in = fcx.new_id_block("while_body", body.id);
-
- fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
-
- Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
-
- // compile the block where we will handle loop cleanups
- let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
-
- // compile the condition
- let Result {bcx: cond_bcx_out, val: cond_val} =
- expr::trans(cond_bcx_in, cond).to_llbool();
-
- CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
-
- // loop body:
- let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
-
- fcx.pop_loop_cleanup_scope(loop_expr.id);
- return next_bcx_in;
-}
-
-pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_expr: &hir::Expr,
- body: &hir::Block)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_loop");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- // bcx
- // |
- // body_bcx_in
- // |
- // body_bcx_out
- //
- // next_bcx
- //
- // Links between body_bcx_in and next_bcx are created by
- // break statements.
-
- let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
- let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
-
- fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
-
- Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
- let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
-
- fcx.pop_loop_cleanup_scope(loop_expr.id);
-
- // If there are no predecessors for the next block, we just translated an endless loop and the
- // next block is unreachable
- if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
- Unreachable(next_bcx_in);
- }
-
- return next_bcx_in;
-}
-
-pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- opt_label: Option<ast::Name>,
- exit: usize)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_break_cont");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- // Locate loop that we will break to
- let loop_id = match opt_label {
- None => fcx.top_loop_scope(),
- Some(_) => {
- match bcx.tcx().expect_def(expr.id) {
- Def::Label(loop_id) => loop_id,
- r => {
- bug!("{:?} in def-map for label", r)
- }
- }
- }
- };
-
- // Generate appropriate cleanup code and branch
- let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
- Br(bcx, cleanup_llbb, expr.debug_loc());
- Unreachable(bcx); // anything afterwards should be ignored
- return bcx;
-}
-
-pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- label_opt: Option<ast::Name>)
- -> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
-}
-
-pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- label_opt: Option<ast::Name>)
- -> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
-}
-
-pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- return_expr: &hir::Expr,
- retval_expr: Option<&hir::Expr>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_ret");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let mut bcx = bcx;
- if let Some(x) = retval_expr {
- let dest = if fcx.llretslotptr.get().is_some() {
- expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
- } else {
- expr::Ignore
- };
- bcx = expr::trans_into(bcx, &x, dest);
- match dest {
- expr::SaveIn(slot) if fcx.needs_ret_allocas => {
- Store(bcx, slot, fcx.llretslotptr.get().unwrap());
- }
- _ => {}
- }
- }
- let cleanup_llbb = fcx.return_exit_block();
- Br(bcx, cleanup_llbb, return_expr.debug_loc());
- Unreachable(bcx);
- return bcx;
-}
-
-pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- fail_str: InternedString)
- -> Block<'blk, 'tcx> {
- let ccx = bcx.ccx();
- let _icx = push_ctxt("trans_fail_value");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let v_str = C_str_slice(ccx, fail_str);
- let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
- let filename = token::intern_and_get_ident(&loc.file.name);
- let filename = C_str_slice(ccx, filename);
- let line = C_u32(ccx, loc.line as u32);
- let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
- let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
- let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
- let args = vec!(expr_file_line);
- let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicFnLangItem);
- Callee::def(ccx, did, Substs::empty(ccx.tcx()))
- .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
-
-pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- index: ValueRef,
- len: ValueRef)
- -> Block<'blk, 'tcx> {
- let ccx = bcx.ccx();
- let _icx = push_ctxt("trans_fail_bounds_check");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- // Extract the file/line from the span
- let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
- let filename = token::intern_and_get_ident(&loc.file.name);
-
- // Invoke the lang item
- let filename = C_str_slice(ccx, filename);
- let line = C_u32(ccx, loc.line as u32);
- let file_line_const = C_struct(ccx, &[filename, line], false);
- let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
- let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
- let args = vec!(file_line, index, len);
- let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicBoundsCheckFnLangItem);
- Callee::def(ccx, did, Substs::empty(ccx.tcx()))
- .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! ## The Datum module
-//!
-//! A `Datum` encapsulates the result of evaluating a Rust expression. It
-//! contains a `ValueRef` indicating the result, a `Ty` describing
-//! the Rust type, but also a *kind*. The kind indicates whether the datum
-//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
-//! rvalues -- whether or not the value is "by ref" or "by value".
-//!
-//! The datum API is designed to try and help you avoid memory errors like
-//! forgetting to arrange cleanup or duplicating a value. The type of the
-//! datum incorporates the kind, and thus reflects whether it has cleanup
-//! scheduled:
-//!
-//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
-//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
-//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
-//!
-//! Rvalue and expr datums are noncopyable, and most of the methods on
-//! datums consume the datum itself (with some notable exceptions). This
-//! reflects the fact that datums may represent affine values which ought
-//! to be consumed exactly once, and if you were to try to (for example)
-//! store an affine value multiple times, you would be duplicating it,
-//! which would certainly be a bug.
-//!
-//! Some of the datum methods, however, are designed to work only on
-//! copyable values such as ints or pointers. Those methods may borrow the
-//! datum (`&self`) rather than consume it, but they always include
-//! assertions on the type of the value represented to check that this
-//! makes sense. An example is `shallow_copy()`, which duplicates
-//! a datum value.
-//!
-//! Translating an expression always yields a `Datum<Expr>` result, but
-//! the methods `to_[lr]value_datum()` can be used to coerce a
-//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
-//! needed. Coercing to an lvalue is fairly common, and generally occurs
-//! whenever it is necessary to inspect a value and pull out its
-//! subcomponents (for example, a match, or indexing expression). Coercing
-//! to an rvalue is more unusual; it occurs when moving values from place
-//! to place, such as in an assignment expression or parameter passing.
-//!
-//! ### Lvalues in detail
-//!
-//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
-//! datums are always located in memory, and thus the `ValueRef` for an
-//! LLVM value is always a pointer to the actual Rust value. This means
-//! that if the Datum has a Rust type of `int`, then the LLVM type of the
-//! `ValueRef` will be `int*` (pointer to int).
-//!
-//! Because lvalues already have cleanups scheduled, the memory must be
-//! zeroed to prevent the cleanup from taking place (presuming that the
-//! Rust type needs drop in the first place, otherwise it doesn't
-//! matter). The Datum code automatically performs this zeroing when the
-//! value is stored to a new location, for example.
-//!
-//! Lvalues usually result from evaluating lvalue expressions. For
-//! example, evaluating a local variable `x` yields an lvalue, as does a
-//! reference to a field like `x.f` or an index `x[i]`.
-//!
-//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
-//! This is done with the `to_lvalue_datum` method defined on
-//! `Datum<Expr>`. Basically this method just schedules cleanup if the
-//! datum is an rvalue, possibly storing the value into a stack slot first
-//! if needed. Converting rvalues into lvalues occurs in constructs like
-//! `&foo()` or `match foo() { ref x => ... }`, where the user is
-//! implicitly requesting a temporary.
-//!
-//! ### Rvalues in detail
-//!
-//! Rvalues datums are values with no cleanup scheduled. One must be
-//! careful with rvalue datums to ensure that cleanup is properly
-//! arranged, usually by converting to an lvalue datum or by invoking the
-//! `add_clean` method.
-//!
-//! ### Scratch datums
-//!
-//! Sometimes you need some temporary scratch space. The functions
-//! `[lr]value_scratch_datum()` can be used to get temporary stack
-//! space. As their name suggests, they yield lvalues and rvalues
-//! respectively. That is, the slot from `lvalue_scratch_datum` will have
-//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
-
-pub use self::Expr::*;
-pub use self::RvalueMode::*;
-
-use llvm::ValueRef;
-use adt;
-use base::*;
-use build::{Load, Store};
-use common::*;
-use cleanup;
-use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
-use expr;
-use tvec;
-use value::Value;
-use rustc::ty::Ty;
-
-use std::fmt;
-use syntax::ast;
-use syntax_pos::DUMMY_SP;
-
-/// A `Datum` encapsulates the result of evaluating an expression. It
-/// describes where the value is stored, what Rust type the value has,
-/// whether it is addressed by reference, and so forth. Please refer
-/// the section on datums in `README.md` for more details.
-#[derive(Clone, Copy)]
-pub struct Datum<'tcx, K> {
- /// The llvm value. This is either a pointer to the Rust value or
- /// the value itself, depending on `kind` below.
- pub val: ValueRef,
-
- /// The rust type of the value.
- pub ty: Ty<'tcx>,
-
- /// Indicates whether this is by-ref or by-value.
- pub kind: K,
-}
-
-impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "Datum({:?}, {:?}, {:?})",
- Value(self.val), self.ty, self.kind)
- }
-}
-
-pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
- pub bcx: Block<'blk, 'tcx>,
- pub datum: Datum<'tcx, K>,
-}
-
-#[derive(Debug)]
-pub enum Expr {
- /// a fresh value that was produced and which has no cleanup yet
- /// because it has not yet "landed" into its permanent home
- RvalueExpr(Rvalue),
-
- /// `val` is a pointer into memory for which a cleanup is scheduled
- /// (and thus has type *T). If you move out of an Lvalue, you must
- /// zero out the memory (FIXME #5016).
- LvalueExpr(Lvalue),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum DropFlagInfo {
- DontZeroJustUse(ast::NodeId),
- ZeroAndMaintain(ast::NodeId),
- None,
-}
-
-impl DropFlagInfo {
- pub fn must_zero(&self) -> bool {
- match *self {
- DropFlagInfo::DontZeroJustUse(..) => false,
- DropFlagInfo::ZeroAndMaintain(..) => true,
- DropFlagInfo::None => true,
- }
- }
-
- pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
- -> Option<DropHintDatum<'tcx>> {
- let id = match *self {
- DropFlagInfo::None => return None,
- DropFlagInfo::DontZeroJustUse(id) |
- DropFlagInfo::ZeroAndMaintain(id) => id,
- };
-
- let hints = bcx.fcx.lldropflag_hints.borrow();
- let retval = hints.hint_datum(id);
- assert!(retval.is_some(), "An id (={}) means must have a hint", id);
- retval
- }
-}
-
-// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
-// may not realize that subparts of an Lvalue can have a subset of
-// drop-flags associated with them, while this as written will just
-// memcpy the drop_flag_info. But, it is an easier way to get `_match`
-// off the ground to just let this be `Copy` for now.
-#[derive(Copy, Clone, Debug)]
-pub struct Lvalue {
- pub source: &'static str,
- pub drop_flag_info: DropFlagInfo
-}
-
-#[derive(Debug)]
-pub struct Rvalue {
- pub mode: RvalueMode
-}
-
-/// Classifies what action we should take when a value is moved away
-/// with respect to its drop-flag.
-///
-/// Long term there will be no need for this classification: all flags
-/// (which will be stored on the stack frame) will have the same
-/// interpretation and maintenance code associated with them.
-#[derive(Copy, Clone, Debug)]
-pub enum HintKind {
- /// When the value is moved, set the drop-flag to "dropped"
- /// (i.e. "zero the flag", even when the specific representation
- /// is not literally 0) and when it is reinitialized, set the
- /// drop-flag back to "initialized".
- ZeroAndMaintain,
-
- /// When the value is moved, do not set the drop-flag to "dropped"
- /// However, continue to read the drop-flag in deciding whether to
- /// drop. (In essence, the path/fragment in question will never
- /// need to be dropped at the points where it is moved away by
- /// this code, but we are defending against the scenario where
- /// some *other* code could move away (or drop) the value and thus
- /// zero-the-flag, which is why we will still read from it.
- DontZeroJustUse,
-}
-
-impl Lvalue { // Constructors for various Lvalues.
- pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
- debug!("Lvalue at {} no drop flag info", source);
- Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
- }
-
- pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
- debug!("Lvalue at {} is drop flag hint", source);
- Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
- }
-
- pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
- bcx: Block<'blk, 'tcx>,
- id: ast::NodeId,
- k: HintKind) -> Lvalue {
- let (opt_id, info) = {
- let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
- bcx.tcx().sess.nonzeroing_move_hints();
- let info = match k {
- HintKind::ZeroAndMaintain if hint_available =>
- DropFlagInfo::ZeroAndMaintain(id),
- HintKind::DontZeroJustUse if hint_available =>
- DropFlagInfo::DontZeroJustUse(id),
- _ =>
- DropFlagInfo::None,
- };
- (Some(id), info)
- };
- debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
- Lvalue { source: source, drop_flag_info: info }
- }
-} // end Lvalue constructor methods.
-
-impl Lvalue {
- fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- id: ast::NodeId) -> bool {
- let hints = bcx.fcx.lldropflag_hints.borrow();
- hints.has_hint(id)
- }
- pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
- -> Option<DropHintDatum<'tcx>> {
- self.drop_flag_info.hint_datum(bcx)
- }
-}
-
-impl Rvalue {
- pub fn new(m: RvalueMode) -> Rvalue {
- Rvalue { mode: m }
- }
-}
-
-// Make Datum linear for more type safety.
-impl Drop for Rvalue {
- fn drop(&mut self) { }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum RvalueMode {
- /// `val` is a pointer to the actual value (and thus has type *T)
- ByRef,
-
- /// `val` is the actual value (*only used for immediates* like ints, ptrs)
- ByValue,
-}
-
-pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> {
- return Datum::new(val, ty, Rvalue::new(ByValue));
-}
-
-pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- return DatumBlock::new(bcx, immediate_rvalue(val, ty))
-}
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
-/// initialize the memory.
-///
-/// The flag `zero` indicates how the temporary space itself should be
-/// initialized at the outset of the function; the only time that
-/// `InitAlloca::Uninit` is a valid value for `zero` is when the
-/// caller can prove that either (1.) the code injected by `populate`
-/// onto `bcx` always dominates the end of `scope`, or (2.) the data
-/// being allocated has no associated destructor.
-pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- name: &str,
- zero: InitAlloca,
- scope: cleanup::ScopeId,
- populate: F)
- -> DatumBlock<'blk, 'tcx, Lvalue> where
- F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
- // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed.
- // (See discussion at Issue 30530.)
- let scratch = alloc_ty_init(bcx, ty, zero, name);
- debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}",
- scope, Value(scratch), ty);
-
- // Subtle. Populate the scratch memory *before* scheduling cleanup.
- let bcx = populate(bcx, scratch);
- bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
-
- DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
-}
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. If `zero` is true, the space will be zeroed when it is allocated; this is normally not
-/// necessary, but in the case of automatic rooting in match statements it is possible to have
-/// temporaries that may not get initialized if a certain arm is not taken, so we must zero them.
-/// You must arrange any cleanups etc yourself!
-pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- name: &str)
- -> Datum<'tcx, Rvalue> {
- let scratch = alloc_ty(bcx, ty, name);
- call_lifetime_start(bcx, scratch);
- Datum::new(scratch, ty, Rvalue::new(ByRef))
-}
-
-/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
-/// on whether type is immediate or not.
-pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- ty: Ty<'tcx>) -> RvalueMode {
- if type_is_immediate(ccx, ty) {
- ByValue
- } else {
- ByRef
- }
-}
-
-fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
- fcx: &FunctionContext<'a, 'tcx>,
- scope: cleanup::ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
- debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}",
- scope, Value(val), ty);
- match mode {
- ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
- ByRef => {
- fcx.schedule_lifetime_end(scope, val);
- fcx.schedule_drop_mem(scope, val, ty, None);
- }
- }
-}
-
-pub trait KindOps {
-
- /// Take appropriate action after the value in `datum` has been
- /// stored to a new location.
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx>;
-
- /// True if this mode is a reference mode, meaning that the datum's
- /// val field is a pointer to the actual value
- fn is_by_ref(&self) -> bool;
-
- /// Converts to an Expr kind
- fn to_expr_kind(self) -> Expr;
-
-}
-
-impl KindOps for Rvalue {
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- _val: ValueRef,
- _ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- // No cleanup is scheduled for an rvalue, so we don't have
- // to do anything after a move to cancel or duplicate it.
- if self.is_by_ref() {
- call_lifetime_end(bcx, _val);
- }
- bcx
- }
-
- fn is_by_ref(&self) -> bool {
- self.mode == ByRef
- }
-
- fn to_expr_kind(self) -> Expr {
- RvalueExpr(self)
- }
-}
-
-impl KindOps for Lvalue {
- /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
- /// cleanup. If an @T lvalue is copied, we must increment the reference count.
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
- if bcx.fcx.type_needs_drop(ty) {
- // cancel cleanup of affine values:
- // 1. if it has drop-hint, mark as moved; then code
- // aware of drop-hint won't bother calling the
- // drop-glue itself.
- if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
- let moved_hint_byte = adt::DTOR_MOVED_HINT;
- let hint_llval = hint_datum.to_value().value();
- Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
- }
- // 2. if the drop info says its necessary, drop-fill the memory.
- if self.drop_flag_info.must_zero() {
- let () = drop_done_fill_mem(bcx, val, ty);
- }
- bcx
- } else {
- // FIXME (#5016) would be nice to assert this, but we have
- // to allow for e.g. DontZeroJustUse flags, for now.
- //
- // (The dropflag hint construction should be taking
- // !type_needs_drop into account; earlier analysis phases
- // may not have all the info they need to include such
- // information properly, I think; in particular the
- // fragments analysis works on a non-monomorphized view of
- // the code.)
- //
- // assert_eq!(self.drop_flag_info, DropFlagInfo::None);
- bcx
- }
- }
-
- fn is_by_ref(&self) -> bool {
- true
- }
-
- fn to_expr_kind(self) -> Expr {
- LvalueExpr(self)
- }
-}
-
-impl KindOps for Expr {
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- match *self {
- LvalueExpr(ref l) => l.post_store(bcx, val, ty),
- RvalueExpr(ref r) => r.post_store(bcx, val, ty),
- }
- }
-
- fn is_by_ref(&self) -> bool {
- match *self {
- LvalueExpr(ref l) => l.is_by_ref(),
- RvalueExpr(ref r) => r.is_by_ref()
- }
- }
-
- fn to_expr_kind(self) -> Expr {
- self
- }
-}
-
-impl<'tcx> Datum<'tcx, Rvalue> {
- /// Schedules a cleanup for this datum in the given scope. That means that this datum is no
- /// longer an rvalue datum; hence, this function consumes the datum and returns the contained
- /// ValueRef.
- pub fn add_clean<'a>(self,
- fcx: &FunctionContext<'a, 'tcx>,
- scope: cleanup::ScopeId)
- -> ValueRef {
- add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
- self.val
- }
-
- /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not
- /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`.
- pub fn to_lvalue_datum_in_scope<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &str,
- scope: cleanup::ScopeId)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- let fcx = bcx.fcx;
-
- match self.kind.mode {
- ByRef => {
- add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
- DatumBlock::new(bcx, Datum::new(
- self.val,
- self.ty,
- Lvalue::new("datum::to_lvalue_datum_in_scope")))
- }
-
- ByValue => {
- lvalue_scratch_datum(
- bcx, self.ty, name, InitAlloca::Dropped, scope,
- |bcx, llval| {
- debug!("populate call for Datum::to_lvalue_datum_in_scope \
- self.ty={:?}", self.ty);
- // do not call_lifetime_start here; the
- // `InitAlloc::Dropped` will start scratch
- // value's lifetime at open of function body.
- let bcx = self.store_to(bcx, llval);
- bcx.fcx.schedule_lifetime_end(scope, llval);
- bcx
- })
- }
- }
- }
-
- pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- let mut bcx = bcx;
- match self.kind.mode {
- ByRef => DatumBlock::new(bcx, self),
- ByValue => {
- let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
- bcx = self.store_to(bcx, scratch.val);
- DatumBlock::new(bcx, scratch)
- }
- }
- }
-
- pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- match self.appropriate_rvalue_mode(bcx.ccx()) {
- ByRef => {
- self.to_ref_datum(bcx)
- }
- ByValue => {
- match self.kind.mode {
- ByValue => DatumBlock::new(bcx, self),
- ByRef => {
- let llval = load_ty(bcx, self.val, self.ty);
- call_lifetime_end(bcx, self.val);
- DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue)))
- }
- }
- }
- }
- }
-}
-
-/// Methods suitable for "expr" datums that could be either lvalues or
-/// rvalues. These include coercions into lvalues/rvalues but also a number
-/// of more general operations. (Some of those operations could be moved to
-/// the more general `impl<K> Datum<K>`, but it's convenient to have them
-/// here since we can `match self.kind` rather than having to implement
-/// generic methods in `KindOps`.)
-impl<'tcx> Datum<'tcx, Expr> {
- fn match_kind<R, F, G>(self, if_lvalue: F, if_rvalue: G) -> R where
- F: FnOnce(Datum<'tcx, Lvalue>) -> R,
- G: FnOnce(Datum<'tcx, Rvalue>) -> R,
- {
- let Datum { val, ty, kind } = self;
- match kind {
- LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
- RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
- }
- }
-
- /// Asserts that this datum *is* an lvalue and returns it.
- #[allow(dead_code)] // potentially useful
- pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> {
- self.match_kind(
- |d| d,
- |_| bug!("assert_lvalue given rvalue"))
- }
-
- pub fn store_to_dest<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- dest: expr::Dest,
- expr_id: ast::NodeId)
- -> Block<'blk, 'tcx> {
- match dest {
- expr::Ignore => {
- self.add_clean_if_rvalue(bcx, expr_id);
- bcx
- }
- expr::SaveIn(addr) => {
- self.store_to(bcx, addr)
- }
- }
- }
-
- /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
- /// that may need drop.
- pub fn add_clean_if_rvalue<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- expr_id: ast::NodeId) {
- self.match_kind(
- |_| { /* Nothing to do, cleanup already arranged */ },
- |r| {
- let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
- r.add_clean(bcx.fcx, scope);
- })
- }
-
- pub fn to_lvalue_datum<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &str,
- expr_id: ast::NodeId)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- debug!("to_lvalue_datum self: {:?}", self);
-
- self.match_kind(
- |l| DatumBlock::new(bcx, l),
- |r| {
- let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
- r.to_lvalue_datum_in_scope(bcx, name, scope)
- })
- }
-
- /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
- pub fn to_rvalue_datum<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &'static str)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- self.match_kind(
- |l| {
- let mut bcx = bcx;
- match l.appropriate_rvalue_mode(bcx.ccx()) {
- ByRef => {
- let scratch = rvalue_scratch_datum(bcx, l.ty, name);
- bcx = l.store_to(bcx, scratch.val);
- DatumBlock::new(bcx, scratch)
- }
- ByValue => {
- let v = load_ty(bcx, l.val, l.ty);
- bcx = l.kind.post_store(bcx, l.val, l.ty);
- DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue)))
- }
- }
- },
- |r| DatumBlock::new(bcx, r))
- }
-
-}
-
-/// Methods suitable only for lvalues. These include the various
-/// operations to extract components out of compound data structures,
-/// such as extracting the field from a struct or a particular element
-/// from an array.
-impl<'tcx> Datum<'tcx, Lvalue> {
- /// Converts a datum into a by-ref value. The datum type must be one which is always passed by
- /// reference.
- pub fn to_llref(self) -> ValueRef {
- self.val
- }
-
- // Extracts a component of a compound data structure (e.g., a field from a
- // struct). Note that if self is an opened, unsized type then the returned
- // datum may also be unsized _without the size information_. It is the
- // callers responsibility to package the result in some way to make a valid
- // datum in that case (e.g., by making a fat pointer or opened pair).
- pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
- gep: F)
- -> Datum<'tcx, Lvalue> where
- F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
- {
- let val = if type_is_sized(bcx.tcx(), self.ty) {
- let val = adt::MaybeSizedValue::sized(self.val);
- gep(val)
- } else {
- let val = adt::MaybeSizedValue::unsized_(
- Load(bcx, expr::get_dataptr(bcx, self.val)),
- Load(bcx, expr::get_meta(bcx, self.val)));
- gep(val)
- };
- Datum {
- val: val,
- kind: Lvalue::new("Datum::get_element"),
- ty: ty,
- }
- }
-
- pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>)
- -> (ValueRef, ValueRef) {
- //! Converts a vector into the slice pair.
-
- tvec::get_base_and_len(bcx, self.val, self.ty)
- }
-}
-
-/// Generic methods applicable to any sort of datum.
-impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
- pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
- Datum { val: val, ty: ty, kind: kind }
- }
-
- pub fn to_expr_datum(self) -> Datum<'tcx, Expr> {
- let Datum { val, ty, kind } = self;
- Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
- }
-
- /// Moves or copies this value into a new home, as appropriate depending on the type of the
- /// datum. This method consumes the datum, since it would be incorrect to go on using the datum
- /// if the value represented is affine (and hence the value is moved).
- pub fn store_to<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- self.shallow_copy_raw(bcx, dst);
-
- self.kind.post_store(bcx, self.val, self.ty)
- }
-
- /// Helper function that performs a shallow copy of this value into `dst`, which should be a
- /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
- /// memory (either newly allocated, zeroed, or dropped).
- ///
- /// This function is private to datums because it leaves memory in an unstable state, where the
- /// source value has been copied but not zeroed. Public methods are `store_to` (if you no
- /// longer need the source value) or `shallow_copy` (if you wish the source value to remain
- /// valid).
- fn shallow_copy_raw<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("copy_to_no_check");
-
- if type_is_zero_size(bcx.ccx(), self.ty) {
- return bcx;
- }
-
- if self.kind.is_by_ref() {
- memcpy_ty(bcx, dst, self.val, self.ty);
- } else {
- store_ty(bcx, self.val, dst, self.ty);
- }
-
- return bcx;
- }
-
- /// Copies the value into a new location. This function always preserves the existing datum as
- /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
- /// values (since they must never be duplicated).
- pub fn shallow_copy<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- /*!
- * Copies the value into a new location. This function always
- * preserves the existing datum as a valid value. Therefore,
- * it does not consume `self` and, also, cannot be applied to
- * affine values (since they must never be duplicated).
- */
-
- assert!(!self.ty.moves_by_default(bcx.tcx(),
- &bcx.tcx().empty_parameter_environment(), DUMMY_SP));
- self.shallow_copy_raw(bcx, dst)
- }
-
- /// See the `appropriate_rvalue_mode()` function
- pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
- -> RvalueMode {
- appropriate_rvalue_mode(ccx, self.ty)
- }
-
- /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
- /// responsibility to cleanup the value). For this to work, the value must be something
- /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
- /// naturally passed around by value, and not by reference.
- pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
- assert!(!bcx.fcx.type_needs_drop(self.ty));
- assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
- if self.kind.is_by_ref() {
- load_ty(bcx, self.val, self.ty)
- } else {
- self.val
- }
- }
-
- pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
- assert!(self.ty.is_bool());
- self.to_llscalarish(bcx)
- }
-}
-
-impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
- pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>)
- -> DatumBlock<'blk, 'tcx, K> {
- DatumBlock { bcx: bcx, datum: datum }
- }
-}
-
-impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> {
- pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
- DatumBlock::new(self.bcx, self.datum.to_expr_datum())
- }
-}
-
-impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
- pub fn store_to_dest(self,
- dest: expr::Dest,
- expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
- let DatumBlock { bcx, datum } = self;
- datum.store_to_dest(bcx, dest, expr_id)
- }
-
- pub fn to_llbool(self) -> Result<'blk, 'tcx> {
- let DatumBlock { datum, bcx } = self;
- Result::new(bcx, datum.to_llbool(bcx))
- }
-}
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::{CrateContext, FunctionContext};
-use rustc::hir::pat_util;
use rustc::mir::repr::{Mir, VisibilityScope};
-use rustc::util::nodemap::NodeMap;
use libc::c_uint;
use std::ptr;
-use syntax_pos::{Span, Pos};
-use syntax::{ast, codemap};
+use syntax_pos::Pos;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc::hir::{self, PatKind};
-// This procedure builds the *scope map* for a given function, which maps any
-// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
-//
-// This builder procedure walks the AST in execution order and keeps track of
-// what belongs to which scope, creating DIScope DIEs along the way, and
-// introducing *artificial* lexical scope descriptors where necessary. These
-// artificial scopes allow GDB to correctly handle name shadowing.
-pub fn create_scope_map(cx: &CrateContext,
- args: &[hir::Arg],
- fn_entry_block: &hir::Block,
- fn_metadata: DISubprogram,
- fn_ast_id: ast::NodeId)
- -> NodeMap<DIScope> {
- let mut scope_map = NodeMap();
- let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
- scope_map.insert(fn_ast_id, fn_metadata);
-
- // Push argument identifiers onto the stack so arguments integrate nicely
- // with variable shadowing.
- for arg in args {
- pat_util::pat_bindings(&arg.pat, |_, node_id, _, path1| {
- scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
- name: Some(path1.node) });
- scope_map.insert(node_id, fn_metadata);
- })
- }
+use syntax_pos::BytePos;
- // Clang creates a separate scope for function bodies, so let's do this too.
- with_new_scope(cx,
- fn_entry_block.span,
- &mut scope_stack,
- &mut scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, fn_entry_block, scope_stack, scope_map);
- });
+#[derive(Clone, Copy, Debug)]
+pub struct MirDebugScope {
+ pub scope_metadata: DIScope,
+ // Start and end offsets of the file to which this DIScope belongs.
+ // These are used to quickly determine whether some span refers to the same file.
+ pub file_start_pos: BytePos,
+ pub file_end_pos: BytePos,
+}
- return scope_map;
+impl MirDebugScope {
+ pub fn is_valid(&self) -> bool {
+ !self.scope_metadata.is_null()
+ }
}
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
-pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, DIScope> {
+pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, MirDebugScope> {
let mir = fcx.mir.clone().expect("create_mir_scopes: missing MIR for fn");
- let mut scopes = IndexVec::from_elem(ptr::null_mut(), &mir.visibility_scopes);
+ let null_scope = MirDebugScope {
+ scope_metadata: ptr::null_mut(),
+ file_start_pos: BytePos(0),
+ file_end_pos: BytePos(0)
+ };
+ let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes);
let fn_metadata = match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata,
has_variables: &BitVector,
fn_metadata: DISubprogram,
scope: VisibilityScope,
- scopes: &mut IndexVec<VisibilityScope, DIScope>) {
- if !scopes[scope].is_null() {
+ scopes: &mut IndexVec<VisibilityScope, MirDebugScope>) {
+ if scopes[scope].is_valid() {
return;
}
scopes[parent]
} else {
// The root is the function itself.
- scopes[scope] = fn_metadata;
+ let loc = span_start(ccx, mir.span);
+ scopes[scope] = MirDebugScope {
+ scope_metadata: fn_metadata,
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
+ };
return;
};
// However, we don't skip creating a nested scope if
// our parent is the root, because we might want to
// put arguments in the root and not have shadowing.
- if parent_scope != fn_metadata {
+ if parent_scope.scope_metadata != fn_metadata {
scopes[scope] = parent_scope;
return;
}
}
let loc = span_start(ccx, scope_data.span);
- scopes[scope] = unsafe {
let file_metadata = file_metadata(ccx, &loc.file.name, &loc.file.abs_path);
+ let scope_metadata = unsafe {
llvm::LLVMRustDIBuilderCreateLexicalBlock(
DIB(ccx),
- parent_scope,
+ parent_scope.scope_metadata,
file_metadata,
loc.line as c_uint,
loc.col.to_usize() as c_uint)
};
-}
-
-// local helper functions for walking the AST.
-fn with_new_scope<F>(cx: &CrateContext,
- scope_span: Span,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>,
- inner_walk: F) where
- F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
-{
- // Create a new lexical scope and push it onto the stack
- let loc = span_start(cx, scope_span);
- let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
- let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
- let scope_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope,
- file_metadata,
- loc.line as c_uint,
- loc.col.to_usize() as c_uint)
+ scopes[scope] = MirDebugScope {
+ scope_metadata: scope_metadata,
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
};
-
- scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
-
- inner_walk(cx, scope_stack, scope_map);
-
- // pop artificial scopes
- while scope_stack.last().unwrap().name.is_some() {
- scope_stack.pop();
- }
-
- if scope_stack.last().unwrap().scope_metadata != scope_metadata {
- span_bug!(scope_span, "debuginfo: Inconsistency in scope management.");
- }
-
- scope_stack.pop();
-}
-
-struct ScopeStackEntry {
- scope_metadata: DIScope,
- name: Option<ast::Name>
-}
-
-fn walk_block(cx: &CrateContext,
- block: &hir::Block,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
-
- // The interesting things here are statements and the concluding expression.
- for statement in &block.stmts {
- scope_map.insert(statement.node.id(),
- scope_stack.last().unwrap().scope_metadata);
-
- match statement.node {
- hir::StmtDecl(ref decl, _) =>
- walk_decl(cx, &decl, scope_stack, scope_map),
- hir::StmtExpr(ref exp, _) |
- hir::StmtSemi(ref exp, _) =>
- walk_expr(cx, &exp, scope_stack, scope_map),
- }
- }
-
- if let Some(ref exp) = block.expr {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-}
-
-fn walk_decl(cx: &CrateContext,
- decl: &hir::Decl,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- match *decl {
- codemap::Spanned { node: hir::DeclLocal(ref local), .. } => {
- scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
-
- walk_pattern(cx, &local.pat, scope_stack, scope_map);
-
- if let Some(ref exp) = local.init {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
- }
- _ => ()
- }
-}
-
-fn walk_pattern(cx: &CrateContext,
- pat: &hir::Pat,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- // Unfortunately, we cannot just use pat_util::pat_bindings() or
- // ast_util::walk_pat() here because we have to visit *all* nodes in
- // order to put them into the scope map. The above functions don't do that.
- match pat.node {
- PatKind::Binding(_, ref path1, ref sub_pat_opt) => {
- // LLVM does not properly generate 'DW_AT_start_scope' fields
- // for variable DIEs. For this reason we have to introduce
- // an artificial scope at bindings whenever a variable with
- // the same name is declared in *any* parent scope.
- //
- // Otherwise the following error occurs:
- //
- // let x = 10;
- //
- // do_something(); // 'gdb print x' correctly prints 10
- //
- // {
- // do_something(); // 'gdb print x' prints 0, because it
- // // already reads the uninitialized 'x'
- // // from the next line...
- // let x = 100;
- // do_something(); // 'gdb print x' correctly prints 100
- // }
-
- // Is there already a binding with that name?
- // N.B.: this comparison must be UNhygienic... because
- // gdb knows nothing about the context, so any two
- // variables with the same name will cause the problem.
- let name = path1.node;
- let need_new_scope = scope_stack
- .iter()
- .any(|entry| entry.name == Some(name));
-
- if need_new_scope {
- // Create a new lexical scope and push it onto the stack
- let loc = span_start(cx, pat.span);
- let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
- let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
- let scope_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope,
- file_metadata,
- loc.line as c_uint,
- loc.col.to_usize() as c_uint)
- };
-
- scope_stack.push(ScopeStackEntry {
- scope_metadata: scope_metadata,
- name: Some(name)
- });
-
- } else {
- // Push a new entry anyway so the name can be found
- let prev_metadata = scope_stack.last().unwrap().scope_metadata;
- scope_stack.push(ScopeStackEntry {
- scope_metadata: prev_metadata,
- name: Some(name)
- });
- }
-
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- if let Some(ref sub_pat) = *sub_pat_opt {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Wild => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- }
-
- PatKind::TupleStruct(_, ref sub_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for p in sub_pats {
- walk_pattern(cx, &p, scope_stack, scope_map);
- }
- }
-
- PatKind::Path(..) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- }
-
- PatKind::Struct(_, ref field_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for &codemap::Spanned {
- node: hir::FieldPat { pat: ref sub_pat, .. },
- ..
- } in field_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Tuple(ref sub_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for sub_pat in sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- PatKind::Lit(ref exp) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-
- PatKind::Range(ref exp1, ref exp2) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, &exp1, scope_stack, scope_map);
- walk_expr(cx, &exp2, scope_stack, scope_map);
- }
-
- PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for sub_pat in front_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- if let Some(ref sub_pat) = *middle_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- for sub_pat in back_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
- }
-}
-
-fn walk_expr(cx: &CrateContext,
- exp: &hir::Expr,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
-
- scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
-
- match exp.node {
- hir::ExprLit(_) |
- hir::ExprBreak(_) |
- hir::ExprAgain(_) |
- hir::ExprPath(..) => {}
-
- hir::ExprCast(ref sub_exp, _) |
- hir::ExprType(ref sub_exp, _) |
- hir::ExprAddrOf(_, ref sub_exp) |
- hir::ExprField(ref sub_exp, _) |
- hir::ExprTupField(ref sub_exp, _) =>
- walk_expr(cx, &sub_exp, scope_stack, scope_map),
-
- hir::ExprBox(ref sub_expr) => {
- walk_expr(cx, &sub_expr, scope_stack, scope_map);
- }
-
- hir::ExprRet(ref exp_opt) => match *exp_opt {
- Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map),
- None => ()
- },
-
- hir::ExprUnary(_, ref sub_exp) => {
- walk_expr(cx, &sub_exp, scope_stack, scope_map);
- }
-
- hir::ExprAssignOp(_, ref lhs, ref rhs) |
- hir::ExprIndex(ref lhs, ref rhs) |
- hir::ExprBinary(_, ref lhs, ref rhs) => {
- walk_expr(cx, &lhs, scope_stack, scope_map);
- walk_expr(cx, &rhs, scope_stack, scope_map);
- }
-
- hir::ExprVec(ref init_expressions) |
- hir::ExprTup(ref init_expressions) => {
- for ie in init_expressions {
- walk_expr(cx, &ie, scope_stack, scope_map);
- }
- }
-
- hir::ExprAssign(ref sub_exp1, ref sub_exp2) |
- hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
- walk_expr(cx, &sub_exp1, scope_stack, scope_map);
- walk_expr(cx, &sub_exp2, scope_stack, scope_map);
- }
-
- hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
- walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
- with_new_scope(cx,
- then_block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &then_block, scope_stack, scope_map);
- });
-
- match *opt_else_exp {
- Some(ref else_exp) =>
- walk_expr(cx, &else_exp, scope_stack, scope_map),
- _ => ()
- }
- }
-
- hir::ExprWhile(ref cond_exp, ref loop_body, _) => {
- walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
- with_new_scope(cx,
- loop_body.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &loop_body, scope_stack, scope_map);
- })
- }
-
- hir::ExprLoop(ref block, _) |
- hir::ExprBlock(ref block) => {
- with_new_scope(cx,
- block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &block, scope_stack, scope_map);
- })
- }
-
- hir::ExprClosure(_, ref decl, ref block, _) => {
- with_new_scope(cx,
- block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- for &hir::Arg { pat: ref pattern, .. } in &decl.inputs {
- walk_pattern(cx, &pattern, scope_stack, scope_map);
- }
-
- walk_block(cx, &block, scope_stack, scope_map);
- })
- }
-
- hir::ExprCall(ref fn_exp, ref args) => {
- walk_expr(cx, &fn_exp, scope_stack, scope_map);
-
- for arg_exp in args {
- walk_expr(cx, &arg_exp, scope_stack, scope_map);
- }
- }
-
- hir::ExprMethodCall(_, _, ref args) => {
- for arg_exp in args {
- walk_expr(cx, &arg_exp, scope_stack, scope_map);
- }
- }
-
- hir::ExprMatch(ref discriminant_exp, ref arms, _) => {
- walk_expr(cx, &discriminant_exp, scope_stack, scope_map);
-
- // For each arm we have to first walk the pattern as these might
- // introduce new artificial scopes. It should be sufficient to
- // walk only one pattern per arm, as they all must contain the
- // same binding names.
-
- for arm_ref in arms {
- let arm_span = arm_ref.pats[0].span;
-
- with_new_scope(cx,
- arm_span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- for pat in &arm_ref.pats {
- walk_pattern(cx, &pat, scope_stack, scope_map);
- }
-
- if let Some(ref guard_exp) = arm_ref.guard {
- walk_expr(cx, &guard_exp, scope_stack, scope_map)
- }
-
- walk_expr(cx, &arm_ref.body, scope_stack, scope_map);
- })
- }
- }
-
- hir::ExprStruct(_, ref fields, ref base_exp) => {
- for &hir::Field { expr: ref exp, .. } in fields {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-
- match *base_exp {
- Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map),
- None => ()
- }
- }
-
- hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
- for output in outputs {
- walk_expr(cx, output, scope_stack, scope_map);
- }
-
- for input in inputs {
- walk_expr(cx, input, scope_stack, scope_map);
- }
- }
- }
}
use self::EnumDiscriminantInfo::*;
use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
- get_namespace_and_span_for_item, create_DIArray,
- fn_should_be_ignored, is_node_local_to_unit};
+ get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit};
use super::namespace::mangled_name_of_item;
use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
-use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext};
+use super::CrateDebugContext;
use context::SharedCrateContext;
use session::Session;
use llvm::{self, ValueRef};
-use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
+use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock};
use rustc::hir::def_id::DefId;
-use rustc::hir::pat_util;
use rustc::ty::subst::Substs;
-use rustc::hir::map as hir_map;
-use rustc::hir::{self, PatKind};
+use rustc::hir;
use {type_of, adt, machine, monomorphize};
-use common::{self, CrateContext, FunctionContext, Block};
-use _match::{BindingInfo, TransBindingMode};
+use common::CrateContext;
use type_::Type;
use rustc::ty::{self, Ty};
-use session::config::{self, FullDebugInfo};
+use session::config;
use util::nodemap::FnvHashMap;
use util::common::path2cstr;
// Add the def-index as the second part
output.push_str(&format!("{:x}", def_id.index.as_usize()));
- let tps = &substs.types;
- if !tps.is_empty() {
+ if substs.types().next().is_some() {
output.push('<');
- for &type_parameter in tps {
+ for type_parameter in substs.types() {
let param_type_id =
type_map.get_unique_type_id_of_type(cx, type_parameter);
let param_type_id =
file_metadata
}
-/// Finds the scope metadata node for the given AST node.
-pub fn scope_metadata(fcx: &FunctionContext,
- node_id: ast::NodeId,
- error_reporting_span: Span)
- -> DIScope {
- let scope_map = &fcx.debug_context
- .get_ref(error_reporting_span)
- .scope_map;
- match scope_map.borrow().get(&node_id).cloned() {
- Some(scope_metadata) => scope_metadata,
- None => {
- let node = fcx.ccx.tcx().map.get(node_id);
-
- span_bug!(error_reporting_span,
- "debuginfo: Could not find scope info for node {:?}",
- node);
- }
- }
-}
-
fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> DIType {
-> Vec<MemberDescription> {
let adt = &self.enum_type.ty_adt_def().unwrap();
match *self.type_rep {
- adt::General(_, ref struct_defs, _) => {
+ adt::General(_, ref struct_defs) => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect(""));
struct_defs
}
}).collect()
},
- adt::Univariant(ref struct_def, _) => {
+ adt::Univariant(ref struct_def) => {
assert!(adt.variants.len() <= 1);
if adt.variants.is_empty() {
adt::RawNullablePointer { .. } |
adt::StructWrappedNullablePointer { .. } |
adt::Univariant(..) => None,
- adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)),
+ adt::General(inttype, _) => Some(discriminant_type_metadata(inttype)),
};
let enum_llvm_type = type_of::type_of(cx, enum_type);
}
}
-/// Creates debug information for the given local variable.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// local in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let locals = bcx.fcx.lllocals.borrow();
- pat_util::pat_bindings(&local.pat, |_, node_id, span, var_name| {
- let datum = match locals.get(&node_id) {
- Some(datum) => datum,
- None => {
- span_bug!(span,
- "no entry in lllocals table for {}",
- node_id);
- }
- };
-
- if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
- span_bug!(span, "debuginfo::create_local_var_metadata() - \
- Referenced variable location is not an alloca!");
- }
-
- let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
-
- declare_local(bcx,
- var_name.node,
- datum.ty,
- scope_metadata,
- VariableAccess::DirectVariable { alloca: datum.val },
- VariableKind::LocalVariable,
- span);
- })
-}
-
-/// Creates debug information for a variable captured in a closure.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- node_id: ast::NodeId,
- env_pointer: ValueRef,
- env_index: usize,
- captured_by_ref: bool,
- span: Span) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let cx = bcx.ccx();
-
- let ast_item = cx.tcx().map.find(node_id);
-
- let variable_name = match ast_item {
- None => {
- span_bug!(span, "debuginfo::create_captured_var_metadata: node not found");
- }
- Some(hir_map::NodeLocal(pat)) => {
- match pat.node {
- PatKind::Binding(_, ref path1, _) => {
- path1.node
- }
- _ => {
- span_bug!(span,
- "debuginfo::create_captured_var_metadata() - \
- Captured var-id refers to unexpected \
- hir_map variant: {:?}",
- ast_item);
- }
- }
- }
- _ => {
- span_bug!(span,
- "debuginfo::create_captured_var_metadata() - \
- Captured var-id refers to unexpected \
- hir_map variant: {:?}",
- ast_item);
- }
- };
-
- let variable_type = common::node_id_type(bcx, node_id);
- let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata;
-
- // env_pointer is the alloca containing the pointer to the environment,
- // so it's type is **EnvironmentType. In order to find out the type of
- // the environment we have to "dereference" two times.
- let llvm_env_data_type = common::val_ty(env_pointer).element_type()
- .element_type();
- let byte_offset_of_var_in_env = machine::llelement_offset(cx,
- llvm_env_data_type,
- env_index);
-
- let address_operations = unsafe {
- [llvm::LLVMRustDIBuilderCreateOpDeref(),
- llvm::LLVMRustDIBuilderCreateOpPlus(),
- byte_offset_of_var_in_env as i64,
- llvm::LLVMRustDIBuilderCreateOpDeref()]
- };
-
- let address_op_count = if captured_by_ref {
- address_operations.len()
- } else {
- address_operations.len() - 1
- };
-
- let variable_access = VariableAccess::IndirectVariable {
- alloca: env_pointer,
- address_operations: &address_operations[..address_op_count]
- };
-
- declare_local(bcx,
- variable_name,
- variable_type,
- scope_metadata,
- variable_access,
- VariableKind::CapturedVariable,
- span);
-}
-
-/// Creates debug information for a local variable introduced in the head of a
-/// match-statement arm.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- variable_name: ast::Name,
- binding: BindingInfo<'tcx>) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
- let aops = unsafe {
- [llvm::LLVMRustDIBuilderCreateOpDeref()]
- };
- // Regardless of the actual type (`T`) we're always passed the stack slot
- // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove
- // bindings we actually have `T**`. So to get the actual variable we need to
- // dereference once more. For ByCopy we just use the stack slot we created
- // for the binding.
- let var_access = match binding.trmode {
- TransBindingMode::TrByCopy(llbinding) |
- TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
- alloca: llbinding
- },
- TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
- alloca: binding.llmatch,
- address_operations: &aops
- },
- TransBindingMode::TrByRef => VariableAccess::DirectVariable {
- alloca: binding.llmatch
- }
- };
-
- declare_local(bcx,
- variable_name,
- binding.ty,
- scope_metadata,
- var_access,
- VariableKind::LocalVariable,
- binding.span);
-}
-
-/// Creates debug information for the given function argument.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// argument in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
+// Creates an "extension" of an existing DIScope into another file.
+pub fn extend_scope_to_file(ccx: &CrateContext,
+ scope_metadata: DIScope,
+ file: &syntax_pos::FileMap)
+ -> DILexicalBlock {
+ let file_metadata = file_metadata(ccx, &file.name, &file.abs_path);
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlockFile(
+ DIB(ccx),
+ scope_metadata,
+ file_metadata)
}
-
- let scope_metadata = bcx
- .fcx
- .debug_context
- .get_ref(arg.pat.span)
- .fn_metadata;
- let locals = bcx.fcx.lllocals.borrow();
-
- pat_util::pat_bindings(&arg.pat, |_, node_id, span, var_name| {
- let datum = match locals.get(&node_id) {
- Some(v) => v,
- None => {
- span_bug!(span, "no entry in lllocals table for {}", node_id);
- }
- };
-
- if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
- span_bug!(span, "debuginfo::create_argument_metadata() - \
- Referenced variable location is not an alloca!");
- }
-
- let argument_index = {
- let counter = &bcx
- .fcx
- .debug_context
- .get_ref(span)
- .argument_counter;
- let argument_index = counter.get();
- counter.set(argument_index + 1);
- argument_index
- };
-
- declare_local(bcx,
- var_name.node,
- datum.ty,
- scope_metadata,
- VariableAccess::DirectVariable { alloca: datum.val },
- VariableKind::ArgumentVariable(argument_index),
- span);
- })
-}
+}
\ No newline at end of file
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use rustc::ty::subst::Substs;
-use rustc::hir;
use abi::Abi;
-use common::{NodeIdAndSpan, CrateContext, FunctionContext, Block, BlockAndBuilder};
-use inline;
+use common::{CrateContext, FunctionContext, Block, BlockAndBuilder};
use monomorphize::{self, Instance};
use rustc::ty::{self, Ty};
+use rustc::mir::repr as mir;
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
-use util::nodemap::{DefIdMap, NodeMap, FnvHashMap, FnvHashSet};
+use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet};
use libc::c_uint;
use std::cell::{Cell, RefCell};
mod create_scope_map;
mod source_loc;
-pub use self::create_scope_map::create_mir_scopes;
+pub use self::create_scope_map::{create_mir_scopes, MirDebugScope};
pub use self::source_loc::start_emitting_source_locations;
-pub use self::source_loc::get_cleanup_debug_loc_for_ast_node;
-pub use self::source_loc::with_source_location_override;
-pub use self::metadata::create_match_binding_metadata;
-pub use self::metadata::create_argument_metadata;
-pub use self::metadata::create_captured_var_metadata;
pub use self::metadata::create_global_var_metadata;
-pub use self::metadata::create_local_var_metadata;
+pub use self::metadata::extend_scope_to_file;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
}
pub struct FunctionDebugContextData {
- scope_map: RefCell<NodeMap<DIScope>>,
fn_metadata: DISubprogram,
- argument_counter: Cell<usize>,
source_locations_enabled: Cell<bool>,
source_location_override: Cell<bool>,
}
instance: Instance<'tcx>,
sig: &ty::FnSig<'tcx>,
abi: Abi,
- llfn: ValueRef) -> FunctionDebugContext {
+ llfn: ValueRef,
+ mir: &mir::Mir) -> FunctionDebugContext {
if cx.sess().opts.debuginfo == NoDebugInfo {
return FunctionDebugContext::DebugInfoDisabled;
}
// Do this here already, in case we do an early exit from this function.
source_loc::set_debug_location(cx, None, UnknownLocation);
- let instance = inline::maybe_inline_instance(cx, instance);
- let (containing_scope, span) = get_containing_scope_and_span(cx, instance);
+ let containing_scope = get_containing_scope(cx, instance);
+ let span = mir.span;
// This can be the case for functions inlined from another crate
if span == syntax_pos::DUMMY_SP {
// Initialize fn debug context (including scope map and namespace map)
let fn_debug_context = box FunctionDebugContextData {
- scope_map: RefCell::new(NodeMap()),
fn_metadata: fn_metadata,
- argument_counter: Cell::new(1),
source_locations_enabled: Cell::new(false),
source_location_override: Cell::new(false),
};
fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
generics: &ty::Generics<'tcx>,
- param_substs: &Substs<'tcx>,
+ substs: &Substs<'tcx>,
file_metadata: DIFile,
name_to_append_suffix_to: &mut String)
-> DIArray
{
- let actual_types = ¶m_substs.types;
-
- if actual_types.is_empty() {
+ if substs.types().next().is_none() {
return create_DIArray(DIB(cx), &[]);
}
name_to_append_suffix_to.push('<');
- for (i, &actual_type) in actual_types.iter().enumerate() {
+ for (i, actual_type) in substs.types().enumerate() {
+ if i != 0 {
+ name_to_append_suffix_to.push_str(",");
+ }
+
let actual_type = cx.tcx().normalize_associated_type(&actual_type);
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx,
actual_type,
true);
name_to_append_suffix_to.push_str(&actual_type_name[..]);
-
- if i != actual_types.len() - 1 {
- name_to_append_suffix_to.push_str(",");
- }
}
name_to_append_suffix_to.push('>');
// Again, only create type information if full debuginfo is enabled
let template_params: Vec<_> = if cx.sess().opts.debuginfo == FullDebugInfo {
let names = get_type_parameter_names(cx, generics);
- actual_types.iter().zip(names).map(|(ty, name)| {
- let actual_type = cx.tcx().normalize_associated_type(ty);
+ substs.types().zip(names).map(|(ty, name)| {
+ let actual_type = cx.tcx().normalize_associated_type(&ty);
let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP);
let name = CString::new(name.as_str().as_bytes()).unwrap();
unsafe {
names
}
- fn get_containing_scope_and_span<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
- instance: Instance<'tcx>)
- -> (DIScope, Span) {
+ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
+ instance: Instance<'tcx>)
+ -> DIScope {
// First, let's see if this is a method within an inherent impl. Because
// if yes, we want to make the result subroutine DIE a child of the
// subroutine's self-type.
let impl_self_ty = monomorphize::apply_param_substs(cx.tcx(),
instance.substs,
&impl_self_ty);
- Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP))
+
+ // Only "class" methods are generally understood by LLVM,
+ // so avoid methods on other types (e.g. `<*mut T>::null`).
+ match impl_self_ty.sty {
+ ty::TyStruct(..) | ty::TyEnum(..) => {
+ Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP))
+ }
+ _ => None
+ }
} else {
// For trait method impls we still use the "parallel namespace"
// strategy
}
});
- let containing_scope = self_type.unwrap_or_else(|| {
+ self_type.unwrap_or_else(|| {
namespace::item_namespace(cx, DefId {
krate: instance.def.krate,
index: cx.tcx()
.def_key(instance.def)
.parent
- .expect("get_containing_scope_and_span: missing parent?")
+ .expect("get_containing_scope: missing parent?")
})
- });
-
- // Try to get some span information, if we have an inlined item.
- let definition_span = cx.tcx()
- .map
- .def_id_span(instance.def, syntax_pos::DUMMY_SP);
-
- (containing_scope, definition_span)
- }
-}
-
-/// Computes the scope map for a function given its declaration and body.
-pub fn fill_scope_map_for_function<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
- fn_decl: &hir::FnDecl,
- top_level_block: &hir::Block,
- fn_ast_id: ast::NodeId) {
- match fcx.debug_context {
- FunctionDebugContext::RegularContext(box ref data) => {
- let scope_map = create_scope_map::create_scope_map(fcx.ccx,
- &fn_decl.inputs,
- top_level_block,
- data.fn_metadata,
- fn_ast_id);
- *data.scope_map.borrow_mut() = scope_map;
- }
- FunctionDebugContext::DebugInfoDisabled |
- FunctionDebugContext::FunctionWithoutDebugInfo => {}
+ })
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum DebugLoc {
- At(ast::NodeId, Span),
ScopeAt(DIScope, Span),
None
}
source_loc::set_source_location(bcx.fcx(), Some(bcx), self);
}
}
-
-pub trait ToDebugLoc {
- fn debug_loc(&self) -> DebugLoc;
-}
-
-impl ToDebugLoc for hir::Expr {
- fn debug_loc(&self) -> DebugLoc {
- DebugLoc::At(self.id, self.span)
- }
-}
-
-impl ToDebugLoc for NodeIdAndSpan {
- fn debug_loc(&self) -> DebugLoc {
- DebugLoc::At(self.id, self.span)
- }
-}
-
-impl ToDebugLoc for Option<NodeIdAndSpan> {
- fn debug_loc(&self) -> DebugLoc {
- match *self {
- Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span),
- None => DebugLoc::None
- }
- }
-}
use self::InternalDebugLocation::*;
use super::utils::{debug_context, span_start};
-use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER};
+use super::metadata::{UNKNOWN_COLUMN_NUMBER};
use super::{FunctionDebugContext, DebugLoc};
use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
-use common::{NodeIdAndSpan, CrateContext, FunctionContext};
+use common::{CrateContext, FunctionContext};
use libc::c_uint;
use std::ptr;
-use syntax_pos::{self, Span, Pos};
-use syntax::ast;
-
-pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- node_id: ast::NodeId,
- node_span: Span,
- is_block: bool)
- -> NodeIdAndSpan {
- // A debug location needs two things:
- // (1) A span (of which only the beginning will actually be used)
- // (2) An AST node-id which will be used to look up the lexical scope
- // for the location in the functions scope-map
- //
- // This function will calculate the debug location for compiler-generated
- // cleanup calls that are executed when control-flow leaves the
- // scope identified by `node_id`.
- //
- // For everything but block-like things we can simply take id and span of
- // the given expression, meaning that from a debugger's view cleanup code is
- // executed at the same source location as the statement/expr itself.
- //
- // Blocks are a special case. Here we want the cleanup to be linked to the
- // closing curly brace of the block. The *scope* the cleanup is executed in
- // is up to debate: It could either still be *within* the block being
- // cleaned up, meaning that locals from the block are still visible in the
- // debugger.
- // Or it could be in the scope that the block is contained in, so any locals
- // from within the block are already considered out-of-scope and thus not
- // accessible in the debugger anymore.
- //
- // The current implementation opts for the second option: cleanup of a block
- // already happens in the parent scope of the block. The main reason for
- // this decision is that scoping becomes controlflow dependent when variable
- // shadowing is involved and it's impossible to decide statically which
- // scope is actually left when the cleanup code is executed.
- // In practice it shouldn't make much of a difference.
-
- let mut cleanup_span = node_span;
-
- if is_block {
- // Not all blocks actually have curly braces (e.g. simple closure
- // bodies), in which case we also just want to return the span of the
- // whole expression.
- let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
- if let Ok(code_snippet) = code_snippet {
- let bytes = code_snippet.as_bytes();
-
- if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
- cleanup_span = Span {
- lo: node_span.hi - syntax_pos::BytePos(1),
- hi: node_span.hi,
- expn_id: node_span.expn_id
- };
- }
- }
- }
-
- NodeIdAndSpan {
- id: node_id,
- span: cleanup_span
- }
-}
-
+use syntax_pos::Pos;
/// Sets the current debug location at the beginning of the span.
///
let dbg_loc = if function_debug_context.source_locations_enabled.get() {
let (scope, span) = match debug_loc {
- DebugLoc::At(node_id, span) => {
- (scope_metadata(fcx, node_id, span), span)
- }
DebugLoc::ScopeAt(scope, span) => (scope, span),
DebugLoc::None => {
set_debug_location(fcx.ccx, builder, UnknownLocation);
set_debug_location(fcx.ccx, builder, dbg_loc);
}
-/// This function makes sure that all debug locations emitted while executing
-/// `wrapped_function` are set to the given `debug_loc`.
-pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
- debug_loc: DebugLoc,
- wrapped_function: F) -> R
- where F: FnOnce() -> R
-{
- match fcx.debug_context {
- FunctionDebugContext::DebugInfoDisabled => {
- wrapped_function()
- }
- FunctionDebugContext::FunctionWithoutDebugInfo => {
- set_debug_location(fcx.ccx, None, UnknownLocation);
- wrapped_function()
- }
- FunctionDebugContext::RegularContext(box ref function_debug_context) => {
- if function_debug_context.source_location_override.get() {
- wrapped_function()
- } else {
- debug_loc.apply(fcx);
- function_debug_context.source_location_override.set(true);
- let result = wrapped_function();
- function_debug_context.source_location_override.set(false);
- result
- }
- }
- }
-}
-
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &Substs<'tcx>,
output: &mut String) {
- if substs.types.is_empty() {
+ if substs.types().next().is_none() {
return;
}
output.push('<');
- for &type_parameter in &substs.types {
+ for type_parameter in substs.types() {
push_debuginfo_type_name(cx, type_parameter, true, output);
output.push_str(", ");
}
// Utility Functions.
-use super::{FunctionDebugContext, CrateDebugContext};
+use super::{CrateDebugContext};
use super::namespace::item_namespace;
use rustc::hir::def_id::DefId;
use llvm;
use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
use machine;
-use common::{CrateContext, FunctionContext};
+use common::{CrateContext};
use type_::Type;
use syntax_pos::{self, Span};
cx.dbg_cx().as_ref().unwrap().builder
}
-pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
- match fcx.debug_context {
- FunctionDebugContext::RegularContext(_) => false,
- _ => true
- }
-}
-
pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId)
-> (DIScope, Span) {
let containing_scope = item_namespace(cx, DefId {
fn simd_add<T>(a: T, b: T) -> T;
}
-unsafe { simd_add(0, 1); }
-// error: invalid monomorphization of `simd_add` intrinsic
+fn main() {
+ unsafe { simd_add(0, 1); }
+ // error: invalid monomorphization of `simd_add` intrinsic
+}
```
The generic type has to be a SIMD type. Example:
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Translation of Expressions
-//!
-//! The expr module handles translation of expressions. The most general
-//! translation routine is `trans()`, which will translate an expression
-//! into a datum. `trans_into()` is also available, which will translate
-//! an expression and write the result directly into memory, sometimes
-//! avoiding the need for a temporary stack slot. Finally,
-//! `trans_to_lvalue()` is available if you'd like to ensure that the
-//! result has cleanup scheduled.
-//!
-//! Internally, each of these functions dispatches to various other
-//! expression functions depending on the kind of expression. We divide
-//! up expressions into:
-//!
-//! - **Datum expressions:** Those that most naturally yield values.
-//! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
-//! - **DPS expressions:** Those that most naturally write into a location
-//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
-//! - **Statement expressions:** That that do not generate a meaningful
-//! result. Examples would be `while { ... }` or `return 44`.
-//!
-//! Public entry points:
-//!
-//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
-//! storing the result into `dest`. This is the preferred form, if you
-//! can manage it.
-//!
-//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
-//! `Datum` with the result. You can then store the datum, inspect
-//! the value, etc. This may introduce temporaries if the datum is a
-//! structural type.
-//!
-//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
-//! expression and ensures that the result has a cleanup associated with it,
-//! creating a temporary stack slot if necessary.
-//!
-//! - `trans_var -> Datum`: looks up a local variable, upvar or static.
-
-#![allow(non_camel_case_types)]
-
-pub use self::Dest::*;
-use self::lazy_binop_ty::*;
-
-use llvm::{self, ValueRef, TypeKind};
-use middle::const_qualif::ConstQualif;
-use rustc::hir::def::Def;
-use rustc::ty::subst::Substs;
-use {_match, abi, adt, asm, base, closure, consts, controlflow};
-use base::*;
-use build::*;
-use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
-use cleanup::{self, CleanupMethods, DropHintMethods};
-use common::*;
-use datum::*;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
-use glue;
-use machine;
-use tvec;
-use type_of;
-use value::Value;
-use Disr;
-use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer};
-use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::MethodCall;
-use rustc::ty::cast::{CastKind, CastTy};
-use util::common::indenter;
-use machine::{llsize_of, llsize_of_alloc};
-use type_::Type;
-
-use rustc::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax_pos;
-use std::fmt;
-use std::mem;
-
-// Destinations
-
-// These are passed around by the code generating functions to track the
-// destination of a computation's value.
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum Dest {
- SaveIn(ValueRef),
- Ignore,
-}
-
-impl fmt::Debug for Dest {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
- Ignore => f.write_str("Ignore")
- }
- }
-}
-
-/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
-/// better optimized LLVM code.
-pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
-
- expr.debug_loc().apply(bcx.fcx);
-
- if adjustment_required(bcx, expr) {
- // use trans, which may be less efficient but
- // which will perform the adjustments:
- let datum = unpack_datum!(bcx, trans(bcx, expr));
- return datum.store_to_dest(bcx, dest, expr.id);
- }
-
- let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
- if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
- if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
- if let SaveIn(lldest) = dest {
- match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
- bcx.fcx.param_substs,
- consts::TrueConst::No) {
- Ok(global) => {
- // Cast pointer to destination, because constants
- // have different types.
- let lldest = PointerCast(bcx, lldest, val_ty(global));
- memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
- return bcx;
- },
- Err(consts::ConstEvalFailure::Runtime(_)) => {
- // in case const evaluation errors, translate normally
- // debug assertions catch the same errors
- // see RFC 1229
- },
- Err(consts::ConstEvalFailure::Compiletime(_)) => {
- return bcx;
- },
- }
- }
-
- // If we see a const here, that's because it evaluates to a type with zero size. We
- // should be able to just discard it, since const expressions are guaranteed not to
- // have side effects. This seems to be reached through tuple struct constructors being
- // passed zero-size constants.
- if let hir::ExprPath(..) = expr.node {
- match bcx.tcx().expect_def(expr.id) {
- Def::Const(_) | Def::AssociatedConst(_) => {
- assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
- return bcx;
- }
- _ => {}
- }
- }
-
- // Even if we don't have a value to emit, and the expression
- // doesn't have any side-effects, we still have to translate the
- // body of any closures.
- // FIXME: Find a better way of handling this case.
- } else {
- // The only way we're going to see a `const` at this point is if
- // it prefers in-place instantiation, likely because it contains
- // `[x; N]` somewhere within.
- match expr.node {
- hir::ExprPath(..) => {
- match bcx.tcx().expect_def(expr.id) {
- Def::Const(did) | Def::AssociatedConst(did) => {
- let empty_substs = Substs::empty(bcx.tcx());
- let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
- empty_substs);
- // Temporarily get cleanup scopes out of the way,
- // as they require sub-expressions to be contained
- // inside the current AST scope.
- // These should record no cleanups anyways, `const`
- // can't have destructors.
- let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
- vec![]);
- // Lock emitted debug locations to the location of
- // the constant reference expression.
- debuginfo::with_source_location_override(bcx.fcx,
- expr.debug_loc(),
- || {
- bcx = trans_into(bcx, const_expr, dest)
- });
- let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
- scopes);
- assert!(scopes.is_empty());
- return bcx;
- }
- _ => {}
- }
- }
- _ => {}
- }
- }
- }
-
- debug!("trans_into() expr={:?}", expr);
-
- let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- expr.id,
- expr.span,
- false);
- bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- let kind = expr_kind(bcx.tcx(), expr);
- bcx = match kind {
- ExprKind::Lvalue | ExprKind::RvalueDatum => {
- trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
- }
- ExprKind::RvalueDps => {
- trans_rvalue_dps_unadjusted(bcx, expr, dest)
- }
- ExprKind::RvalueStmt => {
- trans_rvalue_stmt_unadjusted(bcx, expr)
- }
- };
-
- bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
-}
-
-/// Translates an expression, returning a datum (and new block) encapsulating the result. When
-/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
-/// stack.
-pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- debug!("trans(expr={:?})", expr);
-
- let mut bcx = bcx;
- let fcx = bcx.fcx;
- let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
- let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
- let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
- match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
- bcx.fcx.param_substs,
- consts::TrueConst::No) {
- Ok(global) => {
- if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
- // Is borrowed as 'static, must return lvalue.
-
- // Cast pointer to global, because constants have different types.
- let const_ty = expr_ty_adjusted(bcx, expr);
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let global = PointerCast(bcx, global, llty.ptr_to());
- let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
- return DatumBlock::new(bcx, datum.to_expr_datum());
- }
-
- // Otherwise, keep around and perform adjustments, if needed.
- let const_ty = if adjusted_global {
- expr_ty_adjusted(bcx, expr)
- } else {
- expr_ty(bcx, expr)
- };
-
- // This could use a better heuristic.
- Some(if type_is_immediate(bcx.ccx(), const_ty) {
- // Cast pointer to global, because constants have different types.
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let global = PointerCast(bcx, global, llty.ptr_to());
- // Maybe just get the value directly, instead of loading it?
- immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
- } else {
- let scratch = alloc_ty(bcx, const_ty, "const");
- call_lifetime_start(bcx, scratch);
- let lldest = if !const_ty.is_structural() {
- // Cast pointer to slot, because constants have different types.
- PointerCast(bcx, scratch, val_ty(global))
- } else {
- // In this case, memcpy_ty calls llvm.memcpy after casting both
- // source and destination to i8*, so we don't need any casts.
- scratch
- };
- memcpy_ty(bcx, lldest, global, const_ty);
- Datum::new(scratch, const_ty, Rvalue::new(ByRef))
- })
- },
- Err(consts::ConstEvalFailure::Runtime(_)) => {
- // in case const evaluation errors, translate normally
- // debug assertions catch the same errors
- // see RFC 1229
- None
- },
- Err(consts::ConstEvalFailure::Compiletime(_)) => {
- // generate a dummy llvm value
- let const_ty = expr_ty(bcx, expr);
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let dummy = C_undef(llty.ptr_to());
- Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
- },
- }
- } else {
- None
- };
-
- let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- expr.id,
- expr.span,
- false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
- let datum = match global {
- Some(rvalue) => rvalue.to_expr_datum(),
- None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
- };
- let datum = if adjusted_global {
- datum // trans::consts already performed adjustments.
- } else {
- unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
- };
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
- return DatumBlock::new(bcx, datum);
-}
-
-pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
- StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
-}
-
-pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
- StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
-}
-
-pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
- Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
- Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
-}
-
-fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr) -> bool {
- let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
- None => { return false; }
- Some(adj) => adj
- };
-
- // Don't skip a conversion from Box<T> to &T, etc.
- if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
- return true;
- }
-
- match adjustment {
- AdjustNeverToAny(..) => true,
- AdjustReifyFnPointer => true,
- AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
- // purely a type-level thing
- false
- }
- AdjustDerefRef(ref adj) => {
- // We are a bit paranoid about adjustments and thus might have a re-
- // borrow here which merely derefs and then refs again (it might have
- // a different region or mutability, but we don't care here).
- !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
- }
- }
-}
-
-/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
-/// translation of `expr`.
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- let mut bcx = bcx;
- let mut datum = datum;
- let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
- None => {
- return DatumBlock::new(bcx, datum);
- }
- Some(adj) => { adj }
- };
- debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
- expr, datum, adjustment);
- match adjustment {
- AdjustNeverToAny(ref target) => {
- let mono_target = bcx.monomorphize(target);
- let llty = type_of::type_of(bcx.ccx(), mono_target);
- let dummy = C_undef(llty.ptr_to());
- datum = Datum::new(dummy, mono_target, Lvalue::new("never")).to_expr_datum();
- }
- AdjustReifyFnPointer => {
- match datum.ty.sty {
- ty::TyFnDef(def_id, substs, _) => {
- datum = Callee::def(bcx.ccx(), def_id, substs)
- .reify(bcx.ccx()).to_expr_datum();
- }
- _ => {
- bug!("{} cannot be reified to a fn ptr", datum.ty)
- }
- }
- }
- AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
- // purely a type-level thing
- }
- AdjustDerefRef(ref adj) => {
- let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
- // We are a bit paranoid about adjustments and thus might have a re-
- // borrow here which merely derefs and then refs again (it might have
- // a different region or mutability, but we don't care here).
- match datum.ty.sty {
- // Don't skip a conversion from Box<T> to &T, etc.
- ty::TyRef(..) => {
- if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
- // Don't skip an overloaded deref.
- 0
- } else {
- 1
- }
- }
- _ => 0
- }
- } else {
- 0
- };
-
- if adj.autoderefs > skip_reborrows {
- // Schedule cleanup.
- let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
- datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
- lval.to_expr_datum(),
- adj.autoderefs - skip_reborrows));
- }
-
- // (You might think there is a more elegant way to do this than a
- // skip_reborrows bool, but then you remember that the borrow checker exists).
- if skip_reborrows == 0 && adj.autoref.is_some() {
- datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
- }
-
- if let Some(target) = adj.unsize {
- // We do not arrange cleanup ourselves; if we already are an
- // L-value, then cleanup will have already been scheduled (and
- // the `datum.to_rvalue_datum` call below will emit code to zero
- // the drop flag when moving out of the L-value). If we are an
- // R-value, then we do not need to schedule cleanup.
- let source_datum = unpack_datum!(bcx,
- datum.to_rvalue_datum(bcx, "__coerce_source"));
-
- let target = bcx.monomorphize(&target);
-
- let scratch = alloc_ty(bcx, target, "__coerce_target");
- call_lifetime_start(bcx, scratch);
- let target_datum = Datum::new(scratch, target,
- Rvalue::new(ByRef));
- bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
- datum = Datum::new(scratch, target,
- RvalueExpr(Rvalue::new(ByRef)));
- }
- }
- }
- debug!("after adjustments, datum={:?}", datum);
- DatumBlock::new(bcx, datum)
-}
-
-fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- span: syntax_pos::Span,
- source: Datum<'tcx, Rvalue>,
- target: Datum<'tcx, Rvalue>)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
- debug!("coerce_unsized({:?} -> {:?})", source, target);
-
- match (&source.ty.sty, &target.ty.sty) {
- (&ty::TyBox(a), &ty::TyBox(b)) |
- (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
- &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
- let (inner_source, inner_target) = (a, b);
-
- let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
- // Normally, the source is a thin pointer and we are
- // adding extra info to make a fat pointer. The exception
- // is when we are upcasting an existing object fat pointer
- // to use a different vtable. In that case, we want to
- // load out the original data pointer so we can repackage
- // it.
- (Load(bcx, get_dataptr(bcx, source.val)),
- Some(Load(bcx, get_meta(bcx, source.val))))
- } else {
- let val = if source.kind.is_by_ref() {
- load_ty(bcx, source.val, source.ty)
- } else {
- source.val
- };
- (val, None)
- };
-
- let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);
-
- // Compute the base pointer. This doesn't change the pointer value,
- // but merely its type.
- let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
- let base = PointerCast(bcx, base, ptr_ty);
-
- Store(bcx, base, get_dataptr(bcx, target.val));
- Store(bcx, info, get_meta(bcx, target.val));
- }
-
- // This can be extended to enums and tuples in the future.
- // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
- (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
- assert_eq!(def_id_a, def_id_b);
-
- // The target is already by-ref because it's to be written to.
- let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
- assert!(target.kind.is_by_ref());
-
- let kind = custom_coerce_unsize_info(bcx.ccx().shared(),
- source.ty,
- target.ty);
-
- let repr_source = adt::represent_type(bcx.ccx(), source.ty);
- let src_fields = match &*repr_source {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => span_bug!(span,
- "Non univariant struct? (repr_source: {:?})",
- repr_source),
- };
- let repr_target = adt::represent_type(bcx.ccx(), target.ty);
- let target_fields = match &*repr_target {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => span_bug!(span,
- "Non univariant struct? (repr_target: {:?})",
- repr_target),
- };
-
- let coerce_index = match kind {
- CustomCoerceUnsized::Struct(i) => i
- };
- assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
-
- let source_val = adt::MaybeSizedValue::sized(source.val);
- let target_val = adt::MaybeSizedValue::sized(target.val);
-
- let iter = src_fields.iter().zip(target_fields).enumerate();
- for (i, (src_ty, target_ty)) in iter {
- let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
- let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);
-
- // If this is the field we need to coerce, recurse on it.
- if i == coerce_index {
- coerce_unsized(bcx, span,
- Datum::new(ll_source, src_ty,
- Rvalue::new(ByRef)),
- Datum::new(ll_target, target_ty,
- Rvalue::new(ByRef)));
- } else {
- // Otherwise, simply copy the data from the source.
- assert!(src_ty.is_phantom_data() || src_ty == target_ty);
- memcpy_ty(bcx, ll_target, ll_source, src_ty);
- }
- }
- }
- _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}",
- source.ty,
- target.ty)
- }
- bcx
-}
-
-/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
-/// that the expr represents.
-///
-/// If this expression is an rvalue, this implies introducing a temporary. In other words,
-/// something like `x().f` is translated into roughly the equivalent of
-///
-/// { tmp = x(); tmp.f }
-pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- name: &str)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- let mut bcx = bcx;
- let datum = unpack_datum!(bcx, trans(bcx, expr));
- return datum.to_lvalue_datum(bcx, name, expr.id);
-}
-
-/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
-/// directly.
-fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
-
- debug!("trans_unadjusted(expr={:?})", expr);
- let _indenter = indenter();
-
- expr.debug_loc().apply(bcx.fcx);
-
- return match expr_kind(bcx.tcx(), expr) {
- ExprKind::Lvalue | ExprKind::RvalueDatum => {
- let datum = unpack_datum!(bcx, {
- trans_datum_unadjusted(bcx, expr)
- });
-
- DatumBlock {bcx: bcx, datum: datum}
- }
-
- ExprKind::RvalueStmt => {
- bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
- nil(bcx, expr_ty(bcx, expr))
- }
-
- ExprKind::RvalueDps => {
- let ty = expr_ty(bcx, expr);
- if type_is_zero_size(bcx.ccx(), ty) {
- bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
- nil(bcx, ty)
- } else {
- let scratch = rvalue_scratch_datum(bcx, ty, "");
- bcx = trans_rvalue_dps_unadjusted(
- bcx, expr, SaveIn(scratch.val));
-
- // Note: this is not obviously a good idea. It causes
- // immediate values to be loaded immediately after a
- // return from a call or other similar expression,
- // which in turn leads to alloca's having shorter
- // lifetimes and hence larger stack frames. However,
- // in turn it can lead to more register pressure.
- // Still, in practice it seems to increase
- // performance, since we have fewer problems with
- // morestack churn.
- let scratch = unpack_datum!(
- bcx, scratch.to_appropriate_datum(bcx));
-
- DatumBlock::new(bcx, scratch.to_expr_datum())
- }
- }
- };
-
- fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
- let datum = immediate_rvalue(llval, ty);
- DatumBlock::new(bcx, datum.to_expr_datum())
- }
-}
-
-fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
- let fcx = bcx.fcx;
- let _icx = push_ctxt("trans_datum_unadjusted");
-
- match expr.node {
- hir::ExprType(ref e, _) => {
- trans(bcx, &e)
- }
- hir::ExprPath(..) => {
- let var = trans_var(bcx, bcx.tcx().expect_def(expr.id));
- DatumBlock::new(bcx, var.to_expr_datum())
- }
- hir::ExprField(ref base, name) => {
- trans_rec_field(bcx, &base, name.node)
- }
- hir::ExprTupField(ref base, idx) => {
- trans_rec_tup_field(bcx, &base, idx.node)
- }
- hir::ExprIndex(ref base, ref idx) => {
- trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
- }
- hir::ExprBox(ref contents) => {
- // Special case for `Box<T>`
- let box_ty = expr_ty(bcx, expr);
- let contents_ty = expr_ty(bcx, &contents);
- match box_ty.sty {
- ty::TyBox(..) => {
- trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
- }
- _ => span_bug!(expr.span,
- "expected unique box")
- }
-
- }
- hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
- hir::ExprBinary(op, ref lhs, ref rhs) => {
- trans_binary(bcx, expr, op, &lhs, &rhs)
- }
- hir::ExprUnary(op, ref x) => {
- trans_unary(bcx, expr, op, &x)
- }
- hir::ExprAddrOf(_, ref x) => {
- match x.node {
- hir::ExprRepeat(..) | hir::ExprVec(..) => {
- // Special case for slices.
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- x.id,
- x.span,
- false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
- let datum = unpack_datum!(
- bcx, tvec::trans_slice_vec(bcx, expr, &x));
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
- DatumBlock::new(bcx, datum)
- }
- _ => {
- trans_addr_of(bcx, expr, &x)
- }
- }
- }
- hir::ExprCast(ref val, _) => {
- // Datum output mode means this is a scalar cast:
- trans_imm_cast(bcx, &val, expr.id)
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_datum_unadjusted reached \
- fall-through case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- get_idx: F)
- -> DatumBlock<'blk, 'tcx, Expr> where
- F: FnOnce(TyCtxt<'blk, 'tcx, 'tcx>, &VariantInfo<'tcx>) -> usize,
-{
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_rec_field");
-
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
- let bare_ty = base_datum.ty;
- let repr = adt::represent_type(bcx.ccx(), bare_ty);
- let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
-
- let ix = get_idx(bcx.tcx(), &vinfo);
- let d = base_datum.get_element(
- bcx,
- vinfo.fields[ix].1,
- |srcval| {
- adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
- });
-
- if type_is_sized(bcx.tcx(), d.ty) {
- DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
- } else {
- let scratch = rvalue_scratch_datum(bcx, d.ty, "");
- Store(bcx, d.val, get_dataptr(bcx, scratch.val));
- let info = Load(bcx, get_meta(bcx, base_datum.val));
- Store(bcx, info, get_meta(bcx, scratch.val));
-
- // Always generate an lvalue datum, because this pointer doesn't own
- // the data and cleanup is scheduled elsewhere.
- DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
- }
-}
-
-/// Translates `base.field`.
-fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- field: ast::Name)
- -> DatumBlock<'blk, 'tcx, Expr> {
- trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
-}
-
-/// Translates `base.<idx>`.
-fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- idx: usize)
- -> DatumBlock<'blk, 'tcx, Expr> {
- trans_field(bcx, base, |_, _| idx)
-}
-
-fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- index_expr: &hir::Expr,
- base: &hir::Expr,
- idx: &hir::Expr,
- method_call: MethodCall)
- -> DatumBlock<'blk, 'tcx, Expr> {
- //! Translates `base[idx]`.
-
- let _icx = push_ctxt("trans_index");
- let ccx = bcx.ccx();
- let mut bcx = bcx;
-
- let index_expr_debug_loc = index_expr.debug_loc();
-
- // Check for overloaded index.
- let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let elt_datum = match method {
- Some(method) => {
- let method_ty = monomorphize_type(bcx, method.ty);
-
- let base_datum = unpack_datum!(bcx, trans(bcx, base));
-
- // Translate index expression.
- let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
-
- let ref_ty = // invoked methods have LB regions instantiated:
- bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap();
- let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
- None => {
- span_bug!(index_expr.span,
- "index method didn't return a \
- dereferenceable type?!")
- }
- Some(elt_tm) => elt_tm.ty,
- };
-
- // Overloaded. Invoke the index() method, which basically
- // yields a `&T` pointer. We can then proceed down the
- // normal path (below) to dereference that `&T`.
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
-
- bcx = Callee::method(bcx, method)
- .call(bcx, index_expr_debug_loc,
- ArgOverloadedOp(base_datum, Some(ix_datum)),
- Some(SaveIn(scratch.val))).bcx;
-
- let datum = scratch.to_expr_datum();
- let lval = Lvalue::new("expr::trans_index overload");
- if type_is_sized(bcx.tcx(), elt_ty) {
- Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
- } else {
- Datum::new(datum.val, elt_ty, LvalueExpr(lval))
- }
- }
- None => {
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
- base,
- "index"));
-
- // Translate index expression and cast to a suitable LLVM integer.
- // Rust is less strict than LLVM in this regard.
- let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
- let ix_val = ix_datum.to_llscalarish(bcx);
- let ix_size = machine::llbitsize_of_real(bcx.ccx(),
- val_ty(ix_val));
- let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type());
- let ix_val = {
- if ix_size < int_size {
- if expr_ty(bcx, idx).is_signed() {
- SExt(bcx, ix_val, ccx.int_type())
- } else { ZExt(bcx, ix_val, ccx.int_type()) }
- } else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type())
- } else {
- ix_val
- }
- };
-
- let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
-
- let (base, len) = base_datum.get_vec_base_and_len(bcx);
-
- debug!("trans_index: base {:?}", Value(base));
- debug!("trans_index: len {:?}", Value(len));
-
- let bounds_check = ICmp(bcx,
- llvm::IntUGE,
- ix_val,
- len,
- index_expr_debug_loc);
- let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
- let expected = Call(bcx,
- expect,
- &[bounds_check, C_bool(ccx, false)],
- index_expr_debug_loc);
- bcx = with_cond(bcx, expected, |bcx| {
- controlflow::trans_fail_bounds_check(bcx,
- expr_info(index_expr),
- ix_val,
- len)
- });
- let elt = InBoundsGEP(bcx, base, &[ix_val]);
- let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
- let lval = Lvalue::new("expr::trans_index fallback");
- Datum::new(elt, unit_ty, LvalueExpr(lval))
- }
- };
-
- DatumBlock::new(bcx, elt_datum)
-}
-
-/// Translates a reference to a variable.
-pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
- -> Datum<'tcx, Lvalue> {
-
- match def {
- Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
- Def::Upvar(_, nid, _, _) => {
- // Can't move upvars, so this is never a ZeroMemLastUse.
- let local_ty = node_id_type(bcx, nid);
- let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
- bcx, nid, HintKind::ZeroAndMaintain);
- match bcx.fcx.llupvars.borrow().get(&nid) {
- Some(&val) => Datum::new(val, local_ty, lval),
- None => {
- bug!("trans_var: no llval for upvar {} found", nid);
- }
- }
- }
- Def::Local(_, nid) => {
- let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
- Some(&v) => v,
- None => {
- bug!("trans_var: no datum for local/arg {} found", nid);
- }
- };
- debug!("take_local(nid={}, v={:?}, ty={})",
- nid, Value(datum.val), datum.ty);
- datum
- }
- _ => bug!("{:?} should not reach expr::trans_var", def)
- }
-}
-
-fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_rvalue_stmt");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- expr.debug_loc().apply(bcx.fcx);
-
- match expr.node {
- hir::ExprBreak(label_opt) => {
- controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node))
- }
- hir::ExprType(ref e, _) => {
- trans_into(bcx, &e, Ignore)
- }
- hir::ExprAgain(label_opt) => {
- controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node))
- }
- hir::ExprRet(ref ex) => {
- // Check to see if the return expression itself is reachable.
- // This can occur when the inner expression contains a return
- let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
- cfg.node_is_reachable(expr.id)
- } else {
- true
- };
-
- if reachable {
- controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
- } else {
- // If it's not reachable, just translate the inner expression
- // directly. This avoids having to manage a return slot when
- // it won't actually be used anyway.
- if let &Some(ref x) = ex {
- bcx = trans_into(bcx, &x, Ignore);
- }
- // Mark the end of the block as unreachable. Once we get to
- // a return expression, there's no more we should be doing
- // after this.
- Unreachable(bcx);
- bcx
- }
- }
- hir::ExprWhile(ref cond, ref body, _) => {
- controlflow::trans_while(bcx, expr, &cond, &body)
- }
- hir::ExprLoop(ref body, _) => {
- controlflow::trans_loop(bcx, expr, &body)
- }
- hir::ExprAssign(ref dst, ref src) => {
- let src_datum = unpack_datum!(bcx, trans(bcx, &src));
- let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));
-
- if bcx.fcx.type_needs_drop(dst_datum.ty) {
- // If there are destructors involved, make sure we
- // are copying from an rvalue, since that cannot possible
- // alias an lvalue. We are concerned about code like:
- //
- // a = a
- //
- // but also
- //
- // a = a.b
- //
- // where e.g. a : Option<Foo> and a.b :
- // Option<Foo>. In that case, freeing `a` before the
- // assignment may also free `a.b`!
- //
- // We could avoid this intermediary with some analysis
- // to determine whether `dst` may possibly own `src`.
- expr.debug_loc().apply(bcx.fcx);
- let src_datum = unpack_datum!(
- bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
- let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
- let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
-
- // 1. Drop the data at the destination, passing the
- // drop-hint in case the lvalue has already been
- // dropped or moved.
- bcx = glue::drop_ty_core(bcx,
- dst_datum.val,
- dst_datum.ty,
- expr.debug_loc(),
- false,
- opt_hint_val);
-
- // 2. We are overwriting the destination; ensure that
- // its drop-hint (if any) says "initialized."
- if let Some(hint_val) = opt_hint_val {
- let hint_llval = hint_val.value();
- let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
- Store(bcx, drop_needed, hint_llval);
- }
- src_datum.store_to(bcx, dst_datum.val)
- } else {
- src_datum.store_to(bcx, dst_datum.val)
- }
- }
- hir::ExprAssignOp(op, ref dst, ref src) => {
- let method = bcx.tcx().tables
- .borrow()
- .method_map
- .get(&MethodCall::expr(expr.id)).cloned();
-
- if let Some(method) = method {
- let dst = unpack_datum!(bcx, trans(bcx, &dst));
- let src_datum = unpack_datum!(bcx, trans(bcx, &src));
-
- Callee::method(bcx, method)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(dst, Some(src_datum)), None).bcx
- } else {
- trans_assign_op(bcx, expr, op, &dst, &src)
- }
- }
- hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
- let outputs = outputs.iter().map(|output| {
- let out_datum = unpack_datum!(bcx, trans(bcx, output));
- unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
- }).collect();
- let inputs = inputs.iter().map(|input| {
- let input = unpack_datum!(bcx, trans(bcx, input));
- let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
- input.to_llscalarish(bcx)
- }).collect();
- asm::trans_inline_asm(bcx, a, outputs, inputs);
- bcx
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_stmt_unadjusted reached \
- fall-through case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
- let mut bcx = bcx;
-
- expr.debug_loc().apply(bcx.fcx);
-
- // Entry into the method table if this is an overloaded call/op.
- let method_call = MethodCall::expr(expr.id);
-
- match expr.node {
- hir::ExprType(ref e, _) => {
- trans_into(bcx, &e, dest)
- }
- hir::ExprPath(..) => {
- trans_def_dps_unadjusted(bcx, expr, bcx.tcx().expect_def(expr.id), dest)
- }
- hir::ExprIf(ref cond, ref thn, ref els) => {
- controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
- }
- hir::ExprMatch(ref discr, ref arms, _) => {
- _match::trans_match(bcx, expr, &discr, &arms[..], dest)
- }
- hir::ExprBlock(ref blk) => {
- controlflow::trans_block(bcx, &blk, dest)
- }
- hir::ExprStruct(_, ref fields, ref base) => {
- trans_struct(bcx,
- &fields[..],
- base.as_ref().map(|e| &**e),
- expr.span,
- expr.id,
- node_id_type(bcx, expr.id),
- dest)
- }
- hir::ExprTup(ref args) => {
- let numbered_fields: Vec<(usize, &hir::Expr)> =
- args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
- trans_adt(bcx,
- expr_ty(bcx, expr),
- Disr(0),
- &numbered_fields[..],
- None,
- dest,
- expr.debug_loc())
- }
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => {
- tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
- }
- _ => {
- span_bug!(expr.span,
- "trans_rvalue_dps_unadjusted shouldn't be \
- translating this type of literal")
- }
- }
- }
- hir::ExprVec(..) | hir::ExprRepeat(..) => {
- tvec::trans_fixed_vstore(bcx, expr, dest)
- }
- hir::ExprClosure(_, ref decl, ref body, _) => {
- let dest = match dest {
- SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
- Ignore => closure::Dest::Ignore(bcx.ccx())
- };
-
- // NB. To get the id of the closure, we don't use
- // `local_def_id(id)`, but rather we extract the closure
- // def-id from the expr's type. This is because this may
- // be an inlined expression from another crate, and we
- // want to get the ORIGINAL closure def-id, since that is
- // the key we need to find the closure-kind and
- // closure-type etc.
- let (def_id, substs) = match expr_ty(bcx, expr).sty {
- ty::TyClosure(def_id, substs) => (def_id, substs),
- ref t =>
- span_bug!(
- expr.span,
- "closure expr without closure type: {:?}", t),
- };
-
- closure::trans_closure_expr(dest,
- decl,
- body,
- expr.id,
- def_id,
- substs).unwrap_or(bcx)
- }
- hir::ExprCall(ref f, ref args) => {
- let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let (callee, args) = if let Some(method) = method {
- let mut all_args = vec![&**f];
- all_args.extend(args.iter().map(|e| &**e));
-
- (Callee::method(bcx, method), ArgOverloadedCall(all_args))
- } else {
- let f = unpack_datum!(bcx, trans(bcx, f));
- (match f.ty.sty {
- ty::TyFnDef(def_id, substs, _) => {
- Callee::def(bcx.ccx(), def_id, substs)
- }
- ty::TyFnPtr(_) => {
- let f = unpack_datum!(bcx,
- f.to_rvalue_datum(bcx, "callee"));
- Callee::ptr(f)
- }
- _ => {
- span_bug!(expr.span,
- "type of callee is not a fn: {}", f.ty);
- }
- }, ArgExprs(&args))
- };
- callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
- }
- hir::ExprMethodCall(_, _, ref args) => {
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
- }
- hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
- // if not overloaded, would be RvalueDatumExpr
- let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
- let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
- if !op.node.is_by_value() {
- rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
- }
-
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
- }
- hir::ExprUnary(_, ref subexpr) => {
- // if not overloaded, would be RvalueDatumExpr
- let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
-
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(arg, None), Some(dest)).bcx
- }
- hir::ExprCast(..) => {
- // Trait casts used to come this way, now they should be coercions.
- span_bug!(expr.span, "DPS expr_cast (residual trait cast?)")
- }
- hir::ExprAssignOp(op, _, _) => {
- span_bug!(
- expr.span,
- "augmented assignment `{}=` should always be a rvalue_stmt",
- op.node.as_str())
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_dps_unadjusted reached fall-through \
- case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ref_expr: &hir::Expr,
- def: Def,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_def_dps_unadjusted");
-
- let lldest = match dest {
- SaveIn(lldest) => lldest,
- Ignore => { return bcx; }
- };
-
- let ty = expr_ty(bcx, ref_expr);
- if let ty::TyFnDef(..) = ty.sty {
- // Zero-sized function or ctor.
- return bcx;
- }
-
- match def {
- Def::Variant(tid, vid) => {
- let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
- // Nullary variant.
- let ty = expr_ty(bcx, ref_expr);
- let repr = adt::represent_type(bcx.ccx(), ty);
- adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
- bcx
- }
- Def::Struct(..) => {
- match ty.sty {
- ty::TyStruct(def, _) if def.has_dtor() => {
- let repr = adt::represent_type(bcx.ccx(), ty);
- adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
- }
- _ => {}
- }
- bcx
- }
- _ => {
- span_bug!(ref_expr.span,
- "Non-DPS def {:?} referened by {}",
- def, bcx.node_id_to_string(ref_expr.id));
- }
- }
-}
-
-fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- fields: &[hir::Field],
- base: Option<&hir::Expr>,
- expr_span: syntax_pos::Span,
- expr_id: ast::NodeId,
- ty: Ty<'tcx>,
- dest: Dest) -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_rec");
-
- let tcx = bcx.tcx();
- let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
-
- let mut need_base = vec![true; vinfo.fields.len()];
-
- let numbered_fields = fields.iter().map(|field| {
- let pos = vinfo.field_index(field.name.node);
- need_base[pos] = false;
- (pos, &*field.expr)
- }).collect::<Vec<_>>();
-
- let optbase = match base {
- Some(base_expr) => {
- let mut leftovers = Vec::new();
- for (i, b) in need_base.iter().enumerate() {
- if *b {
- leftovers.push((i, vinfo.fields[i].1));
- }
- }
- Some(StructBaseInfo {expr: base_expr,
- fields: leftovers })
- }
- None => {
- if need_base.iter().any(|b| *b) {
- span_bug!(expr_span, "missing fields and no base expr")
- }
- None
- }
- };
-
- trans_adt(bcx,
- ty,
- vinfo.discr,
- &numbered_fields,
- optbase,
- dest,
- DebugLoc::At(expr_id, expr_span))
-}
-
-/// Information that `trans_adt` needs in order to fill in the fields
-/// of a struct copied from a base struct (e.g., from an expression
-/// like `Foo { a: b, ..base }`.
-///
-/// Note that `fields` may be empty; the base expression must always be
-/// evaluated for side-effects.
-pub struct StructBaseInfo<'a, 'tcx> {
- /// The base expression; will be evaluated after all explicit fields.
- expr: &'a hir::Expr,
- /// The indices of fields to copy paired with their types.
- fields: Vec<(usize, Ty<'tcx>)>
-}
-
-/// Constructs an ADT instance:
-///
-/// - `fields` should be a list of field indices paired with the
-/// expression to store into that field. The initializers will be
-/// evaluated in the order specified by `fields`.
-///
-/// - `optbase` contains information on the base struct (if any) from
-/// which remaining fields are copied; see comments on `StructBaseInfo`.
-pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- discr: Disr,
- fields: &[(usize, &hir::Expr)],
- optbase: Option<StructBaseInfo<'a, 'tcx>>,
- dest: Dest,
- debug_location: DebugLoc)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_adt");
- let fcx = bcx.fcx;
- let repr = adt::represent_type(bcx.ccx(), ty);
-
- debug_location.apply(bcx.fcx);
-
- // If we don't care about the result, just make a
- // temporary stack slot
- let addr = match dest {
- SaveIn(pos) => pos,
- Ignore => {
- let llresult = alloc_ty(bcx, ty, "temp");
- call_lifetime_start(bcx, llresult);
- llresult
- }
- };
-
- debug!("trans_adt");
-
- // This scope holds intermediates that must be cleaned should
- // panic occur before the ADT as a whole is ready.
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-
- if ty.is_simd() {
- // Issue 23112: The original logic appeared vulnerable to same
- // order-of-eval bug. But, SIMD values are tuple-structs;
- // i.e. functional record update (FRU) syntax is unavailable.
- //
- // To be safe, double-check that we did not get here via FRU.
- assert!(optbase.is_none());
-
- // This is the constructor of a SIMD type, such types are
- // always primitive machine types and so do not have a
- // destructor or require any clean-up.
- let llty = type_of::type_of(bcx.ccx(), ty);
-
- // keep a vector as a register, and running through the field
- // `insertelement`ing them directly into that register
- // (i.e. avoid GEPi and `store`s to an alloca) .
- let mut vec_val = C_undef(llty);
-
- for &(i, ref e) in fields {
- let block_datum = trans(bcx, &e);
- bcx = block_datum.bcx;
- let position = C_uint(bcx.ccx(), i);
- let value = block_datum.datum.to_llscalarish(bcx);
- vec_val = InsertElement(bcx, vec_val, value, position);
- }
- Store(bcx, vec_val, addr);
- } else if let Some(base) = optbase {
- // Issue 23112: If there is a base, then order-of-eval
- // requires field expressions eval'ed before base expression.
-
- // First, trans field expressions to temporary scratch values.
- let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
- let datum = unpack_datum!(bcx, trans(bcx, &e));
- (i, datum)
- }).collect();
-
- debug_location.apply(bcx.fcx);
-
- // Second, trans the base to the dest.
- assert_eq!(discr, Disr(0));
-
- let addr = adt::MaybeSizedValue::sized(addr);
- match expr_kind(bcx.tcx(), &base.expr) {
- ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
- bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
- },
- ExprKind::RvalueStmt => {
- bug!("unexpected expr kind for struct base expr")
- }
- _ => {
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
- for &(i, t) in &base.fields {
- let datum = base_datum.get_element(
- bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
- assert!(type_is_sized(bcx.tcx(), datum.ty));
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- bcx = datum.store_to(bcx, dest);
- }
- }
- }
-
- // Finally, move scratch field values into actual field locations
- for (i, datum) in scratch_vals {
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- bcx = datum.store_to(bcx, dest);
- }
- } else {
- // No base means we can write all fields directly in place.
- let addr = adt::MaybeSizedValue::sized(addr);
- for &(i, ref e) in fields {
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- let e_ty = expr_ty_adjusted(bcx, &e);
- bcx = trans_into(bcx, &e, SaveIn(dest));
- let scope = cleanup::CustomScope(custom_cleanup_scope);
- fcx.schedule_lifetime_end(scope, dest);
- // FIXME: nonzeroing move should generalize to fields
- fcx.schedule_drop_mem(scope, dest, e_ty, None);
- }
- }
-
- adt::trans_set_discr(bcx, &repr, addr, discr);
-
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-
- // If we don't care about the result drop the temporary we made
- match dest {
- SaveIn(_) => bcx,
- Ignore => {
- bcx = glue::drop_ty(bcx, addr, ty, debug_location);
- base::call_lifetime_end(bcx, addr);
- bcx
- }
- }
-}
-
-
-fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- lit: &ast::Lit)
- -> DatumBlock<'blk, 'tcx, Expr> {
- // must not be a string constant, that is a RvalueDpsExpr
- let _icx = push_ctxt("trans_immediate_lit");
- let ty = expr_ty(bcx, expr);
- let v = consts::const_lit(bcx.ccx(), expr, lit);
- immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
-}
-
-fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::UnOp,
- sub_expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let ccx = bcx.ccx();
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_unary_datum");
-
- let method_call = MethodCall::expr(expr.id);
-
- // The only overloaded operator that is translated to a datum
- // is an overloaded deref, since it is always yields a `&T`.
- // Otherwise, we should be in the RvalueDpsExpr path.
- assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
-
- let un_ty = expr_ty(bcx, expr);
-
- let debug_loc = expr.debug_loc();
-
- match op {
- hir::UnNot => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
- immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
- }
- hir::UnNeg => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- let val = datum.to_llscalarish(bcx);
- let (bcx, llneg) = {
- if un_ty.is_fp() {
- let result = FNeg(bcx, val, debug_loc);
- (bcx, result)
- } else {
- let is_signed = un_ty.is_signed();
- let result = Neg(bcx, val, debug_loc);
- let bcx = if bcx.ccx().check_overflow() && is_signed {
- let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
- let is_min = ICmp(bcx, llvm::IntEQ, val,
- C_integral(llty, min, true), debug_loc);
- with_cond(bcx, is_min, |bcx| {
- let msg = InternedString::new(
- "attempt to negate with overflow");
- controlflow::trans_fail(bcx, expr_info(expr), msg)
- })
- } else {
- bcx
- };
- (bcx, result)
- }
- };
- immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
- }
- hir::UnDeref => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- deref_once(bcx, expr, datum, method_call)
- }
- }
-}
-
-fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- box_expr: &hir::Expr,
- box_ty: Ty<'tcx>,
- contents: &hir::Expr,
- contents_ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_uniq_expr");
- let fcx = bcx.fcx;
- assert!(type_is_sized(bcx.tcx(), contents_ty));
- let llty = type_of::type_of(bcx.ccx(), contents_ty);
- let size = llsize_of(bcx.ccx(), llty);
- let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
- let llty_ptr = llty.ptr_to();
- let Result { bcx, val } = malloc_raw_dyn(bcx,
- llty_ptr,
- box_ty,
- size,
- align,
- box_expr.debug_loc());
- // Unique boxes do not allocate for zero-size types. The standard library
- // may assume that `free` is never called on the pointer returned for
- // `Box<ZeroSizeType>`.
- let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
- trans_into(bcx, contents, SaveIn(val))
- } else {
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
- fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
- val, cleanup::HeapExchange, contents_ty);
- let bcx = trans_into(bcx, contents, SaveIn(val));
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
- bcx
- };
- immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
-}
-
-fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- subexpr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_addr_of");
- let mut bcx = bcx;
- let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
- let ty = expr_ty(bcx, expr);
- if !type_is_sized(bcx.tcx(), sub_datum.ty) {
- // Always generate an lvalue datum, because this pointer doesn't own
- // the data and cleanup is scheduled elsewhere.
- DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
- } else {
- // Sized value, ref to a thin pointer
- immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
- }
-}
-
-fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- binop_expr: &hir::Expr,
- binop_ty: Ty<'tcx>,
- op: hir::BinOp,
- lhs: Datum<'tcx, Rvalue>,
- rhs: Datum<'tcx, Rvalue>)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- let _icx = push_ctxt("trans_scalar_binop");
-
- let lhs_t = lhs.ty;
- assert!(!lhs_t.is_simd());
- let is_float = lhs_t.is_fp();
- let is_signed = lhs_t.is_signed();
- let info = expr_info(binop_expr);
-
- let binop_debug_loc = binop_expr.debug_loc();
-
- let mut bcx = bcx;
- let lhs = lhs.to_llscalarish(bcx);
- let rhs = rhs.to_llscalarish(bcx);
- let val = match op.node {
- hir::BiAdd => {
- if is_float {
- FAdd(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiSub => {
- if is_float {
- FSub(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiMul => {
- if is_float {
- FMul(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiDiv => {
- if is_float {
- FDiv(bcx, lhs, rhs, binop_debug_loc)
- } else {
- // Only zero-check integers; fp /0 is NaN
- bcx = base::fail_if_zero_or_overflows(bcx,
- expr_info(binop_expr),
- op,
- lhs,
- rhs,
- lhs_t);
- if is_signed {
- SDiv(bcx, lhs, rhs, binop_debug_loc)
- } else {
- UDiv(bcx, lhs, rhs, binop_debug_loc)
- }
- }
- }
- hir::BiRem => {
- if is_float {
- FRem(bcx, lhs, rhs, binop_debug_loc)
- } else {
- // Only zero-check integers; fp %0 is NaN
- bcx = base::fail_if_zero_or_overflows(bcx,
- expr_info(binop_expr),
- op, lhs, rhs, lhs_t);
- if is_signed {
- SRem(bcx, lhs, rhs, binop_debug_loc)
- } else {
- URem(bcx, lhs, rhs, binop_debug_loc)
- }
- }
- }
- hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
- hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
- hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
- hir::BiShl => {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- hir::BiShr => {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
- base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
- }
- _ => {
- span_bug!(binop_expr.span, "unexpected binop");
- }
- };
-
- immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
-}
-
-// refinement types would obviate the need for this
-#[derive(Clone, Copy)]
-enum lazy_binop_ty {
- lazy_and,
- lazy_or,
-}
-
-
-fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- binop_expr: &hir::Expr,
- op: lazy_binop_ty,
- a: &hir::Expr,
- b: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_lazy_binop");
- let binop_ty = expr_ty(bcx, binop_expr);
- let fcx = bcx.fcx;
-
- let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
- let lhs = lhs.to_llscalarish(past_lhs);
-
- if past_lhs.unreachable.get() {
- return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
- }
-
- // If the rhs can never be reached, don't generate code for it.
- if let Some(cond_val) = const_to_opt_uint(lhs) {
- match (cond_val, op) {
- (0, lazy_and) |
- (1, lazy_or) => {
- return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
- }
- _ => { /* continue */ }
- }
- }
-
- let join = fcx.new_id_block("join", binop_expr.id);
- let before_rhs = fcx.new_id_block("before_rhs", b.id);
-
- match op {
- lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
- lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
- }
-
- let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
- let rhs = rhs.to_llscalarish(past_rhs);
-
- if past_rhs.unreachable.get() {
- return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
- }
-
- Br(past_rhs, join.llbb, DebugLoc::None);
- let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
- &[past_lhs.llbb, past_rhs.llbb]);
-
- return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
-}
-
-fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::BinOp,
- lhs: &hir::Expr,
- rhs: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_binary");
- let ccx = bcx.ccx();
-
- // if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx().is_method_call(expr.id));
-
- match op.node {
- hir::BiAnd => {
- trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
- }
- hir::BiOr => {
- trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
- }
- _ => {
- let mut bcx = bcx;
- let binop_ty = expr_ty(bcx, expr);
-
- let lhs = unpack_datum!(bcx, trans(bcx, lhs));
- let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
- debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
- let rhs = unpack_datum!(bcx, trans(bcx, rhs));
- let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
- debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);
-
- if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
- assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
- "built-in binary operators on fat pointers are homogeneous");
- assert_eq!(binop_ty, bcx.tcx().types.bool);
- let val = base::compare_scalar_types(
- bcx,
- lhs.val,
- rhs.val,
- lhs.ty,
- op.node,
- expr.debug_loc());
- immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
- } else {
- assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
- "built-in binary operators on fat pointers are homogeneous");
- trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
- }
- }
- }
-}
-
-pub fn cast_is_noop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- expr: &hir::Expr,
- t_in: Ty<'tcx>,
- t_out: Ty<'tcx>)
- -> bool {
- if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
- return true;
- }
-
- match (t_in.builtin_deref(true, ty::NoPreference),
- t_out.builtin_deref(true, ty::NoPreference)) {
- (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
- t_in == t_out
- }
- _ => {
- // This condition isn't redundant with the check for CoercionCast:
- // different types can be substituted into the same type, and
- // == equality can be overconservative if there are regions.
- t_in == t_out
- }
- }
-}
-
-fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- id: ast::NodeId)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- use rustc::ty::cast::CastTy::*;
- use rustc::ty::cast::IntTy::*;
-
- fn int_cast(bcx: Block,
- lldsttype: Type,
- llsrctype: Type,
- llsrc: ValueRef,
- signed: bool)
- -> ValueRef
- {
- let _icx = push_ctxt("int_cast");
- let srcsz = llsrctype.int_width();
- let dstsz = lldsttype.int_width();
- return if dstsz == srcsz {
- BitCast(bcx, llsrc, lldsttype)
- } else if srcsz > dstsz {
- TruncOrBitCast(bcx, llsrc, lldsttype)
- } else if signed {
- SExtOrBitCast(bcx, llsrc, lldsttype)
- } else {
- ZExtOrBitCast(bcx, llsrc, lldsttype)
- }
- }
-
- fn float_cast(bcx: Block,
- lldsttype: Type,
- llsrctype: Type,
- llsrc: ValueRef)
- -> ValueRef
- {
- let _icx = push_ctxt("float_cast");
- let srcsz = llsrctype.float_width();
- let dstsz = lldsttype.float_width();
- return if dstsz > srcsz {
- FPExt(bcx, llsrc, lldsttype)
- } else if srcsz > dstsz {
- FPTrunc(bcx, llsrc, lldsttype)
- } else { llsrc };
- }
-
- let _icx = push_ctxt("trans_cast");
- let mut bcx = bcx;
- let ccx = bcx.ccx();
-
- let t_in = expr_ty_adjusted(bcx, expr);
- let t_out = node_id_type(bcx, id);
-
- debug!("trans_cast({:?} as {:?})", t_in, t_out);
- let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
- let ll_t_out = type_of::immediate_type_of(ccx, t_out);
- // Convert the value to be cast into a ValueRef, either by-ref or
- // by-value as appropriate given its type:
- let mut datum = unpack_datum!(bcx, trans(bcx, expr));
-
- let datum_ty = monomorphize_type(bcx, datum.ty);
-
- if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
- datum.ty = t_out;
- return DatumBlock::new(bcx, datum);
- }
-
- if type_is_fat_ptr(bcx.tcx(), t_in) {
- assert!(datum.kind.is_by_ref());
- if type_is_fat_ptr(bcx.tcx(), t_out) {
- return DatumBlock::new(bcx, Datum::new(
- PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
- t_out,
- Rvalue::new(ByRef)
- )).to_expr_datumblock();
- } else {
- // Return the address
- return immediate_rvalue_bcx(bcx,
- PointerCast(bcx,
- Load(bcx, get_dataptr(bcx, datum.val)),
- ll_t_out),
- t_out).to_expr_datumblock();
- }
- }
-
- let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
- let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
-
- let (llexpr, signed) = if let Int(CEnum) = r_t_in {
- let repr = adt::represent_type(ccx, t_in);
- let datum = unpack_datum!(
- bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
- let llexpr_ptr = datum.to_llref();
- let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
- Some(Type::i64(ccx)), true);
- ll_t_in = val_ty(discr);
- (discr, adt::is_discr_signed(&repr))
- } else {
- (datum.to_llscalarish(bcx), t_in.is_signed())
- };
-
- let newval = match (r_t_in, r_t_out) {
- (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
- PointerCast(bcx, llexpr, ll_t_out)
- }
- (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
- (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
-
- (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
- (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
- (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
- (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
- (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
- (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
-
- _ => span_bug!(expr.span,
- "translating unsupported cast: \
- {:?} -> {:?}",
- t_in,
- t_out)
- };
- return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
-}
-
-fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::BinOp,
- dst: &hir::Expr,
- src: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_assign_op");
- let mut bcx = bcx;
-
- debug!("trans_assign_op(expr={:?})", expr);
-
- // User-defined operator methods cannot be used with `+=` etc right now
- assert!(!bcx.tcx().is_method_call(expr.id));
-
- // Evaluate LHS (destination), which should be an lvalue
- let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
- assert!(!bcx.fcx.type_needs_drop(dst.ty));
- let lhs = load_ty(bcx, dst.val, dst.ty);
- let lhs = immediate_rvalue(lhs, dst.ty);
-
- // Evaluate RHS - FIXME(#28160) this sucks
- let rhs = unpack_datum!(bcx, trans(bcx, &src));
- let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
-
- // Perform computation and store the result
- let result_datum = unpack_datum!(
- bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
- return result_datum.store_to(bcx, dst.val);
-}
-
-fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- datum: Datum<'tcx, Expr>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
-
- // Ensure cleanup of `datum` if not already scheduled and obtain
- // a "by ref" pointer.
- let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
-
- // Compute final type. Note that we are loose with the region and
- // mutability, since those things don't matter in trans.
- let referent_ty = lv_datum.ty;
- let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), referent_ty);
-
- // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
- // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers
- // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of
- // indirection and for thin pointers, this has no ill effects.
- let kind = if type_is_sized(bcx.tcx(), referent_ty) {
- RvalueExpr(Rvalue::new(ByValue))
- } else {
- LvalueExpr(lv_datum.kind)
- };
-
- // Get the pointer.
- let llref = lv_datum.to_llref();
- DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
-}
-
-fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>,
- times: usize)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
- let mut datum = datum;
- for i in 0..times {
- let method_call = MethodCall::autoderef(expr.id, i as u32);
- datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
- }
- DatumBlock { bcx: bcx, datum: datum }
-}
-
-fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>,
- method_call: MethodCall)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let ccx = bcx.ccx();
-
- debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
- expr, datum, method_call);
-
- let mut bcx = bcx;
-
- // Check for overloaded deref.
- let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let datum = match method {
- Some(method) => {
- let method_ty = monomorphize_type(bcx, method.ty);
-
- // Overloaded. Invoke the deref() method, which basically
- // converts from the `Smaht<T>` pointer that we have into
- // a `&T` pointer. We can then proceed down the normal
- // path (below) to dereference that `&T`.
- let datum = if method_call.autoderef == 0 {
- datum
- } else {
- // Always perform an AutoPtr when applying an overloaded auto-deref
- unpack_datum!(bcx, auto_ref(bcx, datum, expr))
- };
-
- let ref_ty = // invoked methods have their LB regions instantiated
- ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap();
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
-
- bcx = Callee::method(bcx, method)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(datum, None),
- Some(SaveIn(scratch.val))).bcx;
- scratch.to_expr_datum()
- }
- None => {
- // Not overloaded. We already have a pointer we know how to deref.
- datum
- }
- };
-
- let r = match datum.ty.sty {
- ty::TyBox(content_ty) => {
- // Make sure we have an lvalue datum here to get the
- // proper cleanups scheduled
- let datum = unpack_datum!(
- bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
-
- if type_is_sized(bcx.tcx(), content_ty) {
- let ptr = load_ty(bcx, datum.val, datum.ty);
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
- } else {
- // A fat pointer and a DST lvalue have the same representation
- // just different types. Since there is no temporary for `*e`
- // here (because it is unsized), we cannot emulate the sized
- // object code path for running drop glue and free. Instead,
- // we schedule cleanup for `e`, turning it into an lvalue.
-
- let lval = Lvalue::new("expr::deref_once ty_uniq");
- let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
- DatumBlock::new(bcx, datum)
- }
- }
-
- ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
- ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
- let lval = Lvalue::new("expr::deref_once ptr");
- if type_is_sized(bcx.tcx(), content_ty) {
- let ptr = datum.to_llscalarish(bcx);
-
- // Always generate an lvalue datum, even if datum.mode is
- // an rvalue. This is because datum.mode is only an
- // rvalue for non-owning pointers like &T or *T, in which
- // case cleanup *is* scheduled elsewhere, by the true
- // owner (or, in the case of *T, by the user).
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
- } else {
- // A fat pointer and a DST lvalue have the same representation
- // just different types.
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
- }
- }
-
- _ => {
- span_bug!(
- expr.span,
- "deref invoked on expr of invalid type {:?}",
- datum.ty);
- }
- };
-
- debug!("deref_once(expr={}, method_call={:?}, result={:?})",
- expr.id, method_call, r.datum);
-
- return r;
-}
-
-#[derive(Debug)]
-enum OverflowOp {
- Add,
- Sub,
- Mul,
- Shl,
- Shr,
-}
-
-impl OverflowOp {
- fn codegen_strategy(&self) -> OverflowCodegen {
- use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
- match *self {
- OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
- OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
- OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
-
- OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
- OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
- }
- }
-}
-
-enum OverflowCodegen {
- ViaIntrinsic(OverflowOpViaIntrinsic),
- ViaInputCheck(OverflowOpViaInputCheck),
-}
-
-enum OverflowOpViaInputCheck { Shl, Shr, }
-
-#[derive(Debug)]
-enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
-
-impl OverflowOpViaIntrinsic {
- fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
- let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
- bcx.ccx().get_intrinsic(&name)
- }
- fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str {
- use syntax::ast::IntTy::*;
- use syntax::ast::UintTy::*;
- use rustc::ty::{TyInt, TyUint};
-
- let new_sty = match ty.sty {
- TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
- "16" => TyInt(I16),
- "32" => TyInt(I32),
- "64" => TyInt(I64),
- _ => bug!("unsupported target word size")
- },
- TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
- "16" => TyUint(U16),
- "32" => TyUint(U32),
- "64" => TyUint(U64),
- _ => bug!("unsupported target word size")
- },
- ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
- _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
- *self)
- };
-
- match *self {
- OverflowOpViaIntrinsic::Add => match new_sty {
- TyInt(I8) => "llvm.sadd.with.overflow.i8",
- TyInt(I16) => "llvm.sadd.with.overflow.i16",
- TyInt(I32) => "llvm.sadd.with.overflow.i32",
- TyInt(I64) => "llvm.sadd.with.overflow.i64",
-
- TyUint(U8) => "llvm.uadd.with.overflow.i8",
- TyUint(U16) => "llvm.uadd.with.overflow.i16",
- TyUint(U32) => "llvm.uadd.with.overflow.i32",
- TyUint(U64) => "llvm.uadd.with.overflow.i64",
-
- _ => bug!(),
- },
- OverflowOpViaIntrinsic::Sub => match new_sty {
- TyInt(I8) => "llvm.ssub.with.overflow.i8",
- TyInt(I16) => "llvm.ssub.with.overflow.i16",
- TyInt(I32) => "llvm.ssub.with.overflow.i32",
- TyInt(I64) => "llvm.ssub.with.overflow.i64",
-
- TyUint(U8) => "llvm.usub.with.overflow.i8",
- TyUint(U16) => "llvm.usub.with.overflow.i16",
- TyUint(U32) => "llvm.usub.with.overflow.i32",
- TyUint(U64) => "llvm.usub.with.overflow.i64",
-
- _ => bug!(),
- },
- OverflowOpViaIntrinsic::Mul => match new_sty {
- TyInt(I8) => "llvm.smul.with.overflow.i8",
- TyInt(I16) => "llvm.smul.with.overflow.i16",
- TyInt(I32) => "llvm.smul.with.overflow.i32",
- TyInt(I64) => "llvm.smul.with.overflow.i64",
-
- TyUint(U8) => "llvm.umul.with.overflow.i8",
- TyUint(U16) => "llvm.umul.with.overflow.i16",
- TyUint(U32) => "llvm.umul.with.overflow.i32",
- TyUint(U64) => "llvm.umul.with.overflow.i64",
-
- _ => bug!(),
- },
- }
- }
-
- fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
- info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>, lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef) {
- use rustc_const_math::{ConstMathErr, Op};
-
- let llfn = self.to_intrinsic(bcx, lhs_t);
-
- let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
- let result = ExtractValue(bcx, val, 0); // iN operation result
- let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
-
- let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
- binop_debug_loc);
-
- let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
- let expected = Call(bcx, expect, &[cond, C_bool(bcx.ccx(), false)],
- binop_debug_loc);
-
- let op = match *self {
- OverflowOpViaIntrinsic::Add => Op::Add,
- OverflowOpViaIntrinsic::Sub => Op::Sub,
- OverflowOpViaIntrinsic::Mul => Op::Mul
- };
-
- let bcx =
- base::with_cond(bcx, expected, |bcx|
- controlflow::trans_fail(bcx, info,
- InternedString::new(ConstMathErr::Overflow(op).description())));
-
- (bcx, result)
- }
-}
-
-impl OverflowOpViaInputCheck {
- fn build_with_input_check<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>,
- lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef)
- {
- use rustc_const_math::{ConstMathErr, Op};
-
- let lhs_llty = val_ty(lhs);
- let rhs_llty = val_ty(rhs);
-
- // Panic if any bits are set outside of bits that we always
- // mask in.
- //
- // Note that the mask's value is derived from the LHS type
- // (since that is where the 32/64 distinction is relevant) but
- // the mask's type must match the RHS type (since they will
- // both be fed into an and-binop)
- let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
-
- let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
- let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
- let (result, op) = match *self {
- OverflowOpViaInputCheck::Shl =>
- (build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), Op::Shl),
- OverflowOpViaInputCheck::Shr =>
- (build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), Op::Shr)
- };
- let bcx =
- base::with_cond(bcx, cond, |bcx|
- controlflow::trans_fail(bcx, info,
- InternedString::new(ConstMathErr::Overflow(op).description())));
-
- (bcx, result)
- }
-}
-
-// Check if an integer or vector contains a nonzero element.
-fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- value: ValueRef,
- binop_debug_loc: DebugLoc) -> ValueRef {
- let llty = val_ty(value);
- let kind = llty.kind();
- match kind {
- TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
- TypeKind::Vector => {
- // Check if any elements of the vector are nonzero by treating
- // it as a wide integer and checking if the integer is nonzero.
- let width = llty.vector_length() as u64 * llty.element_type().int_width();
- let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
- build_nonzero_check(bcx, int_value, binop_debug_loc)
- },
- _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
- }
-}
-
-fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>, lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef) {
- if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
- if bcx.ccx().check_overflow() {
-
- match oop.codegen_strategy() {
- OverflowCodegen::ViaIntrinsic(oop) =>
- oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
- OverflowCodegen::ViaInputCheck(oop) =>
- oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
- }
- } else {
- let res = match oop {
- OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
-
- OverflowOp::Shl =>
- build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Shr =>
- build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
- };
- (bcx, res)
- }
-}
-
-/// We categorize expressions into three kinds. The distinction between
-/// lvalue/rvalue is fundamental to the language. The distinction between the
-/// two kinds of rvalues is an artifact of trans which reflects how we will
-/// generate code for that kind of expression. See trans/expr.rs for more
-/// information.
-#[derive(Copy, Clone)]
-enum ExprKind {
- Lvalue,
- RvalueDps,
- RvalueDatum,
- RvalueStmt
-}
-
-fn expr_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, expr: &hir::Expr) -> ExprKind {
- if tcx.is_method_call(expr.id) {
- // Overloaded operations are generally calls, and hence they are
- // generated via DPS, but there are a few exceptions:
- return match expr.node {
- // `a += b` has a unit result.
- hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
-
- // the deref method invoked for `*a` always yields an `&T`
- hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
-
- // the index method invoked for `a[i]` always yields an `&T`
- hir::ExprIndex(..) => ExprKind::Lvalue,
-
- // in the general case, result could be any type, use DPS
- _ => ExprKind::RvalueDps
- };
- }
-
- match expr.node {
- hir::ExprPath(..) => {
- match tcx.expect_def(expr.id) {
- // Put functions and ctors with the ADTs, as they
- // are zero-sized, so DPS is the cheapest option.
- Def::Struct(..) | Def::Variant(..) |
- Def::Fn(..) | Def::Method(..) => {
- ExprKind::RvalueDps
- }
-
- // Note: there is actually a good case to be made that
- // DefArg's, particularly those of immediate type, ought to
- // considered rvalues.
- Def::Static(..) |
- Def::Upvar(..) |
- Def::Local(..) => ExprKind::Lvalue,
-
- Def::Const(..) |
- Def::AssociatedConst(..) => ExprKind::RvalueDatum,
-
- def => {
- span_bug!(
- expr.span,
- "uncategorized def for expr {}: {:?}",
- expr.id,
- def);
- }
- }
- }
-
- hir::ExprType(ref expr, _) => {
- expr_kind(tcx, expr)
- }
-
- hir::ExprUnary(hir::UnDeref, _) |
- hir::ExprField(..) |
- hir::ExprTupField(..) |
- hir::ExprIndex(..) => {
- ExprKind::Lvalue
- }
-
- hir::ExprCall(..) |
- hir::ExprMethodCall(..) |
- hir::ExprStruct(..) |
- hir::ExprTup(..) |
- hir::ExprIf(..) |
- hir::ExprMatch(..) |
- hir::ExprClosure(..) |
- hir::ExprBlock(..) |
- hir::ExprRepeat(..) |
- hir::ExprVec(..) => {
- ExprKind::RvalueDps
- }
-
- hir::ExprLit(ref lit) if lit.node.is_str() => {
- ExprKind::RvalueDps
- }
-
- hir::ExprBreak(..) |
- hir::ExprAgain(..) |
- hir::ExprRet(..) |
- hir::ExprWhile(..) |
- hir::ExprLoop(..) |
- hir::ExprAssign(..) |
- hir::ExprInlineAsm(..) |
- hir::ExprAssignOp(..) => {
- ExprKind::RvalueStmt
- }
-
- hir::ExprLit(_) | // Note: LitStr is carved out above
- hir::ExprUnary(..) |
- hir::ExprBox(_) |
- hir::ExprAddrOf(..) |
- hir::ExprBinary(..) |
- hir::ExprCast(..) => {
- ExprKind::RvalueDatum
- }
- }
-}
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use adt;
-use adt::GetDtorType; // for tcx.dtor_type()
use base::*;
use build::*;
-use callee::{Callee, ArgVals};
-use cleanup;
-use cleanup::CleanupMethods;
+use callee::{Callee};
use common::*;
use debuginfo::DebugLoc;
-use expr;
use machine::*;
use monomorphize;
use trans_item::TransItem;
+use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use value::Value;
+use Disr;
use arena::TypedArena;
use syntax_pos::DUMMY_SP;
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&args), None).bcx
+ .call(bcx, debug_loc, &args, None).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
- drop_ty_core(bcx, v, t, debug_loc, false, None)
+ drop_ty_core(bcx, v, t, debug_loc, false)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
- skip_dtor: bool,
- drop_hint: Option<cleanup::DropHintValue>)
+ skip_dtor: bool)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
- debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
+ debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty");
- let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
v
};
- match drop_hint {
- Some(drop_hint) => {
- let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
- let moved_val =
- C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
- let may_need_drop =
- ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
- bcx = with_cond(bcx, may_need_drop, |cx| {
- Call(cx, glue, &[ptr], debug_loc);
- cx
- })
- }
- None => {
- // No drop-hint ==> call standard drop glue
- Call(bcx, glue, &[ptr], debug_loc);
- }
- }
+ // No drop-hint ==> call standard drop glue
+ Call(bcx, glue, &[ptr], debug_loc);
}
bcx
}
let vp = alloc_ty(bcx, t, "");
call_lifetime_start(bcx, vp);
store_ty(bcx, v, vp, t);
- let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
+ let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
call_lifetime_end(bcx, vp);
bcx
}
g,
TransItem::DropGlue(g).to_raw_string(),
ccx.codegen_unit().name());
-
- ccx.stats().n_fallback_instantiations.set(ccx.stats()
- .n_fallback_instantiations
- .get() + 1);
}
}
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);
- let bcx = fcx.init(false, None);
+ let bcx = fcx.init(false);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
fcx.finish(bcx, DebugLoc::None);
}
-
-fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
- struct_data: ValueRef)
- -> Block<'blk, 'tcx> {
- assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
-
- let repr = adt::represent_type(bcx.ccx(), t);
- let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
- let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
- let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
- let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
-
- let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
- bcx
- } else {
- let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
- let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
- let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
- let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
- let drop_flag_neither_initialized_nor_cleared =
- And(bcx, not_init, not_done, DebugLoc::None);
- with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
- let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
- Call(cx, llfn, &[], DebugLoc::None);
- cx
- })
- };
-
- let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
- with_cond(bcx, drop_flag_dtor_needed, |cx| {
- trans_struct_drop(cx, t, struct_data)
- })
-}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef)
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
- bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
+ bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t);
let (sized_args, unsized_args);
let args: &[ValueRef] = if type_is_sized(tcx, t) {
sized_args = [v0];
&sized_args
} else {
- unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
+ unsized_args = [
+ Load(bcx, get_dataptr(bcx, v0)),
+ Load(bcx, get_meta(bcx, v0))
+ ];
&unsized_args
};
let trait_ref = ty::Binder(ty::TraitRef {
def_id: tcx.lang_items.drop_trait().unwrap(),
- substs: Substs::new_trait(tcx, vec![], vec![], t)
+ substs: Substs::new_trait(tcx, t, &[])
});
let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) {
traits::VtableImpl(data) => data,
};
let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
- .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
+ .call(bcx, DebugLoc::None, args, None).bcx;
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
- // Don't use type_of::sizing_type_of because that expects t to be sized.
+ // Don't use type_of::sizing_type_of because that expects t to be sized,
+ // and it also rounds up to alignment, which we want to avoid,
+ // as the unsized field's alignment could be smaller.
assert!(!t.is_simd());
- let repr = adt::represent_type(ccx, t);
- let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
- debug!("DST {} sizing_type: {:?}", t, sizing_type);
- let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
- let sized_align = llalign_of_min(ccx, sizing_type.prefix());
+ let layout = ccx.layout_of(t);
+ debug!("DST {} layout: {:?}", t, layout);
+
+ let (sized_size, sized_align) = match *layout {
+ ty::layout::Layout::Univariant { ref variant, .. } => {
+ (variant.min_size().bytes(), variant.align.abi())
+ }
+ _ => {
+                    bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}",
+ t, layout);
+ }
+ };
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
- let mut size = bcx.add(sized_size, unsized_size);
-
- // Issue #27023: If there is a drop flag, *now* we add 1
- // to the size. (We can do this without adding any
- // padding because drop flags do not have any alignment
- // constraints.)
- if sizing_type.needs_drop_flag() {
- size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
- }
+ let size = bcx.add(sized_size, unsized_size);
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
- let inttype = Type::int(bcx.ccx());
- let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
-
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
- let llval = expr::get_dataptr(bcx, v0);
+ let llval = get_dataptr(bcx, v0);
let llbox = Load(bcx, llval);
- let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
- let drop_flag_not_dropped_already =
- ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
- with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
- let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
- let info = expr::get_meta(bcx, v0);
- let info = Load(bcx, info);
- let (llsize, llalign) =
- size_and_align_of_dst(&bcx.build(), content_ty, info);
-
- // `Box<ZeroSizeType>` does not allocate.
- let needs_free = ICmp(bcx,
- llvm::IntNE,
- llsize,
- C_uint(bcx.ccx(), 0u64),
- DebugLoc::None);
- with_cond(bcx, needs_free, |bcx| {
- trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
- })
+ let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+ let info = get_meta(bcx, v0);
+ let info = Load(bcx, info);
+ let (llsize, llalign) =
+ size_and_align_of_dst(&bcx.build(), content_ty, info);
+
+ // `Box<ZeroSizeType>` does not allocate.
+ let needs_free = ICmp(bcx,
+ llvm::IntNE,
+ llsize,
+ C_uint(bcx.ccx(), 0u64),
+ DebugLoc::None);
+ with_cond(bcx, needs_free, |bcx| {
+ trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
- let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
- let drop_flag_not_dropped_already =
- ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
- with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
- let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
- trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
- })
- }
- }
- ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
- match (def.dtor_kind(), skip_dtor) {
- (ty::TraitDtor(true), false) => {
- // FIXME(16758) Since the struct is unsized, it is hard to
- // find the drop flag (which is at the end of the struct).
- // Lets just ignore the flag and pretend everything will be
- // OK.
- if type_is_sized(bcx.tcx(), t) {
- trans_struct_drop_flag(bcx, t, v0)
- } else {
- // Give the user a heads up that we are doing something
- // stupid and dangerous.
- bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
- because the struct is unsized. See issue \
- #16758", t));
- trans_struct_drop(bcx, t, v0)
- }
- }
- (ty::TraitDtor(false), false) => {
- trans_struct_drop(bcx, t, v0)
- }
- (ty::NoDtor, _) | (_, true) => {
- // No dtor? Just the default case
- iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
- }
+ let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
+ trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
}
}
ty::TyTrait(..) => {
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
- let data_ptr = expr::get_dataptr(bcx, v0);
- let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
+ let data_ptr = get_dataptr(bcx, v0);
+ let vtable_ptr = Load(bcx, get_meta(bcx, v0));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
DebugLoc::None);
bcx
}
+ ty::TyStruct(def, _) | ty::TyEnum(def, _)
+ if def.dtor_kind().is_present() && !skip_dtor => {
+ trans_struct_drop(bcx, t, v0)
+ }
_ => {
if bcx.fcx.type_needs_drop(t) {
- iter_structural_ty(bcx,
- v0,
- t,
- |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
+ drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
}
}
+
+// Iterates through the elements of a structural type, dropping them.
+fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ av: ValueRef,
+ t: Ty<'tcx>)
+ -> Block<'blk, 'tcx> {
+ let _icx = push_ctxt("drop_structural_ty");
+
+ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ repr: &adt::Repr<'tcx>,
+ av: adt::MaybeSizedValue,
+ variant: ty::VariantDef<'tcx>,
+ substs: &Substs<'tcx>)
+ -> Block<'blk, 'tcx> {
+ let _icx = push_ctxt("iter_variant");
+ let tcx = cx.tcx();
+ let mut cx = cx;
+
+ for (i, field) in variant.fields.iter().enumerate() {
+ let arg = monomorphize::field_ty(tcx, substs, field);
+ cx = drop_ty(cx,
+ adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
+ arg, DebugLoc::None);
+ }
+ return cx;
+ }
+
+ let value = if type_is_sized(cx.tcx(), t) {
+ adt::MaybeSizedValue::sized(av)
+ } else {
+ let data = Load(cx, get_dataptr(cx, av));
+ let info = Load(cx, get_meta(cx, av));
+ adt::MaybeSizedValue::unsized_(data, info)
+ };
+
+ let mut cx = cx;
+ match t.sty {
+ ty::TyStruct(..) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
+ for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
+ let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);
+
+ let val = if type_is_sized(cx.tcx(), field_ty) {
+ llfld_a
+ } else {
+ let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter");
+ Store(cx, llfld_a, get_dataptr(cx, scratch));
+ Store(cx, value.meta, get_meta(cx, scratch));
+ scratch
+ };
+ cx = drop_ty(cx, val, field_ty, DebugLoc::None);
+ }
+ }
+ ty::TyClosure(_, ref substs) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
+ let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
+ cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
+ }
+ }
+ ty::TyArray(_, n) => {
+ let base = get_dataptr(cx, value.value);
+ let len = C_uint(cx.ccx(), n);
+ let unit_ty = t.sequence_element_type(cx.tcx());
+ cx = tvec::slice_for_each(cx, base, unit_ty, len,
+ |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
+ }
+ ty::TySlice(_) | ty::TyStr => {
+ let unit_ty = t.sequence_element_type(cx.tcx());
+ cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta,
+ |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
+ }
+ ty::TyTuple(ref args) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ for (i, arg) in args.iter().enumerate() {
+ let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
+ cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
+ }
+ }
+ ty::TyEnum(en, substs) => {
+ let fcx = cx.fcx;
+ let ccx = fcx.ccx;
+
+ let repr = adt::represent_type(ccx, t);
+ let n_variants = en.variants.len();
+
+            // NB: we must hit the discriminant first so that structural
+            // comparisons know not to proceed when the discriminants differ.
+
+ match adt::trans_switch(cx, &repr, av, false) {
+ (adt::BranchKind::Single, None) => {
+ if n_variants != 0 {
+ assert!(n_variants == 1);
+ cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
+ &en.variants[0], substs);
+ }
+ }
+ (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
+ cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None);
+
+ // Create a fall-through basic block for the "else" case of
+ // the switch instruction we're about to generate. Note that
+ // we do **not** use an Unreachable instruction here, even
+ // though most of the time this basic block will never be hit.
+ //
+                    // When an enum is dropped its contents are currently
+ // overwritten to DTOR_DONE, which means the discriminant
+ // could have changed value to something not within the actual
+ // range of the discriminant. Currently this function is only
+ // used for drop glue so in this case we just return quickly
+ // from the outer function, and any other use case will only
+ // call this for an already-valid enum in which case the `ret
+ // void` will never be hit.
+ let ret_void_cx = fcx.new_block("enum-iter-ret-void");
+ RetVoid(ret_void_cx, DebugLoc::None);
+ let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
+ let next_cx = fcx.new_block("enum-iter-next");
+
+ for variant in &en.variants {
+ let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}",
+ &variant.disr_val
+ .to_string()));
+ let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
+ AddCase(llswitch, case_val, variant_cx.llbb);
+ let variant_cx = iter_variant(variant_cx,
+ &repr,
+ value,
+ variant,
+ substs);
+ Br(variant_cx, next_cx.llbb, DebugLoc::None);
+ }
+ cx = next_cx;
+ }
+ _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
+ }
+ }
+ _ => {
+ cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
+ }
+ }
+ return cx;
+}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::hir::def_id::DefId;
-use base::push_ctxt;
-use common::*;
-use monomorphize::Instance;
-
-use rustc::dep_graph::DepNode;
-
-fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
- debug!("instantiate_inline({:?})", fn_id);
- let _icx = push_ctxt("instantiate_inline");
- let tcx = ccx.tcx();
- let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id));
-
- tcx.sess
- .cstore
- .maybe_get_item_ast(tcx, fn_id)
- .map(|(_, inline_id)| {
- tcx.map.local_def_id(inline_id)
- })
-}
-
-pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId)
- -> Option<DefId> {
- if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) {
- Some(fn_id)
- } else {
- instantiate_inline(ccx, fn_id)
- }
-}
-
-pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId {
- get_local_instance(ccx, fn_id).unwrap_or(fn_id)
-}
-
-pub fn maybe_inline_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- instance: Instance<'tcx>) -> Instance<'tcx> {
- let def_id = maybe_instantiate_inline(ccx, instance.def);
- Instance {
- def: def_id,
- substs: instance.substs
- }
-}
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
-use llvm::{ValueRef, TypeKind};
-use rustc::ty::subst::Substs;
+use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use base::*;
use build::*;
-use callee::{self, Callee};
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use consts;
-use datum::*;
use debuginfo::DebugLoc;
use declare;
-use expr;
use glue;
use type_of;
use machine;
use Disr;
use rustc::hir;
use syntax::ast;
-use syntax::ptr::P;
use syntax::parse::token;
use rustc::session::Session;
-use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};
use std::cmp::Ordering;
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
- args: callee::CallArgs<'a, 'tcx>,
- dest: expr::Dest,
+ llargs: &[ValueRef],
+ llresult: ValueRef,
call_debug_location: DebugLoc)
-> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
let name = tcx.item_name(def_id).as_str();
let span = match call_debug_location {
- DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
+ DebugLoc::ScopeAt(_, span) => span,
DebugLoc::None => {
span_bug!(fcx.span.unwrap_or(DUMMY_SP),
"intrinsic `{}` called with missing span", name);
}
};
- let cleanup_scope = fcx.push_custom_cleanup_scope();
-
- // For `transmute` we can just trans the input expr directly into dest
- if name == "transmute" {
- let llret_ty = type_of::type_of(ccx, ret_ty);
- match args {
- callee::ArgExprs(arg_exprs) => {
- assert_eq!(arg_exprs.len(), 1);
-
- let (in_type, out_type) = (substs.types[0],
- substs.types[1]);
- let llintype = type_of::type_of(ccx, in_type);
- let llouttype = type_of::type_of(ccx, out_type);
-
- let in_type_size = machine::llbitsize_of_real(ccx, llintype);
- let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
-
- if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
- if out_type_size != 0 {
- // FIXME #19925 Remove this hack after a release cycle.
- let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
- let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
- let llfnty = val_ty(llfn);
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => alloc_ty(bcx, out_type, "ret")
- };
- Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
- if dest == expr::Ignore {
- bcx = glue::drop_ty(bcx, llresult, out_type,
- call_debug_location);
- }
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
- return Result::new(bcx, llresult);
- }
- }
-
- // This should be caught by the intrinsicck pass
- assert_eq!(in_type_size, out_type_size);
-
- let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
- use llvm::TypeKind::*;
- match llkind {
- Half | Float | Double | X86_FP80 | FP128 |
- PPC_FP128 | Integer | Vector | X86_MMX => true,
- _ => false
- }
- };
-
- // An approximation to which types can be directly cast via
- // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
- // but does, importantly, cover SIMD types.
- let in_kind = llintype.kind();
- let ret_kind = llret_ty.kind();
- let bitcast_compatible =
- (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
- in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
- };
-
- let dest = if bitcast_compatible {
- // if we're here, the type is scalar-like (a primitive, a
- // SIMD type or a pointer), and so can be handled as a
- // by-value ValueRef and can also be directly bitcast to the
- // target type. Doing this special case makes conversions
- // like `u32x4` -> `u64x2` much nicer for LLVM and so more
- // efficient (these are done efficiently implicitly in C
- // with the `__m128i` type and so this means Rust doesn't
- // lose out there).
- let expr = &arg_exprs[0];
- let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
- let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
- let val = if datum.kind.is_by_ref() {
- load_ty(bcx, datum.val, datum.ty)
- } else {
- from_immediate(bcx, datum.val)
- };
-
- let cast_val = BitCast(bcx, val, llret_ty);
-
- match dest {
- expr::SaveIn(d) => {
- // this often occurs in a sequence like `Store(val,
- // d); val2 = Load(d)`, so disappears easily.
- Store(bcx, cast_val, d);
- }
- expr::Ignore => {}
- }
- dest
- } else {
- // The types are too complicated to do with a by-value
- // bitcast, so pointer cast instead. We need to cast the
- // dest so the types work out.
- let dest = match dest {
- expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
- expr::Ignore => expr::Ignore
- };
- bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
- dest
- };
-
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
- return match dest {
- expr::SaveIn(d) => Result::new(bcx, d),
- expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
- };
-
- }
-
- _ => {
- bug!("expected expr as argument for transmute");
- }
- }
- }
-
- // For `move_val_init` we can evaluate the destination address
- // (the first argument) and then trans the source value (the
- // second argument) directly into the resulting destination
- // address.
- if name == "move_val_init" {
- if let callee::ArgExprs(ref exprs) = args {
- let (dest_expr, source_expr) = if exprs.len() != 2 {
- bug!("expected two exprs as arguments for `move_val_init` intrinsic");
- } else {
- (&exprs[0], &exprs[1])
- };
-
- // evaluate destination address
- let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
- let dest_datum = unpack_datum!(
- bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
- let dest_datum = unpack_datum!(
- bcx, dest_datum.to_appropriate_datum(bcx));
-
- // `expr::trans_into(bcx, expr, dest)` is equiv to
- //
- // `trans(bcx, expr).store_to_dest(dest)`,
- //
- // which for `dest == expr::SaveIn(addr)`, is equivalent to:
- //
- // `trans(bcx, expr).store_to(bcx, addr)`.
- let lldest = expr::Dest::SaveIn(dest_datum.val);
- bcx = expr::trans_into(bcx, source_expr, lldest);
-
- let llresult = C_nil(ccx);
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
- return Result::new(bcx, llresult);
- } else {
- bug!("expected two exprs as arguments for `move_val_init` intrinsic");
- }
- }
-
- // save the actual AST arguments for later (some places need to do
- // const-evaluation on them)
- let expr_arguments = match args {
- callee::ArgExprs(args) => Some(args),
- _ => None,
- };
-
- // Push the arguments.
- let mut llargs = Vec::new();
- bcx = callee::trans_args(bcx,
- Abi::RustIntrinsic,
- fn_ty,
- &mut callee::Intrinsic,
- args,
- &mut llargs,
- cleanup::CustomScope(cleanup_scope));
-
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-
// These are the only intrinsic functions that diverge.
if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location);
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if &name[..] == "unreachable" {
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_nil(ccx));
}
let llret_ty = type_of::type_of(ccx, ret_ty);
- // Get location to store the result. If the user does
- // not care about the result, just make a stack slot
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => {
- if !type_is_zero_size(ccx, ret_ty) {
- let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
- call_lifetime_start(bcx, llresult);
- llresult
- } else {
- C_undef(llret_ty.ptr_to())
- }
- }
- };
-
let simple = get_simple_intrinsic(ccx, &name);
let llval = match (simple, &name[..]) {
(Some(llfn), _) => {
Call(bcx, llfn, &[], call_debug_location)
}
(_, "size_of") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
}
(_, "size_of_val") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
if !type_is_sized(tcx, tp_ty) {
let (llsize, _) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
}
}
(_, "min_align_of") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
C_uint(ccx, type_of::align_of(ccx, tp_ty))
}
(_, "min_align_of_val") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
if !type_is_sized(tcx, tp_ty) {
let (_, llalign) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
}
}
(_, "pref_align_of") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
}
(_, "drop_in_place") => {
- let tp_ty = substs.types[0];
- let ptr = if type_is_sized(tcx, tp_ty) {
+ let tp_ty = substs.type_at(0);
+ let is_sized = type_is_sized(tcx, tp_ty);
+ let ptr = if is_sized {
llargs[0]
} else {
- let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
- Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
- Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
- fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
- scratch.val
+ let scratch = alloc_ty(bcx, tp_ty, "drop");
+ call_lifetime_start(bcx, scratch);
+ Store(bcx, llargs[0], get_dataptr(bcx, scratch));
+ Store(bcx, llargs[1], get_meta(bcx, scratch));
+ scratch
};
glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
+ if !is_sized {
+ call_lifetime_end(bcx, ptr);
+ }
C_nil(ccx)
}
(_, "type_name") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
C_str_slice(ccx, ty_name)
}
(_, "type_id") => {
- C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0]))
- }
- (_, "init_dropped") => {
- let tp_ty = substs.types[0];
- if !type_is_zero_size(ccx, tp_ty) {
- drop_done_fill_mem(bcx, llresult, tp_ty);
- }
- C_nil(ccx)
+ C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0)))
}
(_, "init") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
if !type_is_zero_size(ccx, tp_ty) {
// Just zero out the stack slot. (See comment on base::memzero for explanation)
init_zero_mem(bcx, llresult, tp_ty);
C_nil(ccx)
}
(_, "needs_drop") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
}
copy_intrinsic(bcx,
false,
false,
- substs.types[0],
+ substs.type_at(0),
llargs[1],
llargs[0],
llargs[2],
copy_intrinsic(bcx,
true,
false,
- substs.types[0],
+ substs.type_at(0),
llargs[1],
llargs[0],
llargs[2],
(_, "write_bytes") => {
memset_intrinsic(bcx,
false,
- substs.types[0],
+ substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
copy_intrinsic(bcx,
false,
true,
- substs.types[0],
+ substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
copy_intrinsic(bcx,
true,
true,
- substs.types[0],
+ substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
(_, "volatile_set_memory") => {
memset_intrinsic(bcx,
true,
- substs.types[0],
+ substs.type_at(0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
}
(_, "volatile_load") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
to_immediate(bcx, load, tp_ty)
},
(_, "volatile_store") => {
- let tp_ty = substs.types[0];
+ let tp_ty = substs.type_at(0);
if type_is_fat_ptr(bcx.tcx(), tp_ty) {
- VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
- VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
+ VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0]));
+ VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
Load(bcx, llargs[1])
},
(_, "discriminant_value") => {
- let val_ty = substs.types[0];
+ let val_ty = substs.type_at(0);
match val_ty.sty {
ty::TyEnum(..) => {
let repr = adt::represent_type(ccx, val_ty);
}
(_, name) if name.starts_with("simd_") => {
generic_simd_intrinsic(bcx, name,
- substs,
callee_ty,
- expr_arguments,
&llargs,
ret_ty, llret_ty,
call_debug_location,
match split[1] {
"cxchg" | "cxchgweak" => {
- let sty = &substs.types[0].sty;
+ let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
}
"load" => {
- let sty = &substs.types[0].sty;
+ let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicLoad(bcx, llargs[0], order)
} else {
}
"store" => {
- let sty = &substs.types[0].sty;
+ let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicStore(bcx, llargs[1], llargs[0], order);
} else {
_ => ccx.sess().fatal("unknown atomic operation")
};
- let sty = &substs.types[0].sty;
+ let sty = &substs.type_at(0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
} else {
let llargs = if !any_changes_needed {
// no aggregates to flatten, so no change needed
- llargs
+ llargs.to_vec()
} else {
// there are some aggregates that need to be flattened
// in the LLVM call, so we need to run over the types
// again to find them and extract the arguments
intr.inputs.iter()
- .zip(&llargs)
+ .zip(llargs)
.zip(&arg_tys)
.flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
.collect()
}
}
- // If we made a temporary stack slot, let's clean it up
- match dest {
- expr::Ignore => {
- bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
- call_lifetime_end(bcx, llresult);
- }
- expr::SaveIn(_) => {}
- }
-
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
Result::new(bcx, llresult)
}
SetPersonalityFn(bcx, bcx.fcx.eh_personality());
- let normal = bcx.fcx.new_temp_block("normal");
- let catchswitch = bcx.fcx.new_temp_block("catchswitch");
- let catchpad = bcx.fcx.new_temp_block("catchpad");
- let caught = bcx.fcx.new_temp_block("caught");
+ let normal = bcx.fcx.new_block("normal");
+ let catchswitch = bcx.fcx.new_block("catchswitch");
+ let catchpad = bcx.fcx.new_block("catchpad");
+ let caught = bcx.fcx.new_block("caught");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let tcx = ccx.tcx();
let tydesc = match tcx.lang_items.msvc_try_filter() {
- Some(did) => ::consts::get_static(ccx, did).to_llref(),
+ Some(did) => ::consts::get_static(ccx, did),
None => bug!("msvc_try_filter not defined"),
};
let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
- let then = bcx.fcx.new_temp_block("then");
- let catch = bcx.fcx.new_temp_block("catch");
+ let then = bcx.fcx.new_block("then");
+ let catch = bcx.fcx.new_block("catch");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let bcx = fcx.init(true, None);
- trans(bcx);
+ trans(fcx.init(true));
fcx.cleanup();
llfn
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
(bcx: Block<'blk, 'tcx>,
name: &str,
- substs: &'tcx Substs<'tcx>,
callee_ty: Ty<'tcx>,
- args: Option<&[P<hir::Expr>]>,
llargs: &[ValueRef],
ret_ty: Ty<'tcx>,
llret_ty: Type,
let total_len = in_len as u64 * 2;
- let vector = match args {
- Some(args) => {
- match consts::const_expr(bcx.ccx(), &args[2], substs, None,
- // this should probably help simd error reporting
- consts::TrueConst::Yes) {
- Ok((vector, _)) => vector,
- Err(err) => {
- fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
- "shuffle indices");
- }
- }
- }
- None => llargs[2]
- };
+ let vector = llargs[2];
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
mod common;
mod consts;
mod context;
-mod controlflow;
-mod datum;
mod debuginfo;
mod declare;
mod disr;
-mod expr;
mod glue;
-mod inline;
mod intrinsic;
mod machine;
-mod _match;
mod meth;
mod mir;
mod monomorphize;
use abi::FnType;
use base::*;
use build::*;
-use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
+use callee::{Callee, Virtual, trans_fn_pointer_shim};
use closure;
use common::*;
use consts;
use debuginfo::DebugLoc;
use declare;
-use expr;
use glue;
use machine;
use type_::Type;
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
- assert!(!fcx.needs_ret_allocas);
+ let mut bcx = fcx.init(false);
-
- let dest =
- fcx.llretslotptr.get().map(
- |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+ let dest = fcx.llretslotptr.get();
debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index);
let llargs = get_params(fcx.llfn);
- let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
let callee = Callee {
data: Virtual(vtable_index),
ty: method_ty
};
- bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
+ bcx = callee.call(bcx, DebugLoc::None,
+ &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
get_vtable_methods(tcx, id, substs)
.into_iter()
.map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
- Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
+ Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx)
}))
.collect::<Vec<_>>()
.into_iter()
// the method may have some early-bound lifetimes, add
// regions for those
let method_substs = Substs::for_item(tcx, trait_method_def_id,
- |_, _| ty::ReErased,
+ |_, _| tcx.mk_region(ty::ReErased),
|_, _| tcx.types.err);
// The substitutions we have are on the impl, so we grab
name: Name)
-> ImplMethod<'tcx>
{
- assert!(!substs.types.needs_infer());
+ assert!(!substs.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
let trait_def = tcx.lookup_trait_def(trait_def_id);
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use rustc::mir::repr as mir;
use rustc::mir::repr::TerminatorKind;
+use rustc::mir::repr::Location;
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir::traversal;
use common::{self, Block, BlockAndBuilder};
common::type_is_fat_ptr(bcx.tcx(), ty));
} else if common::type_is_imm_pair(bcx.ccx(), ty) {
// We allow pairs and uses of any of their 2 fields.
+ } else if !analyzer.seen_assigned.contains(index) {
+ // No assignment has been seen, which means that
+ // either the local has been marked as lvalue
+ // already, or there is no possible initialization
+ // for the local, making any reads invalid.
+ // This is useful in weeding out dead temps.
} else {
// These sorts of types require an alloca. Note that
// type_is_immediate() may *still* be true, particularly
fn visit_assign(&mut self,
block: mir::BasicBlock,
lvalue: &mir::Lvalue<'tcx>,
- rvalue: &mir::Rvalue<'tcx>) {
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location) {
debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
if let Some(index) = self.mir.local_index(lvalue) {
self.mark_as_lvalue(index);
}
} else {
- self.visit_lvalue(lvalue, LvalueContext::Store);
+ self.visit_lvalue(lvalue, LvalueContext::Store, location);
}
- self.visit_rvalue(rvalue);
+ self.visit_rvalue(rvalue, location);
}
fn visit_terminator_kind(&mut self,
block: mir::BasicBlock,
- kind: &mir::TerminatorKind<'tcx>) {
+ kind: &mir::TerminatorKind<'tcx>,
+ location: Location) {
match *kind {
mir::TerminatorKind::Call {
func: mir::Operand::Constant(mir::Constant {
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
if let mir::Operand::Consume(ref lvalue) = args[0] {
- self.visit_lvalue(lvalue, LvalueContext::Drop);
+ self.visit_lvalue(lvalue, LvalueContext::Drop, location);
}
}
_ => {}
}
- self.super_terminator_kind(block, kind);
+ self.super_terminator_kind(block, kind, location);
}
fn visit_lvalue(&mut self,
lvalue: &mir::Lvalue<'tcx>,
- context: LvalueContext) {
+ context: LvalueContext,
+ location: Location) {
debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
// Allow uses of projections of immediate pair fields.
// A deref projection only reads the pointer, never needs the lvalue.
if let mir::Lvalue::Projection(ref proj) = *lvalue {
if let mir::ProjectionElem::Deref = proj.elem {
- return self.visit_lvalue(&proj.base, LvalueContext::Consume);
+ return self.visit_lvalue(&proj.base, LvalueContext::Consume, location);
}
}
- self.super_lvalue(lvalue, context);
+ self.super_lvalue(lvalue, context, location);
}
}
// except according to those terms.
use llvm::{self, ValueRef};
-use rustc_const_eval::ErrKind;
+use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
use rustc::middle::lang_items;
use rustc::ty;
use rustc::mir::repr as mir;
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
- let trampoline = this.fcx.new_block(name, None).build();
+ let trampoline = this.fcx.new_block(name).build();
trampoline.set_personality_fn(this.fcx.eh_personality());
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
- let panic_block = self.fcx.new_block("panic", None);
+ let panic_block = self.fcx.new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb);
} else {
// is also constant, then we can produce a warning.
if const_cond == Some(!expected) {
if let Some(err) = const_err {
- let _ = consts::const_err(bcx.ccx(), span,
- Err::<(), _>(err),
- consts::TrueConst::No);
+ let err = ConstEvalErr{ span: span, kind: err };
+ let mut diag = bcx.tcx().sess.struct_span_warn(
+ span, "this expression will panic at run-time");
+ note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
+ diag.emit();
}
}
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx(), def_id,
bcx.ccx().empty_substs_for_def_id(def_id));
- let llfn = callee.reify(bcx.ccx()).val;
+ let llfn = callee.reify(bcx.ccx());
// Translate the actual panic invoke/call.
if let Some(unwind) = cleanup {
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
- callee.reify(bcx.ccx()).val
+ callee.reify(bcx.ccx())
}
Intrinsic => {
- use callee::ArgVals;
- use expr::{Ignore, SaveIn};
use intrinsic::trans_intrinsic_call;
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
- (SaveIn(llargs[0]), &llargs[1..])
+ (llargs[0], &llargs[1..])
+ }
+ ReturnDest::Nothing => {
+ (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
}
- ReturnDest::Nothing => (Ignore, &llargs[..]),
ReturnDest::IndirectOperand(dst, _) |
- ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
+ ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
bcx.with_block(|bcx| {
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
- ArgVals(llargs), dest,
- debug_loc);
+ &llargs, dest, debug_loc);
});
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
let target = self.bcx(target_bb);
- let block = self.fcx.new_block("cleanup", None);
+ let block = self.fcx.new_block("cleanup");
self.landing_pads[target_bb] = Some(block);
let bcx = block.build();
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
self.unreachable_block.unwrap_or_else(|| {
- let bl = self.fcx.new_block("unreachable", None);
+ let bl = self.fcx.new_block("unreachable");
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
- let datum = f.reify(bcx.ccx());
+ let ty = match f.ty.sty {
+ ty::TyFnDef(_, _, f) => bcx.tcx().mk_fn_ptr(f),
+ _ => f.ty
+ };
val = OperandRef {
- val: Immediate(datum.val),
- ty: datum.ty
+ val: Immediate(f.reify(bcx.ccx())),
+ ty: ty
};
}
}
use llvm::{self, ValueRef};
use rustc::middle::const_val::ConstVal;
-use rustc_const_eval::ErrKind;
+use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err};
use rustc_const_math::ConstInt::*;
use rustc_const_math::ConstFloat::*;
-use rustc_const_math::ConstMathErr;
+use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr};
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::mir::repr as mir;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
-use consts::{self, ConstEvalFailure, TrueConst, to_const_int};
+use common::{const_to_opt_int, const_to_opt_uint};
+use consts;
use monomorphize::{self, Instance};
use type_of;
use type_::Type;
use value::Value;
+use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
use std::ptr;
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
mut instance: Instance<'tcx>,
args: IndexVec<mir::Arg, Const<'tcx>>)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
// Try to resolve associated constants.
if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) {
let trait_ref = ty::TraitRef::new(trait_id, instance.substs);
value)
}
- fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
+ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let mut bb = mir::START_BLOCK;
ErrKind::Math(err.clone())
}
};
- match consts::const_err(self.ccx, span, Err(err), TrueConst::Yes) {
- Ok(()) => {}
- Err(err) => if failure.is_ok() { failure = Err(err); }
- }
+
+ let err = ConstEvalErr{ span: span, kind: err };
+ report_const_eval_err(tcx, &err, span, "expression").emit();
+ failure = Err(err);
}
target
}
}
fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
- -> Result<ConstLvalue<'tcx>, ConstEvalFailure> {
+ -> Result<ConstLvalue<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
if let Some(index) = self.mir.local_index(lvalue) {
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
ConstLvalue {
- base: Base::Static(consts::get_static(self.ccx, def_id).val),
+ base: Base::Static(consts::get_static(self.ccx, def_id)),
llextra: ptr::null_mut(),
ty: lvalue.ty(self.mir, tcx).to_ty(tcx)
}
} else if let ty::TyStr = projected_ty.sty {
(Base::Str(base), extra)
} else {
- let val = consts::load_const(self.ccx, base, projected_ty);
+ let v = base;
+ let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
+ let mut val = unsafe { llvm::LLVMGetInitializer(v) };
if val.is_null() {
span_bug!(span, "dereference of non-constant pointer `{:?}`",
Value(base));
}
+ if projected_ty.is_bool() {
+ unsafe {
+ val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref());
+ }
+ }
(Base::Value(val), extra)
}
}
}
fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
match *operand {
mir::Operand::Consume(ref lvalue) => {
Ok(self.const_lvalue(lvalue, span)?.to_const(span))
fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
dest_ty: Ty<'tcx>, span: Span)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let val = match *rvalue {
mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
Callee::def(self.ccx, def_id, substs)
- .reify(self.ccx).val
+ .reify(self.ccx)
}
_ => {
span_bug!(span, "{} cannot be reified to a fn ptr",
}
+fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
+ match t.sty {
+ ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
+ ast::IntTy::I8 => {
+ assert_eq!(input as i8 as i64, input);
+ Some(ConstInt::I8(input as i8))
+ },
+ ast::IntTy::I16 => {
+ assert_eq!(input as i16 as i64, input);
+ Some(ConstInt::I16(input as i16))
+ },
+ ast::IntTy::I32 => {
+ assert_eq!(input as i32 as i64, input);
+ Some(ConstInt::I32(input as i32))
+ },
+ ast::IntTy::I64 => {
+ Some(ConstInt::I64(input))
+ },
+ ast::IntTy::Is => {
+ ConstIsize::new(input, tcx.sess.target.int_type)
+ .ok().map(ConstInt::Isize)
+ },
+ }),
+ ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
+ ast::UintTy::U8 => {
+ assert_eq!(input as u8 as u64, input);
+ Some(ConstInt::U8(input as u8))
+ },
+ ast::UintTy::U16 => {
+ assert_eq!(input as u16 as u64, input);
+ Some(ConstInt::U16(input as u16))
+ },
+ ast::UintTy::U32 => {
+ assert_eq!(input as u32 as u64, input);
+ Some(ConstInt::U32(input as u32))
+ },
+ ast::UintTy::U64 => {
+ Some(ConstInt::U64(input))
+ },
+ ast::UintTy::Us => {
+ ConstUsize::new(input, tcx.sess.target.uint_type)
+ .ok().map(ConstInt::Usize)
+ },
+ }),
+ _ => None,
+ }
+}
+
pub fn const_scalar_binop(op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
}
};
- match result {
- Ok(v) => v,
- Err(ConstEvalFailure::Compiletime(_)) => {
- // We've errored, so we don't have to produce working code.
- let llty = type_of::type_of(bcx.ccx(), ty);
- Const::new(C_undef(llty), ty)
- }
- Err(ConstEvalFailure::Runtime(err)) => {
- span_bug!(constant.span,
- "MIR constant {:?} results in runtime panic: {:?}",
- constant, err.description())
- }
- }
+ result.unwrap_or_else(|_| {
+ // We've errored, so we don't have to produce working code.
+ let llty = type_of::type_of(bcx.ccx(), ty);
+ Const::new(C_undef(llty), ty)
+ })
}
}
pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId)
- -> Result<ValueRef, ConstEvalFailure> {
+ -> Result<ValueRef, ConstEvalErr> {
let instance = Instance::mono(ccx.shared(), def_id);
MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval)
}
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
let const_ty = self.monomorphized_lvalue_ty(lvalue);
- LvalueRef::new_sized(consts::get_static(ccx, def_id).val,
+ LvalueRef::new_sized(consts::get_static(ccx, def_id),
LvalueTy::from_ty(const_ty))
},
mir::Lvalue::Projection(box mir::Projection {
use libc::c_uint;
use llvm::{self, ValueRef};
-use llvm::debuginfo::DIScope;
use rustc::ty;
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use session::config::FullDebugInfo;
use base;
use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null};
-use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind};
+use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext};
use machine;
use type_of;
-use syntax_pos::DUMMY_SP;
+use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos};
use syntax::parse::token::keywords;
use std::ops::Deref;
locals: IndexVec<mir::Local, LocalRef<'tcx>>,
/// Debug information for MIR scopes.
- scopes: IndexVec<mir::VisibilityScope, DIScope>
+ scopes: IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
}
impl<'blk, 'tcx> MirContext<'blk, 'tcx> {
- pub fn debug_loc(&self, source_info: mir::SourceInfo) -> DebugLoc {
- DebugLoc::ScopeAt(self.scopes[source_info.scope], source_info.span)
+ pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc {
+ // Bail out if debug info emission is not enabled.
+ match self.fcx.debug_context {
+ FunctionDebugContext::DebugInfoDisabled |
+ FunctionDebugContext::FunctionWithoutDebugInfo => {
+ // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call()
+ // relies on debug location to obtain span of the call site.
+ return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata,
+ source_info.span);
+ }
+ FunctionDebugContext::RegularContext(_) =>{}
+ }
+
+ // In order to have a good line stepping behavior in debugger, we overwrite debug
+ // locations of macro expansions with that of the outermost expansion site
+ // (unless the crate is being compiled with `-Z debug-macros`).
+ if source_info.span.expn_id == NO_EXPANSION ||
+ source_info.span.expn_id == COMMAND_LINE_EXPN ||
+ self.fcx.ccx.sess().opts.debugging_opts.debug_macros {
+
+ let scope_metadata = self.scope_metadata_for_loc(source_info.scope,
+ source_info.span.lo);
+ DebugLoc::ScopeAt(scope_metadata, source_info.span)
+ } else {
+ let cm = self.fcx.ccx.sess().codemap();
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ let mut span = source_info.span;
+ while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN {
+ if let Some(callsite_span) = cm.with_expn_info(span.expn_id,
+ |ei| ei.map(|ei| ei.call_site.clone())) {
+ span = callsite_span;
+ } else {
+ break;
+ }
+ }
+ let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo);
+ // Use span of the outermost call site, while keeping the original lexical scope
+ DebugLoc::ScopeAt(scope_metadata, span)
+ }
+ }
+
+ // DILocations inherit source file name from the parent DIScope. Due to macro expansions
+ // it may so happen that the current span belongs to a different file than the DIScope
+ // corresponding to span's containing visibility scope. If so, we need to create a DIScope
+ // "extension" into that file.
+ fn scope_metadata_for_loc(&self, scope_id: mir::VisibilityScope, pos: BytePos)
+ -> llvm::debuginfo::DIScope {
+ let scope_metadata = self.scopes[scope_id].scope_metadata;
+ if pos < self.scopes[scope_id].file_start_pos ||
+ pos >= self.scopes[scope_id].file_end_pos {
+ let cm = self.fcx.ccx.sess().codemap();
+ debuginfo::extend_scope_to_file(self.fcx.ccx,
+ scope_metadata,
+ &cm.lookup_char_pos(pos).file)
+ } else {
+ scope_metadata
+ }
}
}
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
- let bcx = fcx.init(false, None).build();
+ let bcx = fcx.init(true).build();
let mir = bcx.mir();
// Analyze the temps to determine which must be lvalues
analyze::cleanup_kinds(bcx, &mir))
});
+ // Allocate a `Block` for every basic block
+ let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
+ mir.basic_blocks().indices().map(|bb| {
+ if bb == mir::START_BLOCK {
+ fcx.new_block("start")
+ } else {
+ fcx.new_block(&format!("{:?}", bb))
+ }
+ }).collect();
+
// Compute debuginfo scopes from MIR scopes.
let scopes = debuginfo::create_mir_scopes(fcx);
+ let mut mircx = MirContext {
+ mir: mir.clone(),
+ fcx: fcx,
+ llpersonalityslot: None,
+ blocks: block_bcxs,
+ unreachable_block: None,
+ cleanup_kinds: cleanup_kinds,
+ landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
+ scopes: scopes,
+ locals: IndexVec::new(),
+ };
+
// Allocate variable and temp allocas
- let locals = {
- let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals);
+ mircx.locals = {
+ let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals);
let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| {
let ty = bcx.monomorphize(&decl.ty);
- let scope = scopes[decl.source_info.scope];
- let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo;
+ let debug_scope = mircx.scopes[decl.source_info.scope];
+ let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap();
if !lvalue_locals.contains(local.index()) && !dbg {
let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str());
if dbg {
- bcx.with_block(|bcx| {
- declare_local(bcx, decl.name, ty, scope,
- VariableAccess::DirectVariable { alloca: lvalue.llval },
- VariableKind::LocalVariable, decl.source_info.span);
- });
+ let dbg_loc = mircx.debug_loc(decl.source_info);
+ if let DebugLoc::ScopeAt(scope, span) = dbg_loc {
+ bcx.with_block(|bcx| {
+ declare_local(bcx, decl.name, ty, scope,
+ VariableAccess::DirectVariable { alloca: lvalue.llval },
+ VariableKind::LocalVariable, span);
+ });
+ } else {
+ panic!("Unexpected");
+ }
}
LocalRef::Lvalue(lvalue)
});
})).collect()
};
- // Allocate a `Block` for every basic block
- let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
- mir.basic_blocks().indices().map(|bb| {
- if bb == mir::START_BLOCK {
- fcx.new_block("start", None)
- } else {
- fcx.new_block(&format!("{:?}", bb), None)
- }
- }).collect();
-
// Branch to the START block
- let start_bcx = block_bcxs[mir::START_BLOCK];
+ let start_bcx = mircx.blocks[mir::START_BLOCK];
bcx.br(start_bcx.llbb);
// Up until here, IR instructions for this function have explicitly not been annotated with
// emitting should be enabled.
debuginfo::start_emitting_source_locations(fcx);
- let mut mircx = MirContext {
- mir: mir.clone(),
- fcx: fcx,
- llpersonalityslot: None,
- blocks: block_bcxs,
- unreachable_block: None,
- cleanup_kinds: cleanup_kinds,
- landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
- locals: locals,
- scopes: scopes
- };
-
let mut visited = BitVector::new(mir.basic_blocks().len());
let mut rpo = traversal::reverse_postorder(&mir);
/// indirect.
fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir: &mir::Mir<'tcx>,
- scopes: &IndexVec<mir::VisibilityScope, DIScope>,
+ scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let fcx = bcx.fcx();
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
- let arg_scope = if !arg_scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo {
- Some(arg_scope)
+ let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
+ Some(arg_scope.scope_metadata)
} else {
None
};
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
-use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use adt;
use machine;
let size = C_uint(bcx.ccx(), size);
let base = get_dataptr(&bcx, dest.llval);
let bcx = bcx.map_block(|block| {
- tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
+ tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
self.store_operand_direct(block, llslot, tr_elem);
block
})
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
- Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
- Lvalue::new("out"))
+ (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate(
Callee::def(bcx.ccx(), def_id, substs)
- .reify(bcx.ccx()).val)
+ .reify(bcx.ccx()))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::ValueRef;
-use llvm;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::ty::subst::{Subst, Substs};
-use rustc::ty::{self, Ty, TypeFoldable, TyCtxt};
-use attributes;
-use base::{push_ctxt};
-use base;
+use rustc::ty::{self, Ty, TyCtxt};
use common::*;
-use declare;
-use Disr;
-use rustc::hir::map as hir_map;
use rustc::util::ppaux;
-use rustc::hir;
-
-use errors;
-
use std::fmt;
-use trans_item::TransItem;
-
-pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- fn_id: DefId,
- psubsts: &'tcx Substs<'tcx>)
- -> (ValueRef, Ty<'tcx>) {
- debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts);
- assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types());
-
- let _icx = push_ctxt("monomorphic_fn");
-
- let instance = Instance::new(fn_id, psubsts);
-
- let item_ty = ccx.tcx().lookup_item_type(fn_id).ty;
-
- debug!("monomorphic_fn about to subst into {:?}", item_ty);
- let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty);
- debug!("mono_ty = {:?} (post-substitution)", mono_ty);
-
- if let Some(&val) = ccx.instances().borrow().get(&instance) {
- debug!("leaving monomorphic fn {:?}", instance);
- return (val, mono_ty);
- } else {
- assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance)));
- }
-
- debug!("monomorphic_fn({:?})", instance);
-
- ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
-
- let depth;
- {
- let mut monomorphizing = ccx.monomorphizing().borrow_mut();
- depth = match monomorphizing.get(&fn_id) {
- Some(&d) => d, None => 0
- };
-
- debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1);
-
- // Random cut-off -- code that needs to instantiate the same function
- // recursively more than thirty times can probably safely be assumed
- // to be causing an infinite expansion.
- if depth > ccx.sess().recursion_limit.get() {
- let error = format!("reached the recursion limit while instantiating `{}`",
- instance);
- if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) {
- ccx.sess().span_fatal(ccx.tcx().map.span(id), &error);
- } else {
- ccx.sess().fatal(&error);
- }
- }
-
- monomorphizing.insert(fn_id, depth + 1);
- }
-
- let symbol = ccx.symbol_map().get_or_compute(ccx.shared(),
- TransItem::Fn(instance));
-
- debug!("monomorphize_fn mangled to {}", &symbol);
- assert!(declare::get_defined_value(ccx, &symbol).is_none());
-
- // FIXME(nagisa): perhaps needs a more fine grained selection?
- let lldecl = declare::define_internal_fn(ccx, &symbol, mono_ty);
- // FIXME(eddyb) Doubt all extern fn should allow unwinding.
- attributes::unwind(lldecl, true);
-
- ccx.instances().borrow_mut().insert(instance, lldecl);
-
- // we can only monomorphize things in this crate (or inlined into it)
- let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap();
- let map_node = errors::expect(
- ccx.sess().diagnostic(),
- ccx.tcx().map.find(fn_node_id),
- || {
- format!("while instantiating `{}`, couldn't find it in \
- the item map (may have attempted to monomorphize \
- an item defined in a different crate?)",
- instance)
- });
- match map_node {
- hir_map::NodeItem(&hir::Item {
- ref attrs,
- node: hir::ItemFn(..), ..
- }) |
- hir_map::NodeImplItem(&hir::ImplItem {
- ref attrs, node: hir::ImplItemKind::Method(
- hir::MethodSig { .. }, _), ..
- }) |
- hir_map::NodeTraitItem(&hir::TraitItem {
- ref attrs, node: hir::MethodTraitItem(
- hir::MethodSig { .. }, Some(_)), ..
- }) => {
- let trans_item = TransItem::Fn(instance);
-
- if ccx.shared().translation_items().borrow().contains(&trans_item) {
- attributes::from_fn_attrs(ccx, attrs, lldecl);
- unsafe {
- llvm::LLVMSetLinkage(lldecl, llvm::ExternalLinkage);
- }
- } else {
- // FIXME: #34151
- // Normally, getting here would indicate a bug in trans::collector,
- // since it seems to have missed a translation item. When we are
- // translating with non-MIR based trans, however, the results of
- // the collector are not entirely reliable since it bases its
- // analysis on MIR. Thus, we'll instantiate the missing function
- // privately in this codegen unit, so that things keep working.
- ccx.stats().n_fallback_instantiations.set(ccx.stats()
- .n_fallback_instantiations
- .get() + 1);
- trans_item.predefine(ccx, llvm::InternalLinkage);
- trans_item.define(ccx);
- }
- }
-
- hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => {
- let disr = match map_node {
- hir_map::NodeVariant(_) => {
- Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val)
- }
- hir_map::NodeStructCtor(_) => Disr(0),
- _ => bug!()
- };
- attributes::inline(lldecl, attributes::InlineAttr::Hint);
- attributes::set_frame_pointer_elimination(ccx, lldecl);
- base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl);
- }
-
- _ => bug!("can't monomorphize a {:?}", map_node)
- };
-
- ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
-
- debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
- (lldecl, mono_ty)
-}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Instance<'tcx> {
impl<'tcx> Instance<'tcx> {
pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Instance<'tcx> {
- assert!(substs.regions.iter().all(|&r| r == ty::ReErased));
+ assert!(substs.regions().all(|&r| r == ty::ReErased));
Instance { def: def_id, substs: substs }
}
pub fn mono<'a>(scx: &SharedCrateContext<'a, 'tcx>, def_id: DefId) -> Instance<'tcx> {
TransItem::DropGlue(..) => unreachable!(),
// Is there any benefit to using ExternalLinkage?:
TransItem::Fn(ref instance) => {
- if instance.substs.types.is_empty() {
+ if instance.substs.types().next().is_none() {
// This is a non-generic functions, we always
// make it visible externally on the chance that
// it might be used in another codegen unit.
// DefId, we use the location of the impl after all.
if tcx.trait_of_item(instance.def).is_some() {
- let self_ty = instance.substs.types[0];
+ let self_ty = instance.substs.type_at(0);
// This is an implementation of a trait method.
return characteristic_def_id_of_type(self_ty).or(Some(instance.def));
}
use glue::DropGlueKind;
use llvm;
use monomorphize::{self, Instance};
-use inline;
use rustc::dep_graph::DepNode;
use rustc::hir;
-use rustc::hir::map as hir_map;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::subst::Substs;
use rustc_const_eval::fatal_const_eval_err;
use std::hash::{Hash, Hasher};
use syntax::ast::{self, NodeId};
-use syntax::{attr,errors};
+use syntax::attr;
use type_of;
use glue;
use abi::{Abi, FnType};
let def_id = ccx.tcx().map.local_def_id(node_id);
let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*)
let item = ccx.tcx().map.expect_item(node_id);
- if let hir::ItemStatic(_, m, ref expr) = item.node {
- match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) {
+ if let hir::ItemStatic(_, m, _) = item.node {
+ match consts::trans_static(&ccx, m, item.id, &item.attrs) {
Ok(_) => { /* Cool, everything's alright. */ },
Err(err) => {
// FIXME: shouldn't this be a `span_err`?
fatal_const_eval_err(
- ccx.tcx(), &err, expr.span, "static");
+ ccx.tcx(), &err, item.span, "static");
}
};
} else {
let ty = ccx.tcx().lookup_item_type(def_id).ty;
let llty = type_of::type_of(ccx, ty);
- match ccx.tcx().map.get(node_id) {
- hir::map::NodeItem(&hir::Item {
- span, node: hir::ItemStatic(..), ..
- }) => {
- let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| {
- ccx.sess().span_fatal(span,
- &format!("symbol `{}` is already defined", symbol_name))
- });
+ let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| {
+ ccx.sess().span_fatal(ccx.tcx().map.span(node_id),
+ &format!("symbol `{}` is already defined", symbol_name))
+ });
- unsafe { llvm::LLVMSetLinkage(g, linkage) };
- }
+ unsafe { llvm::LLVMSetLinkage(g, linkage) };
- item => bug!("predefine_static: expected static, found {:?}", item)
- }
+ let instance = Instance::mono(ccx.shared(), def_id);
+ ccx.instances().borrow_mut().insert(instance, g);
+ ccx.statics().borrow_mut().insert(g, def_id);
}
fn predefine_fn(ccx: &CrateContext<'a, 'tcx>,
instance: Instance<'tcx>,
linkage: llvm::Linkage,
symbol_name: &str) {
- assert!(!instance.substs.types.needs_infer() &&
- !instance.substs.types.has_param_types());
-
- let instance = inline::maybe_inline_instance(ccx, instance);
+ assert!(!instance.substs.needs_infer() &&
+ !instance.substs.has_param_types());
let item_ty = ccx.tcx().lookup_item_type(instance.def).ty;
let item_ty = ccx.tcx().erase_regions(&item_ty);
let mono_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &item_ty);
- let fn_node_id = ccx.tcx().map.as_local_node_id(instance.def).unwrap();
- let map_node = errors::expect(
- ccx.sess().diagnostic(),
- ccx.tcx().map.find(fn_node_id),
- || {
- format!("while instantiating `{}`, couldn't find it in \
- the item map (may have attempted to monomorphize \
- an item defined in a different crate?)",
- instance)
- });
-
- match map_node {
- hir_map::NodeItem(&hir::Item {
- ref attrs, node: hir::ItemFn(..), ..
- }) |
- hir_map::NodeTraitItem(&hir::TraitItem {
- ref attrs, node: hir::MethodTraitItem(..), ..
- }) |
- hir_map::NodeImplItem(&hir::ImplItem {
- ref attrs, node: hir::ImplItemKind::Method(..), ..
- }) => {
- let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty);
- unsafe { llvm::LLVMSetLinkage(lldecl, linkage) };
- base::set_link_section(ccx, lldecl, attrs);
- if linkage == llvm::LinkOnceODRLinkage ||
- linkage == llvm::WeakODRLinkage {
- llvm::SetUniqueComdat(ccx.llmod(), lldecl);
- }
+ let attrs = ccx.tcx().get_attrs(instance.def);
+ let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty);
+ unsafe { llvm::LLVMSetLinkage(lldecl, linkage) };
+ base::set_link_section(ccx, lldecl, &attrs);
+ if linkage == llvm::LinkOnceODRLinkage ||
+ linkage == llvm::WeakODRLinkage {
+ llvm::SetUniqueComdat(ccx.llmod(), lldecl);
+ }
- attributes::from_fn_attrs(ccx, attrs, lldecl);
- ccx.instances().borrow_mut().insert(instance, lldecl);
- }
- _ => bug!("Invalid item for TransItem::Fn: `{:?}`", map_node)
- };
+ attributes::from_fn_attrs(ccx, &attrs, lldecl);
+ ccx.instances().borrow_mut().insert(instance, lldecl);
}
fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>,
pub fn requests_inline(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool {
match *self {
TransItem::Fn(ref instance) => {
- !instance.substs.types.is_empty() || {
+ instance.substs.types().next().is_some() || {
let attributes = tcx.get_attrs(instance.def);
attr::requests_inline(&attributes[..])
}
pub fn is_instantiated_only_on_demand(&self) -> bool {
match *self {
- TransItem::Fn(ref instance) => !instance.def.is_local() ||
- !instance.substs.types.is_empty(),
+ TransItem::Fn(ref instance) => {
+ !instance.def.is_local() || instance.substs.types().next().is_some()
+ }
TransItem::DropGlue(..) => true,
TransItem::Static(..) => false,
}
pub fn is_generic_fn(&self) -> bool {
match *self {
- TransItem::Fn(ref instance) => !instance.substs.types.is_empty(),
+ TransItem::Fn(ref instance) => {
+ instance.substs.types().next().is_some()
+ }
TransItem::DropGlue(..) |
TransItem::Static(..) => false,
}
/// Same as `unique_type_name()` but with the result pushed onto the given
/// `output` parameter.
pub fn push_unique_type_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- t: ty::Ty<'tcx>,
+ t: Ty<'tcx>,
output: &mut String) {
match t.sty {
ty::TyBool => output.push_str("bool"),
ty::TyStruct(adt_def, substs) |
ty::TyEnum(adt_def, substs) => {
push_item_name(tcx, adt_def.did, output);
- push_type_params(tcx, &substs.types, &[], output);
+ push_type_params(tcx, substs, &[], output);
},
ty::TyTuple(component_types) => {
output.push('(');
ty::TyTrait(ref trait_data) => {
push_item_name(tcx, trait_data.principal.def_id(), output);
push_type_params(tcx,
- &trait_data.principal.skip_binder().substs.types,
+ trait_data.principal.skip_binder().substs,
&trait_data.projection_bounds,
output);
},
output.push_str("{");
output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize()));
output.push_str("}");
- push_type_params(tcx, &closure_substs.func_substs.types, &[], output);
+ push_type_params(tcx, closure_substs.func_substs, &[], output);
}
ty::TyError |
ty::TyInfer(_) |
}
fn push_type_params<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- types: &[Ty<'tcx>],
+ substs: &Substs<'tcx>,
projections: &[ty::PolyExistentialProjection<'tcx>],
output: &mut String) {
- if types.is_empty() && projections.is_empty() {
+ if substs.types().next().is_none() && projections.is_empty() {
return;
}
output.push('<');
- for &type_parameter in types {
+ for type_parameter in substs.types() {
push_unique_type_name(tcx, type_parameter, output);
output.push_str(", ");
}
instance: Instance<'tcx>,
output: &mut String) {
push_item_name(tcx, instance.def, output);
- push_type_params(tcx, &instance.substs.types, &[], output);
+ push_type_params(tcx, instance.substs, &[], output);
}
pub fn def_id_to_string(tcx: TyCtxt, def_id: DefId) -> String {
}
pub fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ty: ty::Ty<'tcx>)
+ ty: Ty<'tcx>)
-> String {
let mut output = String::new();
push_unique_type_name(tcx, ty, &mut output);
use llvm;
use llvm::ValueRef;
use base::*;
-use base;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use consts;
-use datum::*;
use debuginfo::DebugLoc;
-use expr::{Dest, Ignore, SaveIn};
-use expr;
-use machine::llsize_of_alloc;
-use type_::Type;
-use type_of;
-use value::Value;
-use rustc::ty::{self, Ty};
-
-use rustc::hir;
-use rustc_const_eval::eval_length;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-
-#[derive(Copy, Clone, Debug)]
-struct VecTypes<'tcx> {
- unit_ty: Ty<'tcx>,
- llunit_ty: Type
-}
-
-pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- //!
- //
- // [...] allocates a fixed-size array and moves it around "by value".
- // In this case, it means that the caller has already given us a location
- // to store the array of the suitable size, so all we have to do is
- // generate the content.
-
- debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest);
-
- let vt = vec_types_from_expr(bcx, expr);
-
- return match dest {
- Ignore => write_content(bcx, &vt, expr, expr, dest),
- SaveIn(lldest) => {
- // lldest will have type *[T x N], but we want the type *T,
- // so use GEP to convert:
- let lldest = StructGEP(bcx, lldest, 0);
- write_content(bcx, &vt, expr, expr, SaveIn(lldest))
- }
- };
-}
-
-/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
-/// caller must make the reference). "..." is similar except that the memory can be statically
-/// allocated and we return a reference (strings are always by-ref).
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- slice_expr: &hir::Expr,
- content_expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
+use rustc::ty::Ty;
+
+pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
+ data_ptr: ValueRef,
+ unit_ty: Ty<'tcx>,
+ len: ValueRef,
+ f: F)
+ -> Block<'blk, 'tcx> where
+ F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
+{
+ let _icx = push_ctxt("tvec::slice_for_each");
let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- debug!("trans_slice_vec(slice_expr={:?})",
- slice_expr);
-
- let vec_ty = node_id_type(bcx, slice_expr.id);
-
- // Handle the "..." case (returns a slice since strings are always unsized):
- if let hir::ExprLit(ref lit) = content_expr.node {
- if let ast::LitKind::Str(ref s, _) = lit.node {
- let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
- bcx = trans_lit_str(bcx,
- content_expr,
- s.clone(),
- SaveIn(scratch.val));
- return DatumBlock::new(bcx, scratch.to_expr_datum());
- }
- }
-
- // Handle the &[...] case:
- let vt = vec_types_from_expr(bcx, content_expr);
- let count = elements_required(bcx, content_expr);
- debug!(" vt={:?}, count={}", vt, count);
- let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
-
- // Always create an alloca even if zero-sized, to preserve
- // the non-null invariant of the inner slice ptr
- let llfixed;
- // Issue 30018: ensure state is initialized as dropped if necessary.
- if fcx.type_needs_drop(vt.unit_ty) {
- llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, "");
+ // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
+ let zst = type_is_zero_size(bcx.ccx(), unit_ty);
+ let add = |bcx, a, b| if zst {
+ Add(bcx, a, b, DebugLoc::None)
} else {
- let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop");
- llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, "");
- call_lifetime_start(bcx, llfixed);
- };
-
- if count > 0 {
- // Arrange for the backing array to be cleaned up.
- let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
- fcx.schedule_lifetime_end(cleanup_scope, llfixed);
- fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
-
- // Generate the content into the backing array.
- // llfixed has type *[T x N], but we want the type *T,
- // so use GEP to convert
- bcx = write_content(bcx, &vt, slice_expr, content_expr,
- SaveIn(StructGEP(bcx, llfixed, 0)));
+ InBoundsGEP(bcx, a, &[b])
};
- immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
-}
-
-/// Literal strings translate to slices into static memory. This is different from
-/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lit_expr: &hir::Expr,
- str_lit: InternedString,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest);
-
- match dest {
- Ignore => bcx,
- SaveIn(lldest) => {
- let bytes = str_lit.len();
- let llbytes = C_uint(bcx.ccx(), bytes);
- let llcstr = C_cstr(bcx.ccx(), str_lit, false);
- let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
- Store(bcx, llcstr, expr::get_dataptr(bcx, lldest));
- Store(bcx, llbytes, expr::get_meta(bcx, lldest));
- bcx
- }
- }
-}
-
-fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- vt: &VecTypes<'tcx>,
- vstore_expr: &hir::Expr,
- content_expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("tvec::write_content");
- let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})",
- vt, dest, vstore_expr);
-
- match content_expr.node {
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => {
- match dest {
- Ignore => return bcx,
- SaveIn(lldest) => {
- let bytes = s.len();
- let llbytes = C_uint(bcx.ccx(), bytes);
- let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
- if !bcx.unreachable.get() {
- base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1);
- }
- return bcx;
- }
- }
- }
- _ => {
- span_bug!(content_expr.span, "unexpected evec content");
- }
- }
- }
- hir::ExprVec(ref elements) => {
- match dest {
- Ignore => {
- for element in elements {
- bcx = expr::trans_into(bcx, &element, Ignore);
- }
- }
-
- SaveIn(lldest) => {
- let temp_scope = fcx.push_custom_cleanup_scope();
- for (i, element) in elements.iter().enumerate() {
- let lleltptr = GEPi(bcx, lldest, &[i]);
- debug!("writing index {} with lleltptr={:?}",
- i, Value(lleltptr));
- bcx = expr::trans_into(bcx, &element,
- SaveIn(lleltptr));
- let scope = cleanup::CustomScope(temp_scope);
- // Issue #30822: mark memory as dropped after running destructor
- fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None);
- }
- fcx.pop_custom_cleanup_scope(temp_scope);
- }
- }
- return bcx;
- }
- hir::ExprRepeat(ref element, ref count_expr) => {
- match dest {
- Ignore => {
- return expr::trans_into(bcx, &element, Ignore);
- }
- SaveIn(lldest) => {
- match eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() {
- 0 => expr::trans_into(bcx, &element, Ignore),
- 1 => expr::trans_into(bcx, &element, SaveIn(lldest)),
- count => {
- let elem = unpack_datum!(bcx, expr::trans(bcx, &element));
- let bcx = iter_vec_loop(bcx, lldest, vt,
- C_uint(bcx.ccx(), count),
- |set_bcx, lleltptr, _| {
- elem.shallow_copy(set_bcx, lleltptr)
- });
- bcx
- }
- }
- }
- }
- }
- _ => {
- span_bug!(content_expr.span, "unexpected vec content");
- }
- }
-}
-
-fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr)
- -> VecTypes<'tcx> {
- let vec_ty = node_id_type(bcx, vec_expr.id);
- vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
-}
-
-fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
- -> VecTypes<'tcx> {
- VecTypes {
- unit_ty: unit_ty,
- llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
- }
-}
-
-fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize {
- //! Figure out the number of elements we need to store this content
-
- match content_expr.node {
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => s.len(),
- _ => {
- span_bug!(content_expr.span, "unexpected evec content")
- }
- }
- },
- hir::ExprVec(ref es) => es.len(),
- hir::ExprRepeat(_, ref count_expr) => {
- eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap()
- }
- _ => span_bug!(content_expr.span, "unexpected vec content")
- }
-}
-
-/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
-/// which should be by ref.
-pub fn get_fixed_base_and_len(bcx: Block,
- llval: ValueRef,
- vec_length: usize)
- -> (ValueRef, ValueRef) {
- let ccx = bcx.ccx();
-
- let base = expr::get_dataptr(bcx, llval);
- let len = C_uint(ccx, vec_length);
- (base, len)
-}
-
-/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
-/// by-reference. If you have a datum, you would probably prefer to call
-/// `Datum::get_base_and_len()` which will handle any conversions for you.
-pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- llval: ValueRef,
- vec_ty: Ty<'tcx>)
- -> (ValueRef, ValueRef) {
- match vec_ty.sty {
- ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n),
- ty::TySlice(_) | ty::TyStr => {
- let base = Load(bcx, expr::get_dataptr(bcx, llval));
- let len = Load(bcx, expr::get_meta(bcx, llval));
- (base, len)
- }
-
- // Only used for pattern matching.
- ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
- let inner = if type_is_sized(bcx.tcx(), ty) {
- Load(bcx, llval)
- } else {
- llval
- };
- get_base_and_len(bcx, inner, ty)
- },
- _ => bug!("unexpected type in get_base_and_len"),
- }
-}
+ let header_bcx = fcx.new_block("slice_loop_header");
+ let body_bcx = fcx.new_block("slice_loop_body");
+ let next_bcx = fcx.new_block("slice_loop_next");
-fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- data_ptr: ValueRef,
- vt: &VecTypes<'tcx>,
- count: ValueRef,
- f: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
- let _icx = push_ctxt("tvec::iter_vec_loop");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let loop_bcx = fcx.new_temp_block("expr_repeat");
- let next_bcx = fcx.new_temp_block("expr_repeat: next");
-
- Br(bcx, loop_bcx.llbb, DebugLoc::None);
-
- let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
- &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
-
- let bcx = loop_bcx;
-
- let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
- data_ptr
+ let start = if zst {
+ C_uint(bcx.ccx(), 0 as usize)
} else {
- InBoundsGEP(bcx, data_ptr, &[loop_counter])
+ data_ptr
};
- let bcx = f(bcx, lleltptr, vt.unit_ty);
- let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
- AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
+ let end = add(bcx, start, len);
- let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
- CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
+ Br(bcx, header_bcx.llbb, DebugLoc::None);
+ let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]);
- next_bcx
-}
+ let keep_going =
+ ICmp(header_bcx, llvm::IntULT, current, end, DebugLoc::None);
+ CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
-pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- data_ptr: ValueRef,
- unit_ty: Ty<'tcx>,
- len: ValueRef,
- f: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
- let _icx = push_ctxt("tvec::iter_vec_raw");
- let fcx = bcx.fcx;
-
- let vt = vec_types(bcx, unit_ty);
-
- if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
- // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
- iter_vec_loop(bcx, data_ptr, &vt, len, f)
- } else {
- // Calculate the last pointer address we want to handle.
- let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
-
- // Now perform the iteration.
- let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
- Br(bcx, header_bcx.llbb, DebugLoc::None);
- let data_ptr =
- Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
- let not_yet_at_end =
- ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
- let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
- let next_bcx = fcx.new_temp_block("iter_vec_next");
- CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
- let body_bcx = f(body_bcx, data_ptr, unit_ty);
- AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
- &[C_int(bcx.ccx(), 1)]),
- body_bcx.llbb);
- Br(body_bcx, header_bcx.llbb, DebugLoc::None);
- next_bcx
- }
+ let body_bcx = f(body_bcx, if zst { data_ptr } else { current });
+ let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize));
+ AddIncomingToPhi(current, next, body_bcx.llbb);
+ Br(body_bcx, header_bcx.llbb, DebugLoc::None);
+ next_bcx
}
use adt;
use common::*;
use machine;
-use rustc::traits::Reveal;
use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::subst::Substs;
use type_::Type;
cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
// FIXME(eddyb) Temporary sanity check for ty::layout.
- let layout = cx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| {
- t.layout(&infcx)
- });
- match layout {
- Ok(layout) => {
- if !type_is_sized(cx.tcx(), t) {
- if !layout.is_unsized() {
- bug!("layout should be unsized for type `{}` / {:#?}",
- t, layout);
- }
-
- // Unsized types get turned into a fat pointer for LLVM.
- return llsizingty;
- }
- let r = layout.size(&cx.tcx().data_layout).bytes();
- let l = machine::llsize_of_alloc(cx, llsizingty);
- if r != l {
- bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
- r, l, t, layout);
- }
- let r = layout.align(&cx.tcx().data_layout).abi();
- let l = machine::llalign_of_min(cx, llsizingty) as u64;
- if r != l {
- bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
- r, l, t, layout);
- }
- }
- Err(e) => {
- bug!("failed to get layout for `{}`: {}", t, e);
+ let layout = cx.layout_of(t);
+ if !type_is_sized(cx.tcx(), t) {
+ if !layout.is_unsized() {
+ bug!("layout should be unsized for type `{}` / {:#?}",
+ t, layout);
}
+
+ // Unsized types get turned into a fat pointer for LLVM.
+ return llsizingty;
+ }
+
+ let r = layout.size(&cx.tcx().data_layout).bytes();
+ let l = machine::llsize_of_alloc(cx, llsizingty);
+ if r != l {
+ bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
+ r, l, t, layout);
}
+
+ let r = layout.align(&cx.tcx().data_layout).abi();
+ let l = machine::llalign_of_min(cx, llsizingty) as u64;
+ if r != l {
+ bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
+ r, l, t, layout);
+ }
+
llsizingty
}
// avoids creating more than one copy of the enum when one
// of the enum's variants refers to the enum itself.
let repr = adt::represent_type(cx, t);
- let name = llvm_type_name(cx, def.did, &substs.types);
+ let name = llvm_type_name(cx, def.did, substs);
adt::incomplete_type_of(cx, &repr, &name[..])
}
ty::TyClosure(..) => {
// in *after* placing it into the type cache. This prevents
// infinite recursion with recursive struct types.
let repr = adt::represent_type(cx, t);
- let name = llvm_type_name(cx, def.did, &substs.types);
+ let name = llvm_type_name(cx, def.did, substs);
adt::incomplete_type_of(cx, &repr, &name[..])
}
}
fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
did: DefId,
- tps: &[Ty<'tcx>])
+ substs: &Substs<'tcx>)
-> String {
let base = cx.tcx().item_path_str(did);
- let strings: Vec<String> = tps.iter().map(|t| t.to_string()).collect();
+ let strings: Vec<String> = substs.types().map(|t| t.to_string()).collect();
let tstr = if strings.is_empty() {
base
} else {
/// This type must not appear anywhere in other converted types.
const TRAIT_OBJECT_DUMMY_SELF: ty::TypeVariants<'static> = ty::TyInfer(ty::FreshTy(0));
-pub fn ast_region_to_region(tcx: TyCtxt, lifetime: &hir::Lifetime)
- -> ty::Region {
+pub fn ast_region_to_region<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ lifetime: &hir::Lifetime)
+ -> &'tcx ty::Region {
let r = match tcx.named_region_map.defs.get(&lifetime.id) {
None => {
// should have been recorded by the `resolve_lifetime` pass
lifetime.id,
r);
- r
+ tcx.mk_region(r)
}
fn report_elision_failure(
pub fn opt_ast_region_to_region(&self,
rscope: &RegionScope,
default_span: Span,
- opt_lifetime: &Option<hir::Lifetime>) -> ty::Region
+ opt_lifetime: &Option<hir::Lifetime>) -> &'tcx ty::Region
{
let r = match *opt_lifetime {
Some(ref lifetime) => {
ast_region_to_region(self.tcx(), lifetime)
}
- None => match rscope.anon_regions(default_span, 1) {
+ None => self.tcx().mk_region(match rscope.anon_regions(default_span, 1) {
Ok(rs) => rs[0],
Err(params) => {
let ampersand_span = Span { hi: default_span.lo, ..default_span};
err.emit();
ty::ReStatic
}
- }
+ })
};
debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}",
.emit();
return Substs::for_item(tcx, def_id, |_, _| {
- ty::ReStatic
+ tcx.mk_region(ty::ReStatic)
}, |_, _| {
tcx.types.err
});
let expected_num_region_params = decl_generics.regions.len();
let supplied_num_region_params = lifetimes.len();
let regions = if expected_num_region_params == supplied_num_region_params {
- lifetimes.iter().map(|l| ast_region_to_region(tcx, l)).collect()
+ lifetimes.iter().map(|l| *ast_region_to_region(tcx, l)).collect()
} else {
let anon_regions =
rscope.anon_regions(span, expected_num_region_params);
let mut output_assoc_binding = None;
let substs = Substs::for_item(tcx, def_id, |def, _| {
- regions[def.index as usize]
+ let i = def.index as usize - self_ty.is_some() as usize;
+ tcx.mk_region(regions[i])
}, |def, substs| {
let i = def.index as usize;
return ty;
}
- let i = i - self_ty.is_some() as usize;
+ let i = i - self_ty.is_some() as usize - decl_generics.regions.len();
if num_types_provided.map_or(false, |n| i < n) {
// A provided type parameter.
match *parameters {
}
if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::<usize>() == 1 {
- Ok(possible_implied_output_region.unwrap())
+ Ok(*possible_implied_output_region.unwrap())
} else {
Err(Some(lifetimes_for_params))
}
// FIXME(#12938): This is a hack until we have full support for DST.
if Some(did) == self.tcx().lang_items.owned_box() {
- assert_eq!(substs.types.len(), 1);
- return self.tcx().mk_box(substs.types[0]);
+ assert_eq!(substs.types().count(), 1);
+ return self.tcx().mk_box(substs.type_at(0));
}
decl_ty.subst(self.tcx(), substs)
let region_bound = match region_bound {
Some(r) => r,
None => {
- match rscope.object_lifetime_default(span) {
+ tcx.mk_region(match rscope.object_lifetime_default(span) {
Some(r) => r,
None => {
span_err!(self.tcx().sess, span, E0228,
from context; please supply an explicit bound");
ty::ReStatic
}
- }
+ })
}
};
rscope,
ty::ObjectLifetimeDefault::Specific(r));
let t = self.ast_ty_to_ty(rscope1, &mt.ty);
- tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
+ tcx.mk_ref(r, ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
}
hir::TyNever => {
tcx.types.never
sig: &hir::MethodSig,
untransformed_self_ty: Ty<'tcx>,
anon_scope: Option<AnonTypeScope>)
- -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) {
+ -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory<'tcx>) {
self.ty_of_method_or_bare_fn(sig.unsafety,
sig.abi,
Some(untransformed_self_ty),
decl: &hir::FnDecl,
arg_anon_scope: Option<AnonTypeScope>,
ret_anon_scope: Option<AnonTypeScope>)
- -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory)
+ -> (&'tcx ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory<'tcx>)
{
debug!("ty_of_method_or_bare_fn");
// reference) in the arguments, then any anonymous regions in the output
// have that lifetime.
let implied_output_region = match explicit_self_category {
- ty::ExplicitSelfCategory::ByReference(region, _) => Ok(region),
+ ty::ExplicitSelfCategory::ByReference(region, _) => Ok(*region),
_ => self.find_implied_output_region(&arg_tys, arg_pats)
};
rscope: &RegionScope,
untransformed_self_ty: Ty<'tcx>,
explicit_self: &hir::ExplicitSelf)
- -> (Ty<'tcx>, ty::ExplicitSelfCategory)
+ -> (Ty<'tcx>, ty::ExplicitSelfCategory<'tcx>)
{
return match explicit_self.node {
SelfKind::Value(..) => {
rscope,
explicit_self.span,
lifetime);
- (self.tcx().mk_ref(
- self.tcx().mk_region(region),
+ (self.tcx().mk_ref(region,
ty::TypeAndMut {
ty: untransformed_self_ty,
mutbl: mutability
ty::ExplicitSelfCategory::ByValue
} else {
match explicit_type.sty {
- ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(*r, mt.mutbl),
+ ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(r, mt.mutbl),
ty::TyBox(_) => ty::ExplicitSelfCategory::ByBox,
_ => ty::ExplicitSelfCategory::ByValue,
}
explicit_region_bounds: &[&hir::Lifetime],
principal_trait_ref: ty::PolyExistentialTraitRef<'tcx>,
builtin_bounds: ty::BuiltinBounds)
- -> Option<ty::Region> // if None, use the default
+ -> Option<&'tcx ty::Region> // if None, use the default
{
let tcx = self.tcx();
if let Err(ErrorReported) =
self.ensure_super_predicates(span, principal_trait_ref.def_id()) {
- return Some(ty::ReStatic);
+ return Some(tcx.mk_region(ty::ReStatic));
}
// No explicit region bound specified. Therefore, examine trait
// If any of the derived region bounds are 'static, that is always
// the best choice.
- if derived_region_bounds.iter().any(|r| ty::ReStatic == *r) {
- return Some(ty::ReStatic);
+ if derived_region_bounds.iter().any(|&r| ty::ReStatic == *r) {
+ return Some(tcx.mk_region(ty::ReStatic));
}
// Determine whether there is exactly one unique region in the set
// and return from functions in multiple places.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Bounds<'tcx> {
- pub region_bounds: Vec<ty::Region>,
+ pub region_bounds: Vec<&'tcx ty::Region>,
pub builtin_bounds: ty::BuiltinBounds,
pub trait_bounds: Vec<ty::PolyTraitRef<'tcx>>,
pub projection_bounds: Vec<ty::PolyProjectionPredicate<'tcx>>,
for &region_bound in &self.region_bounds {
// account for the binder being introduced below; no need to shift `param_ty`
// because, at present at least, it can only refer to early-bound regions
- let region_bound = ty::fold::shift_region(region_bound, 1);
+ let region_bound = tcx.mk_region(ty::fold::shift_region(*region_bound, 1));
vec.push(ty::Binder(ty::OutlivesPredicate(param_ty, region_bound)).to_predicate());
}
// and T is the expected type.
let region_var = self.next_region_var(infer::PatternRegion(pat.span));
let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
- let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
+ let region_ty = tcx.mk_ref(region_var, mt);
// `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
// required. However, we use equality, which is stronger. See (*) for
let inner_ty = self.next_ty_var();
let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
let region = self.next_region_var(infer::PatternRegion(pat.span));
- let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
+ let rptr_ty = tcx.mk_ref(region, mt);
self.demand_eqtype(pat.span, expected, rptr_ty);
(rptr_ty, inner_ty)
}
if let ty::TyTrait(..) = mt.ty.sty {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
- span_err!(self.tcx.sess, span, E0033,
- "type `{}` cannot be dereferenced",
- self.ty_to_string(expected));
+ let type_str = self.ty_to_string(expected);
+ struct_span_err!(self.tcx.sess, span, E0033,
+ "type `{}` cannot be dereferenced", type_str)
+ .span_label(span, &format!("type `{}` cannot be dereferenced", type_str))
+ .emit();
return false
}
}
Some(f) => f,
None => return None
},
- substs: Substs::new_trait(tcx, vec![], vec![], self.cur_ty)
+ substs: Substs::new_trait(tcx, self.cur_ty, &[])
};
let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id);
}
CastError::CastToBool => {
struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`")
+ .span_label(self.span, &format!("unsupported cast"))
.help("compare with zero instead")
.emit();
}
return None;
}
- let arg_param_ty = trait_ref.substs().types[1];
+ let arg_param_ty = trait_ref.substs().type_at(1);
let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty);
debug!("deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty);
if r_borrow_var.is_none() { // create var lazilly, at most once
let coercion = Coercion(span);
let r = self.next_region_var(coercion);
- r_borrow_var = Some(self.tcx.mk_region(r)); // [4] above
+ r_borrow_var = Some(r); // [4] above
}
r_borrow_var.unwrap()
};
let coercion = Coercion(self.origin.span());
let r_borrow = self.next_region_var(coercion);
- let region = self.tcx.mk_region(r_borrow);
- (mt_a.ty, Some(AutoPtr(region, mt_b.mutbl)))
+ (mt_a.ty, Some(AutoPtr(r_borrow, mt_b.mutbl)))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
coerce_unsized_did,
0,
source,
- vec![target]));
+ &[target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
impl_m_span,
impl_m_body_id,
&impl_sig);
- let impl_args = impl_sig.inputs.clone();
let impl_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: impl_m.fty.unsafety,
abi: impl_m.fty.abi,
- sig: ty::Binder(impl_sig)
+ sig: ty::Binder(impl_sig.clone())
}));
debug!("compare_impl_method: impl_fty={:?}", impl_fty);
impl_m_span,
impl_m_body_id,
&trait_sig);
- let trait_args = trait_sig.inputs.clone();
let trait_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: trait_m.fty.unsafety,
abi: trait_m.fty.abi,
- sig: ty::Binder(trait_sig)
+ sig: ty::Binder(trait_sig.clone())
}));
debug!("compare_impl_method: trait_fty={:?}", trait_fty);
impl_fty,
trait_fty);
- let impl_m_iter = match tcx.map.expect_impl_item(impl_m_node_id).node {
- ImplItemKind::Method(ref impl_m_sig, _) => impl_m_sig.decl.inputs.iter(),
- _ => bug!("{:?} is not a method", impl_m)
- };
-
- let (impl_err_span, trait_err_span) = match terr {
- TypeError::Mutability => {
- if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) {
- let trait_m_iter = match tcx.map.expect_trait_item(trait_m_node_id).node {
- TraitItem_::MethodTraitItem(ref trait_m_sig, _) =>
- trait_m_sig.decl.inputs.iter(),
- _ => bug!("{:?} is not a MethodTraitItem", trait_m)
- };
-
- impl_m_iter.zip(trait_m_iter).find(|&(ref impl_arg, ref trait_arg)| {
- match (&impl_arg.ty.node, &trait_arg.ty.node) {
- (&Ty_::TyRptr(_, ref impl_mt), &Ty_::TyRptr(_, ref trait_mt)) |
- (&Ty_::TyPtr(ref impl_mt), &Ty_::TyPtr(ref trait_mt)) =>
- impl_mt.mutbl != trait_mt.mutbl,
- _ => false
- }
- }).map(|(ref impl_arg, ref trait_arg)| {
- match (impl_arg.to_self(), trait_arg.to_self()) {
- (Some(impl_self), Some(trait_self)) =>
- (impl_self.span, Some(trait_self.span)),
- (None, None) => (impl_arg.ty.span, Some(trait_arg.ty.span)),
- _ => bug!("impl and trait fns have different first args, \
- impl: {:?}, trait: {:?}", impl_arg, trait_arg)
- }
- }).unwrap_or((origin.span(), tcx.map.span_if_local(trait_m.def_id)))
- } else {
- (origin.span(), tcx.map.span_if_local(trait_m.def_id))
- }
- }
- TypeError::Sorts(ExpectedFound { expected, found }) => {
- if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) {
- let trait_m_iter = match tcx.map.expect_trait_item(trait_m_node_id).node {
- TraitItem_::MethodTraitItem(ref trait_m_sig, _) =>
- trait_m_sig.decl.inputs.iter(),
- _ => bug!("{:?} is not a MethodTraitItem", trait_m)
- };
- let impl_iter = impl_args.iter();
- let trait_iter = trait_args.iter();
- let arg_idx = impl_iter.zip(trait_iter)
- .position(|(impl_arg_ty, trait_arg_ty)| {
- *impl_arg_ty == found && *trait_arg_ty == expected
- }).unwrap();
- impl_m_iter.zip(trait_m_iter)
- .nth(arg_idx)
- .map(|(impl_arg, trait_arg)|
- (impl_arg.ty.span, Some(trait_arg.ty.span)))
- .unwrap_or(
- (origin.span(), tcx.map.span_if_local(trait_m.def_id)))
- } else {
- (origin.span(), tcx.map.span_if_local(trait_m.def_id))
- }
- }
- _ => (origin.span(), tcx.map.span_if_local(trait_m.def_id))
- };
+ let (impl_err_span, trait_err_span) =
+ extract_spans_for_error_reporting(&infcx, &terr, origin, impl_m,
+ impl_sig, trait_m, trait_sig);
let origin = TypeOrigin::MethodCompatCheck(impl_err_span);
// are zero. Since I don't quite know how to phrase things at
// the moment, give a kind of vague error message.
if trait_params.len() != impl_params.len() {
- span_err!(ccx.tcx.sess, span, E0195,
+ struct_span_err!(ccx.tcx.sess, span, E0195,
"lifetime parameters or bounds on method `{}` do \
- not match the trait declaration",
- impl_m.name);
+ not match the trait declaration",impl_m.name)
+ .span_label(span, &format!("lifetimes do not match trait"))
+ .emit();
return false;
}
return true;
}
+
+ fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>,
+ terr: &TypeError,
+ origin: TypeOrigin,
+ impl_m: &ty::Method,
+ impl_sig: ty::FnSig<'tcx>,
+ trait_m: &ty::Method,
+ trait_sig: ty::FnSig<'tcx>)
+ -> (Span, Option<Span>) {
+ let tcx = infcx.tcx;
+ let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap();
+ let (impl_m_output, impl_m_iter) = match tcx.map.expect_impl_item(impl_m_node_id).node {
+ ImplItemKind::Method(ref impl_m_sig, _) =>
+ (&impl_m_sig.decl.output, impl_m_sig.decl.inputs.iter()),
+ _ => bug!("{:?} is not a method", impl_m)
+ };
+
+ match *terr {
+ TypeError::Mutability => {
+ if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) {
+ let trait_m_iter = match tcx.map.expect_trait_item(trait_m_node_id).node {
+ TraitItem_::MethodTraitItem(ref trait_m_sig, _) =>
+ trait_m_sig.decl.inputs.iter(),
+ _ => bug!("{:?} is not a MethodTraitItem", trait_m)
+ };
+
+ impl_m_iter.zip(trait_m_iter).find(|&(ref impl_arg, ref trait_arg)| {
+ match (&impl_arg.ty.node, &trait_arg.ty.node) {
+ (&Ty_::TyRptr(_, ref impl_mt), &Ty_::TyRptr(_, ref trait_mt)) |
+ (&Ty_::TyPtr(ref impl_mt), &Ty_::TyPtr(ref trait_mt)) =>
+ impl_mt.mutbl != trait_mt.mutbl,
+ _ => false
+ }
+ }).map(|(ref impl_arg, ref trait_arg)| {
+ match (impl_arg.to_self(), trait_arg.to_self()) {
+ (Some(impl_self), Some(trait_self)) =>
+ (impl_self.span, Some(trait_self.span)),
+ (None, None) => (impl_arg.ty.span, Some(trait_arg.ty.span)),
+ _ => bug!("impl and trait fns have different first args, \
+ impl: {:?}, trait: {:?}", impl_arg, trait_arg)
+ }
+ }).unwrap_or((origin.span(), tcx.map.span_if_local(trait_m.def_id)))
+ } else {
+ (origin.span(), tcx.map.span_if_local(trait_m.def_id))
+ }
+ }
+ TypeError::Sorts(ExpectedFound { .. }) => {
+ if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) {
+ let (trait_m_output, trait_m_iter) =
+ match tcx.map.expect_trait_item(trait_m_node_id).node {
+ TraitItem_::MethodTraitItem(ref trait_m_sig, _) =>
+ (&trait_m_sig.decl.output, trait_m_sig.decl.inputs.iter()),
+ _ => bug!("{:?} is not a MethodTraitItem", trait_m)
+ };
+
+ let impl_iter = impl_sig.inputs.iter();
+ let trait_iter = trait_sig.inputs.iter();
+ impl_iter.zip(trait_iter).zip(impl_m_iter).zip(trait_m_iter)
+ .filter_map(|(((impl_arg_ty, trait_arg_ty), impl_arg), trait_arg)| {
+ match infcx.sub_types(true, origin, trait_arg_ty, impl_arg_ty) {
+ Ok(_) => None,
+ Err(_) => Some((impl_arg.ty.span, Some(trait_arg.ty.span)))
+ }
+ })
+ .next()
+ .unwrap_or_else(|| {
+ if infcx.sub_types(false, origin, impl_sig.output,
+ trait_sig.output).is_err() {
+ (impl_m_output.span(), Some(trait_m_output.span()))
+ } else {
+ (origin.span(), tcx.map.span_if_local(trait_m.def_id))
+ }
+ })
+ } else {
+ (origin.span(), tcx.map.span_if_local(trait_m.def_id))
+ }
+ }
+ _ => (origin.span(), tcx.map.span_if_local(trait_m.def_id))
+ }
+ }
}
pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
ty);
cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
- ty, ty::ReScope(cx.parent_scope));
+ ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
return Ok(());
}
ty::TyStruct(def, substs) if def.is_phantom_data() => {
// PhantomData<T> - behaves identically to T
- let ity = substs.types[0];
+ let ity = substs.type_at(0);
iterate_over_potentially_unsafe_regions_in_type(
cx, context, ity, depth+1)
}
use rustc::ty::subst::Substs;
use rustc::ty::FnSig;
use rustc::ty::{self, Ty};
+use rustc::util::nodemap::FnvHashMap;
use {CrateCtxt, require_same_types};
-use std::collections::{HashMap};
use syntax::abi::Abi;
use syntax::ast;
use syntax::parse::token;
let i_ty = tcx.lookup_item_type(def_id);
let substs = Substs::for_item(tcx, def_id,
- |_, _| ty::ReErased,
+ |_, _| tcx.mk_region(ty::ReErased),
|def, _| tcx.mk_param_from_def(def));
let fty = tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy {
}));
let i_n_tps = i_ty.generics.types.len();
if i_n_tps != n_tps {
- struct_span_err!(tcx.sess, it.span, E0094,
- "intrinsic has wrong number of type \
- parameters: found {}, expected {}",
- i_n_tps, n_tps)
- .span_label(it.span, &format!("expected {} type parameter", n_tps))
- .emit();
+ let span = match it.node {
+ hir::ForeignItemFn(_, ref generics) => generics.span().unwrap_or(it.span),
+ hir::ForeignItemStatic(_, _) => it.span
+ };
+
+ struct_span_err!(tcx.sess, span, E0094,
+ "intrinsic has wrong number of type \
+ parameters: found {}, expected {}",
+ i_n_tps, n_tps)
+ .span_label(span, &format!("expected {} type parameter", n_tps))
+ .emit();
} else {
require_same_types(ccx,
TypeOrigin::IntrinsicType(it.span),
], ccx.tcx.types.usize)
}
"rustc_peek" => (1, vec![param(ccx, 0)], param(ccx, 0)),
- "init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)),
+ "init" => (1, Vec::new(), param(ccx, 0)),
"uninit" => (1, Vec::new(), param(ccx, 0)),
"forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),
return
}
- let mut structural_to_nomimal = HashMap::new();
+ let mut structural_to_nomimal = FnvHashMap();
let sig = tcx.no_late_bound_regions(i_ty.ty.fn_sig()).unwrap();
if intr.inputs.len() != sig.inputs.len() {
ccx: &CrateCtxt<'a, 'tcx>,
position: &str,
span: Span,
- structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>,
+ structural_to_nominal: &mut FnvHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>,
expected: &'a intrinsics::Type, t: ty::Ty<'tcx>)
{
use intrinsics::Type::*;
{
let (autoref, unsize) = if let Some(mutbl) = pick.autoref {
let region = self.next_region_var(infer::Autoref(self.span));
- let autoref = AutoPtr(self.tcx.mk_region(region), mutbl);
+ let autoref = AutoPtr(region, mutbl);
(Some(autoref), pick.unsize.map(|target| {
target.adjust_for_autoref(self.tcx, Some(autoref))
}))
// parameters from the type and those from the method.
//
// FIXME -- permit users to manually specify lifetimes
+ let supplied_start = substs.params().len() + method.generics.regions.len();
Substs::for_item(self.tcx, method.def_id, |def, _| {
- if let Some(&r) = substs.regions.get(def.index as usize) {
- r
+ let i = def.index as usize;
+ if i < substs.params().len() {
+ substs.region_at(i)
} else {
self.region_var_for_def(self.span, def)
}
}, |def, cur_substs| {
- if let Some(&ty) = substs.types.get(def.index as usize) {
- ty
+ let i = def.index as usize;
+ if i < substs.params().len() {
+ substs.type_at(i)
} else if supplied_method_types.is_empty() {
self.type_var_for_def(self.span, def, cur_substs)
} else {
- supplied_method_types[def.index as usize - substs.types.len()]
+ supplied_method_types[i - supplied_start]
}
})
}
use rustc::traits;
use rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable};
use rustc::infer::{InferOk, TypeOrigin};
+use rustc::util::nodemap::FnvHashSet;
use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
use rustc::hir;
-use std::collections::HashSet;
use std::mem;
use std::ops::Deref;
use std::rc::Rc;
opt_simplified_steps: Option<Vec<ty::fast_reject::SimplifiedType>>,
inherent_candidates: Vec<Candidate<'tcx>>,
extension_candidates: Vec<Candidate<'tcx>>,
- impl_dups: HashSet<DefId>,
+ impl_dups: FnvHashSet<DefId>,
import_id: Option<ast::NodeId>,
/// Collects near misses when the candidate functions are missing a `self` keyword and is only
item_name: item_name,
inherent_candidates: Vec::new(),
extension_candidates: Vec::new(),
- impl_dups: HashSet::new(),
+ impl_dups: FnvHashSet(),
import_id: None,
steps: Rc::new(steps),
opt_simplified_steps: opt_simplified_steps,
trait_ref.substs,
m);
assert_eq!(m.generics.parent_types as usize,
- trait_ref.substs.types.len());
+ trait_ref.substs.types().count());
assert_eq!(m.generics.parent_regions as usize,
- trait_ref.substs.regions.len());
+ trait_ref.substs.regions().count());
}
// Because this trait derives from a where-clause, it
// artifacts. This means it is safe to put into the
// `WhereClauseCandidate` and (eventually) into the
// `WhereClausePick`.
- assert!(!trait_ref.substs.types.needs_infer());
+ assert!(!trait_ref.substs.needs_infer());
this.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
expr_id: ast::NodeId)
-> Result<(), MethodError<'tcx>>
{
- let mut duplicates = HashSet::new();
+ let mut duplicates = FnvHashSet();
let opt_applicable_traits = self.tcx.trait_map.get(&expr_id);
if let Some(applicable_traits) = opt_applicable_traits {
for trait_candidate in applicable_traits {
}
fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> {
- let mut duplicates = HashSet::new();
+ let mut duplicates = FnvHashSet();
for trait_info in suggest::all_traits(self.ccx) {
if duplicates.insert(trait_info.def_id) {
self.assemble_extension_candidates_for_trait(trait_info.def_id)?;
// are given do not include type/lifetime parameters for the
// method yet. So create fresh variables here for those too,
// if there are any.
- assert_eq!(substs.types.len(), method.generics.parent_types as usize);
- assert_eq!(substs.regions.len(), method.generics.parent_regions as usize);
+ assert_eq!(substs.types().count(), method.generics.parent_types as usize);
+ assert_eq!(substs.regions().count(), method.generics.parent_regions as usize);
if self.mode == Mode::Path {
return impl_ty;
xform_self_ty.subst(self.tcx, substs)
} else {
let substs = Substs::for_item(self.tcx, method.def_id, |def, _| {
- if let Some(&r) = substs.regions.get(def.index as usize) {
- r
+ let i = def.index as usize;
+ if i < substs.params().len() {
+ substs.region_at(i)
} else {
// In general, during probe we erase regions. See
// `impl_self_ty()` for an explanation.
- ty::ReErased
+ self.tcx.mk_region(ty::ReErased)
}
}, |def, cur_substs| {
- if let Some(&ty) = substs.types.get(def.index as usize) {
- ty
+ let i = def.index as usize;
+ if i < substs.params().len() {
+ substs.type_at(i)
} else {
self.type_var_for_def(self.span, def, cur_substs)
}
let impl_ty = self.tcx.lookup_item_type(impl_def_id).ty;
let substs = Substs::for_item(self.tcx, impl_def_id,
- |_, _| ty::ReErased,
+ |_, _| self.tcx.mk_region(ty::ReErased),
|_, _| self.next_ty_var());
(impl_ty, substs)
// inference variables or other artifacts. This
// means they are safe to put into the
// `WhereClausePick`.
- assert!(!trait_ref.substs().types.needs_infer());
+ assert!(!trait_ref.substs().needs_infer());
WhereClausePick(trait_ref.clone())
}
self.autoderef(span, ty).any(|(ty, _)| self.probe(|_| {
let fn_once_substs =
- Substs::new_trait(tcx, vec![self.next_ty_var()], vec![], ty);
+ Substs::new_trait(tcx, ty, &[self.next_ty_var()]);
let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs);
let poly_trait_ref = trait_ref.to_poly_trait_ref();
let obligation = Obligation::misc(span,
use TypeAndSubsts;
use lint;
use util::common::{block_query, ErrorReported, indenter, loop_query};
-use util::nodemap::{DefIdMap, FnvHashMap, NodeMap};
+use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet, NodeMap};
use std::cell::{Cell, Ref, RefCell};
-use std::collections::{HashSet};
use std::mem::replace;
use std::ops::Deref;
use syntax::abi::Abi;
inherited.tables.borrow_mut().liberated_fn_sigs.insert(fn_id, fn_sig);
- fcx.check_block_with_expected(body, ExpectHasType(fcx.ret_ty));
+ // FIXME(aburka) do we need this special case? and should it be is_uninhabited?
+ let expected = if fcx.ret_ty.is_never() {
+ NoExpectation
+ } else {
+ ExpectHasType(fcx.ret_ty)
+ };
+ fcx.check_block_with_expected(body, expected);
fcx
}
}
hir::ItemTy(_, ref generics) => {
let pty_ty = ccx.tcx.node_id_to_type(it.id);
- check_bounds_are_used(ccx, &generics.ty_params, pty_ty);
+ check_bounds_are_used(ccx, generics, pty_ty);
}
hir::ItemForeignMod(ref m) => {
if m.abi == Abi::RustIntrinsic {
// (and anyway, within a fn body the right region may not even
// be something the user can write explicitly, since it might
// be some expression).
- self.next_region_var(infer::MiscVariable(span))
+ *self.next_region_var(infer::MiscVariable(span))
}
fn anon_regions(&self, span: Span, count: usize)
-> Result<Vec<ty::Region>, Option<Vec<ElisionFailureInfo>>> {
Ok((0..count).map(|_| {
- self.next_region_var(infer::MiscVariable(span))
+ *self.next_region_var(infer::MiscVariable(span))
}).collect())
}
}
/// outlive the region `r`.
pub fn register_region_obligation(&self,
ty: Ty<'tcx>,
- region: ty::Region,
+ region: &'tcx ty::Region,
cause: traits::ObligationCause<'tcx>)
{
let mut fulfillment_cx = self.fulfillment_cx.borrow_mut();
//
// FIXME(#27579) all uses of this should be migrated to register_wf_obligation eventually
let cause = traits::ObligationCause::new(span, self.body_id, code);
- self.register_region_obligation(ty, ty::ReEmpty, cause);
+ self.register_region_obligation(ty, self.tcx.mk_region(ty::ReEmpty), cause);
}
/// Registers obligations that all types appearing in `substs` are well-formed.
pub fn add_wf_bounds(&self, substs: &Substs<'tcx>, expr: &hir::Expr)
{
- for &ty in &substs.types {
+ for ty in substs.types() {
self.register_wf_obligation(ty, expr.span, traits::MiscObligation);
}
}
.filter_map(|t| self.default(t).map(|d| (t, d)))
.collect();
- let mut unbound_tyvars = HashSet::new();
+ let mut unbound_tyvars = FnvHashSet();
debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map);
// table then apply defaults until we find a conflict. That default must be the one
// that caused conflict earlier.
fn find_conflicting_default(&self,
- unbound_vars: &HashSet<Ty<'tcx>>,
+ unbound_vars: &FnvHashSet<Ty<'tcx>>,
default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>,
conflict: Ty<'tcx>)
-> Option<type_variable::Default<'tcx>> {
// value whose address was taken can actually be made to live
// as long as it needs to live.
let region = self.next_region_var(infer::AddrOfRegion(expr.span));
- tcx.mk_ref(tcx.mk_region(region), tm)
+ tcx.mk_ref(region, tm)
};
self.write_ty(id, oprnd_t);
}
self.check_path_parameter_count(span, !require_type_space, &mut type_segment);
self.check_path_parameter_count(span, true, &mut fn_segment);
+ let (fn_start, has_self) = match (type_segment, fn_segment) {
+ (_, Some((_, generics))) => {
+ (generics.parent_count(), generics.has_self)
+ }
+ (Some((_, generics)), None) => {
+ (generics.own_count(), generics.has_self)
+ }
+ (None, None) => (0, false)
+ };
let substs = Substs::for_item(self.tcx, def.def_id(), |def, _| {
let mut i = def.index as usize;
- let type_regions = match (type_segment, fn_segment) {
- (_, Some((_, generics))) => generics.parent_regions as usize,
- (Some((_, generics)), None) => generics.regions.len(),
- (None, None) => 0
- };
- let segment = if i < type_regions {
+ let segment = if i < fn_start {
+ i -= has_self as usize;
type_segment
} else {
- i -= type_regions;
+ i -= fn_start;
fn_segment
};
let lifetimes = match segment.map(|(s, _)| &s.parameters) {
}
}, |def, substs| {
let mut i = def.index as usize;
- let (type_types, has_self) = match (type_segment, fn_segment) {
- (_, Some((_, generics))) => {
- (generics.parent_types as usize, generics.has_self)
- }
- (Some((_, generics)), None) => {
- (generics.types.len(), generics.has_self)
- }
- (None, None) => (0, false)
- };
- let can_omit = i >= type_types || !require_type_space;
- let segment = if i < type_types {
+ let can_omit = i >= fn_start || !require_type_space;
+ let segment = if i < fn_start {
// Handle Self first, so we can adjust the index to match the AST.
if has_self && i == 0 {
return opt_self_ty.unwrap_or_else(|| {
i -= has_self as usize;
type_segment
} else {
- i -= type_types;
+ i -= fn_start;
fn_segment
};
let types = match segment.map(|(s, _)| &s.parameters) {
None => &[]
};
+ // Skip over the lifetimes in the same segment.
+ if let Some((_, generics)) = segment {
+ i -= generics.regions.len();
+ }
+
let omitted = can_omit && types.is_empty();
if let Some(ast_ty) = types.get(i) {
// A provided type parameter.
}
pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
- tps: &[hir::TyParam],
+ generics: &hir::Generics,
ty: Ty<'tcx>) {
debug!("check_bounds_are_used(n_tps={}, ty={:?})",
- tps.len(), ty);
+ generics.ty_params.len(), ty);
// make a vector of booleans initially false, set to true when used
- if tps.is_empty() { return; }
- let mut tps_used = vec![false; tps.len()];
+ if generics.ty_params.is_empty() { return; }
+ let mut tps_used = vec![false; generics.ty_params.len()];
for leaf_ty in ty.walk() {
if let ty::TyParam(ParamTy {idx, ..}) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
- tps_used[idx as usize] = true;
+ tps_used[idx as usize - generics.lifetimes.len()] = true;
}
}
- for (i, b) in tps_used.iter().enumerate() {
- if !*b {
- struct_span_err!(ccx.tcx.sess, tps[i].span, E0091,
+ for (&used, param) in tps_used.iter().zip(&generics.ty_params) {
+ if !used {
+ struct_span_err!(ccx.tcx.sess, param.span, E0091,
"type parameter `{}` is unused",
- tps[i].name)
- .span_label(tps[i].span, &format!("unused type parameter"))
+ param.name)
+ .span_label(param.span, &format!("unused type parameter"))
.emit();
}
}
pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
- region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>,
+ region_bound_pairs: Vec<(&'tcx ty::Region, GenericKind<'tcx>)>,
free_region_map: FreeRegionMap,
let call_site_scope = self.call_site_scope.unwrap();
debug!("visit_fn_body body.id {} call_site_scope: {:?}",
body.id, call_site_scope);
+ let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope));
self.type_of_node_must_outlive(infer::CallReturn(span),
body.id,
- ty::ReScope(call_site_scope));
+ call_site_region);
self.region_bound_pairs.truncate(old_region_bounds_pairs_len);
for implication in implied_bounds {
debug!("implication: {:?}", implication);
match implication {
- ImpliedBound::RegionSubRegion(ty::ReFree(free_a),
- ty::ReVar(vid_b)) => {
+ ImpliedBound::RegionSubRegion(&ty::ReFree(free_a),
+ &ty::ReVar(vid_b)) => {
self.add_given(free_a, vid_b);
}
ImpliedBound::RegionSubParam(r_a, param_b) => {
// variable's type enclose at least the variable's scope.
let var_scope = tcx.region_maps.var_scope(id);
+ let var_region = self.tcx.mk_region(ty::ReScope(var_scope));
let origin = infer::BindingTypeIsNotValidAtDecl(span);
- self.type_of_node_must_outlive(origin, id, ty::ReScope(var_scope));
+ self.type_of_node_must_outlive(origin, id, var_region);
let typ = self.resolve_node_type(id);
dropck::check_safety_of_destructor_if_necessary(self, typ, span, var_scope);
// scope of that expression. This also guarantees basic WF.
let expr_ty = self.resolve_node_type(expr.id);
// the region corresponding to this expression
- let expr_region = ty::ReScope(self.tcx.region_maps.node_extent(expr.id));
+ let expr_region = self.tcx.node_scope_region(expr.id);
self.type_must_outlive(infer::ExprTypeIsNotInScope(expr_ty, expr.span),
expr_ty, expr_region);
None => self.resolve_node_type(base.id)
};
if let ty::TyRef(r_ptr, _) = base_ty.sty {
- self.mk_subregion_due_to_dereference(expr.span, expr_region, *r_ptr);
+ self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr);
}
intravisit::walk_expr(self, expr);
let call_site_scope = self.call_site_scope;
debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}",
ret_expr.id, call_site_scope);
+ let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope.unwrap()));
self.type_of_node_must_outlive(infer::CallReturn(ret_expr.span),
ret_expr.id,
- ty::ReScope(call_site_scope.unwrap()));
+ call_site_region);
intravisit::walk_expr(self, expr);
}
/*From:*/ (&ty::TyRef(from_r, ref from_mt),
/*To: */ &ty::TyRef(to_r, ref to_mt)) => {
// Target cannot outlive source, naturally.
- self.sub_regions(infer::Reborrow(cast_expr.span), *to_r, *from_r);
+ self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r);
self.walk_cast(cast_expr, from_mt.ty, to_mt.ty);
}
//
// FIXME(#6268) to support nested method calls, should be callee_id
let callee_scope = self.tcx.region_maps.node_extent(call_expr.id);
- let callee_region = ty::ReScope(callee_scope);
+ let callee_region = self.tcx.mk_region(ty::ReScope(callee_scope));
debug!("callee_region={:?}", callee_region);
derefs,
derefd_ty);
- let s_deref_expr = self.tcx.region_maps.node_extent(deref_expr.id);
- let r_deref_expr = ty::ReScope(s_deref_expr);
+ let r_deref_expr = self.tcx.node_scope_region(deref_expr.id);
for i in 0..derefs {
let method_call = MethodCall::autoderef(deref_expr.id, i as u32);
debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs);
if let ty::TyRef(r_ptr, _) = derefd_ty.sty {
self.mk_subregion_due_to_dereference(deref_expr.span,
- r_deref_expr, *r_ptr);
+ r_deref_expr, r_ptr);
}
match derefd_ty.builtin_deref(true, ty::NoPreference) {
pub fn mk_subregion_due_to_dereference(&mut self,
deref_span: Span,
- minimum_lifetime: ty::Region,
- maximum_lifetime: ty::Region) {
+ minimum_lifetime: &'tcx ty::Region,
+ maximum_lifetime: &'tcx ty::Region) {
self.sub_regions(infer::DerefPointer(deref_span),
minimum_lifetime, maximum_lifetime)
}
span: Span) {
match cmt.cat {
Categorization::Rvalue(region) => {
- match region {
+ match *region {
ty::ReScope(rvalue_scope) => {
let typ = self.resolve_type(cmt.ty);
dropck::check_safety_of_destructor_if_necessary(self,
rvalue_scope);
}
ty::ReStatic => {}
- region => {
+ _ => {
span_bug!(span,
"unexpected rvalue region in rvalue \
destructor safety checking: `{:?}`",
match mt.ty.sty {
ty::TySlice(_) | ty::TyStr => {
self.sub_regions(infer::IndexSlice(index_expr.span),
- r_index_expr, *r_ptr);
+ self.tcx.mk_region(r_index_expr), r_ptr);
}
_ => {}
}
fn type_of_node_must_outlive(&mut self,
origin: infer::SubregionOrigin<'tcx>,
id: ast::NodeId,
- minimum_lifetime: ty::Region)
+ minimum_lifetime: &'tcx ty::Region)
{
let tcx = self.tcx;
let mc = mc::MemCategorizationContext::new(self);
for arg in args {
let arg_ty = self.node_ty(arg.id);
- let re_scope = ty::ReScope(body_scope);
+ let re_scope = self.tcx.mk_region(ty::ReScope(body_scope));
let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty);
debug!("arg_ty={:?} arg_cmt={:?} arg={:?}",
arg_ty,
fn link_autoref(&self,
expr: &hir::Expr,
autoderefs: usize,
- autoref: &adjustment::AutoRef)
+ autoref: &adjustment::AutoRef<'tcx>)
{
debug!("link_autoref(autoref={:?})", autoref);
let mc = mc::MemCategorizationContext::new(self);
}
adjustment::AutoUnsafe(m) => {
- let r = ty::ReScope(self.tcx.region_maps.node_extent(expr.id));
- self.link_region(expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt);
+ let r = self.tcx.node_scope_region(expr.id);
+ self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m), expr_cmt);
}
}
}
expr, callee_scope);
let mc = mc::MemCategorizationContext::new(self);
let expr_cmt = ignore_err!(mc.cat_expr(expr));
- let borrow_region = ty::ReScope(callee_scope);
- self.link_region(expr.span, &borrow_region, ty::ImmBorrow, expr_cmt);
+ let borrow_region = self.tcx.mk_region(ty::ReScope(callee_scope));
+ self.link_region(expr.span, borrow_region, ty::ImmBorrow, expr_cmt);
}
/// Like `link_region()`, except that the region is extracted from the type of `id`,
id, mutbl, cmt_borrowed);
let rptr_ty = self.resolve_node_type(id);
- if let ty::TyRef(&r, _) = rptr_ty.sty {
+ if let ty::TyRef(r, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
- self.link_region(span, &r, ty::BorrowKind::from_mutbl(mutbl),
+ self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl),
cmt_borrowed);
}
}
/// between regions, as explained in `link_reborrowed_region()`.
fn link_region(&self,
span: Span,
- borrow_region: &ty::Region,
+ borrow_region: &'tcx ty::Region,
borrow_kind: ty::BorrowKind,
borrow_cmt: mc::cmt<'tcx>) {
let mut borrow_cmt = borrow_cmt;
let mut borrow_kind = borrow_kind;
let origin = infer::DataBorrowed(borrow_cmt.ty, span);
- self.type_must_outlive(origin, borrow_cmt.ty, *borrow_region);
+ self.type_must_outlive(origin, borrow_cmt.ty, borrow_region);
loop {
debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})",
/// recurse and process `ref_cmt` (see case 2 above).
fn link_reborrowed_region(&self,
span: Span,
- borrow_region: &ty::Region,
+ borrow_region: &'tcx ty::Region,
borrow_kind: ty::BorrowKind,
ref_cmt: mc::cmt<'tcx>,
- ref_region: ty::Region,
+ ref_region: &'tcx ty::Region,
mut ref_kind: ty::BorrowKind,
note: mc::Note)
-> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
debug!("link_reborrowed_region: {:?} <= {:?}",
borrow_region,
ref_region);
- self.sub_regions(cause, *borrow_region, ref_region);
+ self.sub_regions(cause, borrow_region, ref_region);
// If we end up needing to recurse and establish a region link
// with `ref_cmt`, calculate what borrow kind we will end up
origin: infer::ParameterOrigin,
substs: &Substs<'tcx>,
expr_span: Span,
- expr_region: ty::Region) {
+ expr_region: &'tcx ty::Region) {
debug!("substs_wf_in_scope(substs={:?}, \
expr_region={:?}, \
origin={:?}, \
let origin = infer::ParameterInScope(origin, expr_span);
- for ®ion in &substs.regions {
+ for region in substs.regions() {
self.sub_regions(origin.clone(), expr_region, region);
}
- for &ty in &substs.types {
+ for ty in substs.types() {
let ty = self.resolve_type(ty);
self.type_must_outlive(origin.clone(), ty, expr_region);
}
pub fn type_must_outlive(&self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
- region: ty::Region)
+ region: &'tcx ty::Region)
{
let ty = self.resolve_type(ty);
fn components_must_outlive(&self,
origin: infer::SubregionOrigin<'tcx>,
components: Vec<ty::outlives::Component<'tcx>>,
- region: ty::Region)
+ region: &'tcx ty::Region)
{
for component in components {
let origin = origin.clone();
fn param_ty_must_outlive(&self,
origin: infer::SubregionOrigin<'tcx>,
- region: ty::Region,
+ region: &'tcx ty::Region,
param_ty: ty::ParamTy) {
debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
region, param_ty, origin);
fn projection_must_outlive(&self,
origin: infer::SubregionOrigin<'tcx>,
- region: ty::Region,
+ region: &'tcx ty::Region,
projection_ty: ty::ProjectionTy<'tcx>)
{
debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
// If we know that the projection outlives 'static, then we're
// done here.
- if env_bounds.contains(&ty::ReStatic) {
+ if env_bounds.contains(&&ty::ReStatic) {
debug!("projection_must_outlive: 'static as declared bound");
return;
}
if env_bounds.is_empty() && needs_infer {
debug!("projection_must_outlive: no declared bounds");
- for &component_ty in &projection_ty.trait_ref.substs.types {
+ for component_ty in projection_ty.trait_ref.substs.types() {
self.type_must_outlive(origin.clone(), component_ty, region);
}
- for &r in &projection_ty.trait_ref.substs.regions {
+ for r in projection_ty.trait_ref.substs.regions() {
self.sub_regions(origin.clone(), region, r);
}
if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) {
let unique_bound = env_bounds[0];
debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound);
- if projection_ty.trait_ref.substs.regions
- .iter()
- .any(|r| env_bounds.contains(r))
- {
+ if projection_ty.trait_ref.substs.regions().any(|r| env_bounds.contains(&r)) {
debug!("projection_must_outlive: unique declared bound appears in trait ref");
self.sub_regions(origin.clone(), region, unique_bound);
return;
self.verify_generic_bound(origin, generic.clone(), region, verify_bound);
}
- fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound {
+ fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> {
match ty.sty {
ty::TyParam(p) => {
self.param_bound(p)
}
}
- fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound {
+ fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
let param_env = &self.parameter_environment;
debug!("param_bound(param_ty={:?})",
fn projection_declared_bounds(&self,
span: Span,
projection_ty: ty::ProjectionTy<'tcx>)
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
// First assemble bounds from where clauses and traits.
fn projection_bound(&self,
span: Span,
- declared_bounds: Vec<ty::Region>,
+ declared_bounds: Vec<&'tcx ty::Region>,
projection_ty: ty::ProjectionTy<'tcx>)
- -> VerifyBound {
+ -> VerifyBound<'tcx> {
debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})",
declared_bounds, projection_ty);
VerifyBound::AnyRegion(declared_bounds).or(recursive_bound)
}
- fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound {
+ fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> {
let mut bounds = vec![];
for subty in ty.walk_shallow() {
}
fn declared_generic_bounds_from_env(&self, generic: GenericKind<'tcx>)
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
let param_env = &self.parameter_environment;
fn declared_projection_bounds_from_trait(&self,
span: Span,
projection_ty: ty::ProjectionTy<'tcx>)
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
debug!("projection_bounds(projection_ty={:?})",
projection_ty);
match capture {
ty::UpvarCapture::ByValue => freevar_ty,
ty::UpvarCapture::ByRef(borrow) =>
- tcx.mk_ref(tcx.mk_region(borrow.region),
+ tcx.mk_ref(borrow.region,
ty::TypeAndMut {
ty: freevar_ty,
mutbl: borrow.kind.to_mutbl_lossy(),
borrow_id: ast::NodeId,
_borrow_span: Span,
cmt: mc::cmt<'tcx>,
- _loan_region: ty::Region,
+ _loan_region: &'tcx ty::Region,
bk: ty::BorrowKind,
_loan_cause: euv::LoanCause)
{
use rustc::infer::TypeOrigin;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt};
+use rustc::util::nodemap::FnvHashSet;
-use std::collections::HashSet;
use syntax::ast;
use syntax_pos::Span;
use errors::DiagnosticBuilder;
ty::ExplicitSelfCategory::Static => return,
ty::ExplicitSelfCategory::ByValue => self_ty,
ty::ExplicitSelfCategory::ByReference(region, mutability) => {
- fcx.tcx.mk_ref(fcx.tcx.mk_region(region), ty::TypeAndMut {
+ fcx.tcx.mk_ref(region, ty::TypeAndMut {
ty: self_ty,
mutbl: mutability
})
assert_eq!(ty_predicates.parent, None);
let variances = self.tcx().item_variances(item_def_id);
- let mut constrained_parameters: HashSet<_> =
- variances.types
+ let mut constrained_parameters: FnvHashSet<_> =
+ variances[ast_generics.lifetimes.len()..]
.iter().enumerate()
.filter(|&(_, &variance)| variance != ty::Bivariant)
.map(|(index, _)| self.param_ty(ast_generics, index))
None,
&mut constrained_parameters);
- for (index, _) in variances.types.iter().enumerate() {
- let param_ty = self.param_ty(ast_generics, index);
- if constrained_parameters.contains(&Parameter::Type(param_ty)) {
- continue;
- }
- let span = ast_generics.ty_params[index].span;
- self.report_bivariance(span, param_ty.name);
- }
-
- for (index, &variance) in variances.regions.iter().enumerate() {
- if variance != ty::Bivariant {
- continue;
- }
+ for (index, &variance) in variances.iter().enumerate() {
+ let (span, name) = if index < ast_generics.lifetimes.len() {
+ if variance != ty::Bivariant {
+ continue;
+ }
- let span = ast_generics.lifetimes[index].lifetime.span;
- let name = ast_generics.lifetimes[index].lifetime.name;
+ (ast_generics.lifetimes[index].lifetime.span,
+ ast_generics.lifetimes[index].lifetime.name)
+ } else {
+ let index = index - ast_generics.lifetimes.len();
+ let param_ty = self.param_ty(ast_generics, index);
+ if constrained_parameters.contains(&Parameter::Type(param_ty)) {
+ continue;
+ }
+ (ast_generics.ty_params[index].span, param_ty.name)
+ };
self.report_bivariance(span, name);
}
}
fn reject_shadowing_type_parameters(tcx: TyCtxt, span: Span, generics: &ty::Generics) {
let parent = tcx.lookup_generics(generics.parent.unwrap());
- let impl_params: HashSet<_> = parent.types.iter().map(|tp| tp.name).collect();
+ let impl_params: FnvHashSet<_> = parent.types.iter().map(|tp| tp.name).collect();
for method_param in &generics.types {
if impl_params.contains(&method_param.name) {
// Trait impl: take implied bounds from all types that
// appear in the trait reference.
let trait_ref = self.instantiate_type_scheme(span, free_substs, trait_ref);
- trait_ref.substs.types.to_vec()
+ trait_ref.substs.types().collect()
}
None => {
// early-bound versions of them, visible from the
// outside of the function. This is needed by, and
// only populated if there are any `impl Trait`.
- free_to_bound_regions: DefIdMap<ty::Region>
+ free_to_bound_regions: DefIdMap<&'gcx ty::Region>
}
impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> {
return wbcx;
}
+ let gcx = fcx.tcx.global_tcx();
let free_substs = fcx.parameter_environment.free_substs;
- for (i, r) in free_substs.regions.iter().enumerate() {
+ for (i, k) in free_substs.params().iter().enumerate() {
+ let r = if let Some(r) = k.as_region() {
+ r
+ } else {
+ continue;
+ };
match *r {
ty::ReFree(ty::FreeRegion {
bound_region: ty::BoundRegion::BrNamed(def_id, name, _), ..
}) => {
- let bound_region = ty::ReEarlyBound(ty::EarlyBoundRegion {
+ let bound_region = gcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
index: i as u32,
name: name,
- });
+ }));
wbcx.free_to_bound_regions.insert(def_id, bound_region);
}
_ => {
// Convert the type from the function into a type valid outside
// the function, by replacing free regions with early-bound ones.
let outside_ty = gcx.fold_regions(&inside_ty, &mut false, |r, _| {
- match r {
+ match *r {
// 'static is valid everywhere.
- ty::ReStatic => ty::ReStatic,
+ ty::ReStatic => gcx.mk_region(ty::ReStatic),
// Free regions that come from early-bound regions are valid.
ty::ReFree(ty::FreeRegion {
span_err!(self.tcx().sess, span, E0564,
"only named lifetimes are allowed in `impl Trait`, \
but `{}` was found in the type `{}`", r, inside_ty);
- ty::ReStatic
+ gcx.mk_region(ty::ReStatic)
}
ty::ReVar(_) |
}
}
- fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region {
match self.infcx.fully_resolve(&r) {
Ok(r) => r,
Err(e) => {
self.report_error(e);
- ty::ReStatic
+ self.tcx.mk_region(ty::ReStatic)
}
}
}
let source = tcx.lookup_item_type(impl_did).ty;
let trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap();
- let target = trait_ref.substs.types[1];
+ let target = trait_ref.substs.type_at(1);
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)",
source, target);
(&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None),
(&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => {
- infcx.sub_regions(infer::RelateObjectBound(span), *r_b, *r_a);
+ infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
}
// Register an obligation for `A: Trait<B>`.
let cause = traits::ObligationCause::misc(span, impl_node_id);
let predicate = tcx.predicate_for_trait_def(cause, trait_def_id, 0,
- source, vec![target]);
+ source, &[target]);
fulfill_cx.register_predicate_obligation(&infcx, predicate);
// Check that all transitive obligations are satisfied.
use rscope::*;
use rustc::dep_graph::DepNode;
use util::common::{ErrorReported, MemoizationMap};
-use util::nodemap::{NodeMap, FnvHashMap};
+use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet};
use {CrateCtxt, write_ty_to_tcx};
use rustc_const_math::ConstInt;
use std::cell::RefCell;
-use std::collections::HashSet;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::rc::Rc;
let has_self = opt_self.is_some();
let mut parent_has_self = false;
+ let mut own_start = has_self as u32;
let (parent_regions, parent_types) = parent_def_id.map_or((0, 0), |def_id| {
let generics = generics_of_def_id(ccx, def_id);
assert_eq!(generics.parent, None);
assert_eq!(generics.parent_types, 0);
assert_eq!(has_self, false);
parent_has_self = generics.has_self;
+ own_start = generics.count() as u32;
(generics.regions.len() as u32, generics.types.len() as u32)
});
let regions = early_lifetimes.iter().enumerate().map(|(i, l)| {
ty::RegionParameterDef {
name: l.lifetime.name,
- index: parent_regions + i as u32,
+ index: own_start + i as u32,
def_id: tcx.map.local_def_id(l.lifetime.id),
bounds: l.bounds.iter().map(|l| {
ast_region_to_region(tcx, l)
}).collect()
}
- }).collect();
+ }).collect::<Vec<_>>();
// Now create the real type parameters.
+ let type_start = own_start + regions.len() as u32;
let types = ast_generics.ty_params.iter().enumerate().map(|(i, p)| {
- let i = parent_types + has_self as u32 + i as u32;
+ let i = type_start + i as u32;
get_or_create_type_parameter_def(ccx, ast_generics, i, p, allow_defaults)
});
let types: Vec<_> = opt_self.into_iter().chain(types).collect();
tcx.alloc_generics(ty::Generics {
parent: parent_def_id,
- parent_regions: parent_regions as u32,
- parent_types: parent_types as u32,
+ parent_regions: parent_regions,
+ parent_types: parent_types,
regions: regions,
types: types,
has_self: has_self || parent_has_self
-> ty::GenericPredicates<'tcx>
{
let tcx = ccx.tcx;
- let (parent_regions, parent_types) = parent.map_or((0, 0), |def_id| {
+ let parent_count = parent.map_or(0, |def_id| {
let generics = generics_of_def_id(ccx, def_id);
assert_eq!(generics.parent, None);
assert_eq!(generics.parent_regions, 0);
assert_eq!(generics.parent_types, 0);
- (generics.regions.len() as u32, generics.types.len() as u32)
+ generics.count() as u32
});
let ref base_predicates = match parent {
Some(def_id) => {
};
let mut predicates = super_predicates;
+ // Collect the region predicates that were declared inline as
+ // well. In the case of parameters declared on a fn or method, we
+ // have to be careful to only iterate over early-bound regions.
+ let own_start = parent_count + has_self as u32;
+ let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics);
+ for (index, param) in early_lifetimes.iter().enumerate() {
+ let index = own_start + index as u32;
+ let region = ccx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
+ index: index,
+ name: param.lifetime.name
+ }));
+ for bound in ¶m.bounds {
+ let bound_region = ast_region_to_region(ccx.tcx, bound);
+ let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region));
+ predicates.push(outlives.to_predicate());
+ }
+ }
+
// Collect the predicates that were written inline by the user on each
// type parameter (e.g., `<T:Foo>`).
+ let type_start = own_start + early_lifetimes.len() as u32;
for (index, param) in ast_generics.ty_params.iter().enumerate() {
- let index = parent_types + has_self as u32 + index as u32;
+ let index = type_start + index as u32;
let param_ty = ty::ParamTy::new(index, param.name).to_ty(ccx.tcx);
let bounds = compute_bounds(&ccx.icx(&(base_predicates, ast_generics)),
param_ty,
predicates.extend(bounds.predicates(ccx.tcx, param_ty));
}
- // Collect the region predicates that were declared inline as
- // well. In the case of parameters declared on a fn or method, we
- // have to be careful to only iterate over early-bound regions.
- let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics);
- for (index, param) in early_lifetimes.iter().enumerate() {
- let index = parent_regions + index as u32;
- let region =
- ty::ReEarlyBound(ty::EarlyBoundRegion {
- index: index,
- name: param.lifetime.name
- });
- for bound in ¶m.bounds {
- let bound_region = ast_region_to_region(ccx.tcx, bound);
- let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region));
- predicates.push(outlives.to_predicate());
- }
- }
-
// Add in the bounds that appear in the where-clause
let where_clause = &ast_generics.where_clause;
for predicate in &where_clause.predicates {
param_id: ast::NodeId,
param_bounds: &[hir::TyParamBound],
where_clause: &hir::WhereClause)
- -> ty::ObjectLifetimeDefault
+ -> ty::ObjectLifetimeDefault<'tcx>
{
let inline_bounds = from_bounds(ccx, param_bounds);
let where_bounds = from_predicates(ccx, param_id, &where_clause.predicates);
- let all_bounds: HashSet<_> = inline_bounds.into_iter()
- .chain(where_bounds)
- .collect();
+ let all_bounds: FnvHashSet<_> = inline_bounds.into_iter()
+ .chain(where_bounds)
+ .collect();
return if all_bounds.len() > 1 {
ty::ObjectLifetimeDefault::Ambiguous
} else if all_bounds.len() == 0 {
fn from_bounds<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
bounds: &[hir::TyParamBound])
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
bounds.iter()
.filter_map(|bound| {
fn from_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
param_id: ast::NodeId,
predicates: &[hir::WherePredicate])
- -> Vec<ty::Region>
+ -> Vec<&'tcx ty::Region>
{
predicates.iter()
.flat_map(|predicate| {
}
Substs::for_item(tcx, def_id,
- |def, _| def.to_early_bound_region(),
+ |def, _| tcx.mk_region(def.to_early_bound_region()),
|def, _| tcx.mk_param_from_def(def))
}
// The trait reference is an input, so find all type parameters
// reachable from there, to start (if this is an inherent impl,
// then just examine the self type).
- let mut input_parameters: HashSet<_> =
+ let mut input_parameters: FnvHashSet<_> =
ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect();
if let Some(ref trait_ref) = impl_trait_ref {
input_parameters.extend(ctp::parameters_for(trait_ref, false));
let impl_predicates = ccx.tcx.lookup_predicates(impl_def_id);
let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id);
- let mut input_parameters: HashSet<_> =
+ let mut input_parameters: FnvHashSet<_> =
ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect();
if let Some(ref trait_ref) = impl_trait_ref {
input_parameters.extend(ctp::parameters_for(trait_ref, false));
ctp::identify_constrained_type_params(
&impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters);
- let lifetimes_in_associated_types: HashSet<_> = impl_items.iter()
+ let lifetimes_in_associated_types: FnvHashSet<_> = impl_items.iter()
.map(|item| ccx.tcx.impl_or_trait_item(ccx.tcx.map.local_def_id(item.id)))
.filter_map(|item| match item {
ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty,
use rustc::ty::{self, Ty};
use rustc::ty::fold::{TypeFoldable, TypeVisitor};
-use std::collections::HashSet;
+use rustc::util::nodemap::FnvHashSet;
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum Parameter {
t.super_visit_with(self)
}
- fn visit_region(&mut self, r: ty::Region) -> bool {
- match r {
+ fn visit_region(&mut self, r: &'tcx ty::Region) -> bool {
+ match *r {
ty::ReEarlyBound(data) => {
self.parameters.push(Parameter::Region(data));
}
pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
- input_parameters: &mut HashSet<Parameter>)
+ input_parameters: &mut FnvHashSet<Parameter>)
{
let mut predicates = predicates.to_owned();
setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters);
/// think of any.
pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>],
impl_trait_ref: Option<ty::TraitRef<'tcx>>,
- input_parameters: &mut HashSet<Parameter>)
+ input_parameters: &mut FnvHashSet<Parameter>)
{
// The canonical way of doing the needed topological sort
// would be a DFS, but getting the graph and its ownership
node_id,
item_substs);
- assert!(!item_substs.substs.types.needs_infer());
+ assert!(!item_substs.substs.needs_infer());
ccx.tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs);
}
/// A scope which overrides the default object lifetime but has no other effect.
pub struct ObjectLifetimeDefaultRscope<'r> {
base_scope: &'r (RegionScope+'r),
- default: ty::ObjectLifetimeDefault,
+ default: ty::ObjectLifetimeDefault<'r>,
}
impl<'r> ObjectLifetimeDefaultRscope<'r> {
pub fn new(base_scope: &'r (RegionScope+'r),
- default: ty::ObjectLifetimeDefault)
+ default: ty::ObjectLifetimeDefault<'r>)
-> ObjectLifetimeDefaultRscope<'r>
{
ObjectLifetimeDefaultRscope {
Some(self.base_object_lifetime_default(span)),
ty::ObjectLifetimeDefault::Specific(r) =>
- Some(r),
+ Some(*r),
}
}
use super::terms::*;
use super::terms::VarianceTerm::*;
-use super::terms::ParamKind::*;
use super::xform::*;
pub struct ConstraintContext<'a, 'tcx: 'a> {
fn declared_variance(&self,
param_def_id: DefId,
item_def_id: DefId,
- kind: ParamKind,
index: usize)
-> VarianceTermPtr<'a> {
assert_eq!(param_def_id.krate, item_def_id.krate);
// Parameter on an item defined within another crate:
// variance already inferred, just look it up.
let variances = self.tcx().item_variances(item_def_id);
- let variance = match kind {
- TypeParam => variances.types[index],
- RegionParam => variances.regions[index],
- };
- self.constant_term(variance)
+ self.constant_term(variances[index])
}
}
ty::TyRef(region, ref mt) => {
let contra = self.contravariant(variance);
- self.add_constraints_from_region(generics, *region, contra);
+ self.add_constraints_from_region(generics, region, contra);
self.add_constraints_from_mt(generics, mt, variance);
}
ty::TyParam(ref data) => {
assert_eq!(generics.parent, None);
- assert!((data.idx as usize) < generics.types.len());
- let def_id = generics.types[data.idx as usize].def_id;
+ let mut i = data.idx as usize;
+ if !generics.has_self || i > 0 {
+ i -= generics.regions.len();
+ }
+ let def_id = generics.types[i].def_id;
let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
match self.terms_cx.inferred_map.get(&node_id) {
Some(&index) => {
for p in type_param_defs {
let variance_decl =
- self.declared_variance(p.def_id, def_id, TypeParam, p.index as usize);
+ self.declared_variance(p.def_id, def_id, p.index as usize);
let variance_i = self.xform(variance, variance_decl);
let substs_ty = substs.type_for_def(p);
debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
for p in region_param_defs {
let variance_decl =
- self.declared_variance(p.def_id, def_id, RegionParam, p.index as usize);
+ self.declared_variance(p.def_id, def_id, p.index as usize);
let variance_i = self.xform(variance, variance_decl);
let substs_r = substs.region_for_def(p);
self.add_constraints_from_region(generics, substs_r, variance_i);
/// context with ambient variance `variance`
fn add_constraints_from_region(&mut self,
generics: &ty::Generics<'tcx>,
- region: ty::Region,
+ region: &'tcx ty::Region,
variance: VarianceTermPtr<'a>) {
- match region {
+ match *region {
ty::ReEarlyBound(ref data) => {
assert_eq!(generics.parent, None);
- assert!((data.index as usize) < generics.regions.len());
- let def_id = generics.regions[data.index as usize].def_id;
+ let i = data.index as usize - generics.has_self as usize;
+ let def_id = generics.regions[i].def_id;
let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
if self.is_to_be_inferred(node_id) {
let index = self.inferred_index(node_id);
use super::constraints::*;
use super::terms::*;
use super::terms::VarianceTerm::*;
-use super::terms::ParamKind::*;
use super::xform::*;
struct SolveContext<'a, 'tcx: 'a> {
while index < num_inferred {
let item_id = inferred_infos[index].item_id;
- let mut item_variances = ty::ItemVariances::empty();
+ let mut item_variances = vec![];
while index < num_inferred && inferred_infos[index].item_id == item_id {
let info = &inferred_infos[index];
let variance = solutions[index];
- debug!("Index {} Info {} / {:?} Variance {:?}",
- index, info.index, info.kind, variance);
- match info.kind {
- TypeParam => {
- assert_eq!(item_variances.types.len(), info.index);
- item_variances.types.push(variance);
- }
- RegionParam => {
- assert_eq!(item_variances.regions.len(), info.index);
- item_variances.regions.push(variance);
- }
- }
+ debug!("Index {} Info {} Variance {:?}",
+ index, info.index, variance);
+ assert_eq!(item_variances.len(), info.index);
+ item_variances.push(variance);
index += 1;
}
use util::nodemap::NodeMap;
use self::VarianceTerm::*;
-use self::ParamKind::*;
pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub arena: &'a TypedArena<VarianceTerm<'a>>,
- pub empty_variances: Rc<ty::ItemVariances>,
+ pub empty_variances: Rc<Vec<ty::Variance>>,
// For marker types, UnsafeCell, and other lang items where
// variance is hardcoded, records the item-id and the hardcoded
pub inferred_infos: Vec<InferredInfo<'a>> ,
}
-#[derive(Copy, Clone, Debug, PartialEq)]
-pub enum ParamKind {
- TypeParam,
- RegionParam,
-}
-
pub struct InferredInfo<'a> {
pub item_id: ast::NodeId,
- pub kind: ParamKind,
pub index: usize,
pub param_id: ast::NodeId,
pub term: VarianceTermPtr<'a>,
// cache and share the variance struct used for items with
// no type/region parameters
- empty_variances: Rc::new(ty::ItemVariances::empty())
+ empty_variances: Rc::new(vec![])
};
// See README.md for a discussion on dep-graph management.
let inferreds_on_entry = self.num_inferred();
+ if has_self {
+ self.add_inferred(item_id, 0, item_id);
+ }
+
for (i, p) in generics.lifetimes.iter().enumerate() {
let id = p.lifetime.id;
- self.add_inferred(item_id, RegionParam, i, id);
+ let i = has_self as usize + i;
+ self.add_inferred(item_id, i, id);
}
- if has_self {
- self.add_inferred(item_id, TypeParam, 0, item_id);
- }
for (i, p) in generics.ty_params.iter().enumerate() {
- let i = has_self as usize + i;
- self.add_inferred(item_id, TypeParam, i, p.id);
+ let i = has_self as usize + generics.lifetimes.len() + i;
+ self.add_inferred(item_id, i, p.id);
}
// If this item has no type or lifetime parameters,
fn add_inferred(&mut self,
item_id: ast::NodeId,
- kind: ParamKind,
index: usize,
param_id: ast::NodeId) {
let inf_index = InferredIndex(self.inferred_infos.len());
let term = self.arena.alloc(InferredTerm(inf_index));
let initial_variance = self.pick_initial_variance(item_id, index);
self.inferred_infos.push(InferredInfo { item_id: item_id,
- kind: kind,
index: index,
param_id: param_id,
term: term,
debug!("add_inferred(item_path={}, \
item_id={}, \
- kind={:?}, \
index={}, \
param_id={}, \
inf_index={:?}, \
initial_variance={:?})",
self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)),
- item_id, kind, index, param_id, inf_index,
+ item_id, index, param_id, inf_index,
initial_variance);
}
#![stable(feature = "rust1", since = "1.0.0")]
use core::char::CharExt as C;
+use core::iter::FusedIterator;
use core::fmt;
use tables::{conversions, derived_property, general_category, property};
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for ToLowercase {}
+
/// Returns an iterator that yields the uppercase equivalent of a `char`.
///
/// This `struct` is created by the [`to_uppercase()`] method on [`char`]. See
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for ToUppercase {}
enum CaseMappingIter {
Three(char, char, char),
#![feature(char_escape_debug)]
#![feature(core_char_ext)]
#![feature(decode_utf8)]
+#![feature(fused)]
#![feature(lang_items)]
#![feature(staged_api)]
#![feature(unicode)]
//! methods provided by the unicode parts of the CharExt trait.
use core::char;
-use core::iter::Filter;
+use core::iter::{Filter, FusedIterator};
use core::str::Split;
/// An iterator over the non-whitespace substrings of a string,
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<I> FusedIterator for Utf16Encoder<I>
+ where I: FusedIterator<Item = char> {}
+
impl<'a> Iterator for SplitWhitespace<'a> {
type Item = &'a str;
self.inner.next_back()
}
}
+
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for SplitWhitespace<'a> {}
//! Support for inlining external documentation into the current AST.
-use std::collections::HashSet;
use std::iter::once;
use syntax::ast;
use rustc::hir::def_id::DefId;
use rustc::hir::print as pprust;
use rustc::ty::{self, TyCtxt};
+use rustc::util::nodemap::FnvHashSet;
use rustc_const_eval::lookup_const_by_id;
.into_iter()
.map(|meth| meth.name.to_string())
.collect()
- }).unwrap_or(HashSet::new());
+ }).unwrap_or(FnvHashSet());
ret.push(clean::Item {
inner: clean::ImplItem(clean::Impl {
// If we're reexporting a reexport it may actually reexport something in
// two namespaces, so the target may be listed twice. Make sure we only
// visit each node at most once.
- let mut visited = HashSet::new();
+ let mut visited = FnvHashSet();
for item in tcx.sess.cstore.item_children(did) {
match item.def {
cstore::DlDef(Def::ForeignMod(did)) => {
//! that clean them.
pub use self::Type::*;
-pub use self::PrimitiveType::*;
pub use self::TypeKind::*;
pub use self::VariantKind::*;
pub use self::Mutability::*;
use rustc::ty::subst::Substs;
use rustc::ty;
use rustc::middle::stability;
+use rustc::util::nodemap::{FnvHashMap, FnvHashSet};
use rustc::hir;
-use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
pub access_levels: Arc<AccessLevels<DefId>>,
// These are later on moved into `CACHEKEY`, leaving the map empty.
// Only here so that they can be filtered through the rustdoc passes.
- pub external_traits: HashMap<DefId, Trait>,
+ pub external_traits: FnvHashMap<DefId, Trait>,
}
struct CrateNum(ast::CrateNum);
}
}
pub fn is_mod(&self) -> bool {
- ItemType::from_item(self) == ItemType::Module
+ ItemType::from(self) == ItemType::Module
}
pub fn is_trait(&self) -> bool {
- ItemType::from_item(self) == ItemType::Trait
+ ItemType::from(self) == ItemType::Trait
}
pub fn is_struct(&self) -> bool {
- ItemType::from_item(self) == ItemType::Struct
+ ItemType::from(self) == ItemType::Struct
}
pub fn is_enum(&self) -> bool {
- ItemType::from_item(self) == ItemType::Module
+ ItemType::from(self) == ItemType::Module
}
pub fn is_fn(&self) -> bool {
- ItemType::from_item(self) == ItemType::Function
+ ItemType::from(self) == ItemType::Function
}
pub fn is_associated_type(&self) -> bool {
- ItemType::from_item(self) == ItemType::AssociatedType
+ ItemType::from(self) == ItemType::AssociatedType
}
pub fn is_associated_const(&self) -> bool {
- ItemType::from_item(self) == ItemType::AssociatedConst
+ ItemType::from(self) == ItemType::AssociatedConst
}
pub fn is_method(&self) -> bool {
- ItemType::from_item(self) == ItemType::Method
+ ItemType::from(self) == ItemType::Method
}
pub fn is_ty_method(&self) -> bool {
- ItemType::from_item(self) == ItemType::TyMethod
+ ItemType::from(self) == ItemType::TyMethod
}
pub fn is_primitive(&self) -> bool {
- ItemType::from_item(self) == ItemType::Primitive
+ ItemType::from(self) == ItemType::Primitive
}
pub fn is_stripped(&self) -> bool {
match self.inner { StrippedItem(..) => true, _ => false }
StrippedItem(Box<ItemEnum>),
}
+impl ItemEnum {
+ pub fn generics(&self) -> Option<&Generics> {
+ Some(match *self {
+ ItemEnum::StructItem(ref s) => &s.generics,
+ ItemEnum::EnumItem(ref e) => &e.generics,
+ ItemEnum::FunctionItem(ref f) => &f.generics,
+ ItemEnum::TypedefItem(ref t, _) => &t.generics,
+ ItemEnum::TraitItem(ref t) => &t.generics,
+ ItemEnum::ImplItem(ref i) => &i.generics,
+ ItemEnum::TyMethodItem(ref i) => &i.generics,
+ ItemEnum::MethodItem(ref i) => &i.generics,
+ ItemEnum::ForeignFunctionItem(ref f) => &f.generics,
+ _ => return None,
+ })
+ }
+}
+
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct Module {
pub items: Vec<Item>,
fn external_path_params(cx: &DocContext, trait_did: Option<DefId>, has_self: bool,
bindings: Vec<TypeBinding>, substs: &Substs) -> PathParameters {
- let lifetimes = substs.regions.iter().filter_map(|v| v.clean(cx)).collect();
- let types = substs.types[has_self as usize..].to_vec();
+ let lifetimes = substs.regions().filter_map(|v| v.clean(cx)).collect();
+ let types = substs.types().skip(has_self as usize).collect::<Vec<_>>();
match (trait_did, cx.tcx_opt()) {
// Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C
let path = external_path(cx, &tcx.item_name(self.def_id).as_str(),
Some(self.def_id), true, vec![], self.substs);
- debug!("ty::TraitRef\n substs.types: {:?}\n",
- &self.input_types()[1..]);
+ debug!("ty::TraitRef\n subst: {:?}\n", self.substs);
// collect any late bound regions
let mut late_bounds = vec![];
- for &ty_s in &self.input_types()[1..] {
+ for ty_s in self.input_types().skip(1) {
if let ty::TyTuple(ts) = ty_s.sty {
for &ty_s in ts {
if let ty::TyRef(ref reg, _) = ty_s.sty {
impl<'tcx> Clean<Option<Vec<TyParamBound>>> for Substs<'tcx> {
fn clean(&self, cx: &DocContext) -> Option<Vec<TyParamBound>> {
let mut v = Vec::new();
- v.extend(self.regions.iter().filter_map(|r| r.clean(cx))
+ v.extend(self.regions().filter_map(|r| r.clean(cx))
.map(RegionBound));
- v.extend(self.types.iter().map(|t| TraitBound(PolyTrait {
+ v.extend(self.types().map(|t| TraitBound(PolyTrait {
trait_: t.clean(cx),
lifetimes: vec![]
}, hir::TraitBoundModifier::None)));
}
}
-impl Clean<Lifetime> for ty::RegionParameterDef {
+impl<'tcx> Clean<Lifetime> for ty::RegionParameterDef<'tcx> {
fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(self.name.to_string())
}
}
}
-impl Clean<WherePredicate> for ty::OutlivesPredicate<ty::Region, ty::Region> {
+impl<'tcx> Clean<WherePredicate> for ty::OutlivesPredicate<&'tcx ty::Region, &'tcx ty::Region> {
fn clean(&self, cx: &DocContext) -> WherePredicate {
let ty::OutlivesPredicate(ref a, ref b) = *self;
WherePredicate::RegionPredicate {
}
}
-impl<'tcx> Clean<WherePredicate> for ty::OutlivesPredicate<ty::Ty<'tcx>, ty::Region> {
+impl<'tcx> Clean<WherePredicate> for ty::OutlivesPredicate<ty::Ty<'tcx>, &'tcx ty::Region> {
fn clean(&self, cx: &DocContext) -> WherePredicate {
let ty::OutlivesPredicate(ref ty, ref lt) = *self;
// Note that associated types also have a sized bound by default, but we
// don't actually know the set of associated types right here so that's
// handled in cleaning associated types
- let mut sized_params = HashSet::new();
+ let mut sized_params = FnvHashSet();
where_predicates.retain(|pred| {
match *pred {
WP::BoundPredicate { ty: Generic(ref g), ref bounds } => {
Str,
Slice,
Array,
- PrimitiveTuple,
- PrimitiveRawPointer,
+ Tuple,
+ RawPointer,
}
#[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)]
pub fn primitive_type(&self) -> Option<PrimitiveType> {
match *self {
Primitive(p) | BorrowedRef { type_: box Primitive(p), ..} => Some(p),
- Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(Slice),
+ Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(PrimitiveType::Slice),
FixedVector(..) | BorrowedRef { type_: box FixedVector(..), .. } => {
- Some(Array)
+ Some(PrimitiveType::Array)
}
- Tuple(..) => Some(PrimitiveTuple),
- RawPointer(..) => Some(PrimitiveRawPointer),
+ Tuple(..) => Some(PrimitiveType::Tuple),
+ RawPointer(..) => Some(PrimitiveType::RawPointer),
_ => None,
}
}
impl PrimitiveType {
fn from_str(s: &str) -> Option<PrimitiveType> {
match s {
- "isize" => Some(Isize),
- "i8" => Some(I8),
- "i16" => Some(I16),
- "i32" => Some(I32),
- "i64" => Some(I64),
- "usize" => Some(Usize),
- "u8" => Some(U8),
- "u16" => Some(U16),
- "u32" => Some(U32),
- "u64" => Some(U64),
- "bool" => Some(Bool),
- "char" => Some(Char),
- "str" => Some(Str),
- "f32" => Some(F32),
- "f64" => Some(F64),
- "array" => Some(Array),
- "slice" => Some(Slice),
- "tuple" => Some(PrimitiveTuple),
- "pointer" => Some(PrimitiveRawPointer),
+ "isize" => Some(PrimitiveType::Isize),
+ "i8" => Some(PrimitiveType::I8),
+ "i16" => Some(PrimitiveType::I16),
+ "i32" => Some(PrimitiveType::I32),
+ "i64" => Some(PrimitiveType::I64),
+ "usize" => Some(PrimitiveType::Usize),
+ "u8" => Some(PrimitiveType::U8),
+ "u16" => Some(PrimitiveType::U16),
+ "u32" => Some(PrimitiveType::U32),
+ "u64" => Some(PrimitiveType::U64),
+ "bool" => Some(PrimitiveType::Bool),
+ "char" => Some(PrimitiveType::Char),
+ "str" => Some(PrimitiveType::Str),
+ "f32" => Some(PrimitiveType::F32),
+ "f64" => Some(PrimitiveType::F64),
+ "array" => Some(PrimitiveType::Array),
+ "slice" => Some(PrimitiveType::Slice),
+ "tuple" => Some(PrimitiveType::Tuple),
+ "pointer" => Some(PrimitiveType::RawPointer),
_ => None,
}
}
pub fn to_string(&self) -> &'static str {
match *self {
- Isize => "isize",
- I8 => "i8",
- I16 => "i16",
- I32 => "i32",
- I64 => "i64",
- Usize => "usize",
- U8 => "u8",
- U16 => "u16",
- U32 => "u32",
- U64 => "u64",
- F32 => "f32",
- F64 => "f64",
- Str => "str",
- Bool => "bool",
- Char => "char",
- Array => "array",
- Slice => "slice",
- PrimitiveTuple => "tuple",
- PrimitiveRawPointer => "pointer",
+ PrimitiveType::Isize => "isize",
+ PrimitiveType::I8 => "i8",
+ PrimitiveType::I16 => "i16",
+ PrimitiveType::I32 => "i32",
+ PrimitiveType::I64 => "i64",
+ PrimitiveType::Usize => "usize",
+ PrimitiveType::U8 => "u8",
+ PrimitiveType::U16 => "u16",
+ PrimitiveType::U32 => "u32",
+ PrimitiveType::U64 => "u64",
+ PrimitiveType::F32 => "f32",
+ PrimitiveType::F64 => "f64",
+ PrimitiveType::Str => "str",
+ PrimitiveType::Bool => "bool",
+ PrimitiveType::Char => "char",
+ PrimitiveType::Array => "array",
+ PrimitiveType::Slice => "slice",
+ PrimitiveType::Tuple => "tuple",
+ PrimitiveType::RawPointer => "pointer",
}
}
}
}
+impl From<ast::IntTy> for PrimitiveType {
+ fn from(int_ty: ast::IntTy) -> PrimitiveType {
+ match int_ty {
+ ast::IntTy::Is => PrimitiveType::Isize,
+ ast::IntTy::I8 => PrimitiveType::I8,
+ ast::IntTy::I16 => PrimitiveType::I16,
+ ast::IntTy::I32 => PrimitiveType::I32,
+ ast::IntTy::I64 => PrimitiveType::I64,
+ }
+ }
+}
+
+impl From<ast::UintTy> for PrimitiveType {
+ fn from(uint_ty: ast::UintTy) -> PrimitiveType {
+ match uint_ty {
+ ast::UintTy::Us => PrimitiveType::Usize,
+ ast::UintTy::U8 => PrimitiveType::U8,
+ ast::UintTy::U16 => PrimitiveType::U16,
+ ast::UintTy::U32 => PrimitiveType::U32,
+ ast::UintTy::U64 => PrimitiveType::U64,
+ }
+ }
+}
+
+impl From<ast::FloatTy> for PrimitiveType {
+ fn from(float_ty: ast::FloatTy) -> PrimitiveType {
+ match float_ty {
+ ast::FloatTy::F32 => PrimitiveType::F32,
+ ast::FloatTy::F64 => PrimitiveType::F64,
+ }
+ }
+}
// Poor man's type parameter substitution at HIR level.
// Used to replace private type aliases in public signatures with their aliased types.
struct SubstAlias<'a, 'tcx: 'a> {
tcx: &'a ty::TyCtxt<'a, 'tcx, 'tcx>,
// Table type parameter definition -> substituted type
- ty_substs: HashMap<Def, hir::Ty>,
+ ty_substs: FnvHashMap<Def, hir::Ty>,
// Table node id of lifetime parameter definition -> substituted lifetime
- lt_substs: HashMap<ast::NodeId, hir::Lifetime>,
+ lt_substs: FnvHashMap<ast::NodeId, hir::Lifetime>,
}
impl<'a, 'tcx: 'a, 'b: 'tcx> Folder for SubstAlias<'a, 'tcx> {
let item = tcx.map.expect_item(node_id);
if let hir::ItemTy(ref ty, ref generics) = item.node {
let provided_params = &path.segments.last().unwrap().parameters;
- let mut ty_substs = HashMap::new();
- let mut lt_substs = HashMap::new();
+ let mut ty_substs = FnvHashMap();
+ let mut lt_substs = FnvHashMap();
for (i, ty_param) in generics.ty_params.iter().enumerate() {
let ty_param_def = tcx.expect_def(ty_param.id);
if let Some(ty) = provided_params.types().get(i).cloned()
fn clean(&self, cx: &DocContext) -> Type {
match self.sty {
ty::TyNever => Never,
- ty::TyBool => Primitive(Bool),
- ty::TyChar => Primitive(Char),
- ty::TyInt(ast::IntTy::Is) => Primitive(Isize),
- ty::TyInt(ast::IntTy::I8) => Primitive(I8),
- ty::TyInt(ast::IntTy::I16) => Primitive(I16),
- ty::TyInt(ast::IntTy::I32) => Primitive(I32),
- ty::TyInt(ast::IntTy::I64) => Primitive(I64),
- ty::TyUint(ast::UintTy::Us) => Primitive(Usize),
- ty::TyUint(ast::UintTy::U8) => Primitive(U8),
- ty::TyUint(ast::UintTy::U16) => Primitive(U16),
- ty::TyUint(ast::UintTy::U32) => Primitive(U32),
- ty::TyUint(ast::UintTy::U64) => Primitive(U64),
- ty::TyFloat(ast::FloatTy::F32) => Primitive(F32),
- ty::TyFloat(ast::FloatTy::F64) => Primitive(F64),
- ty::TyStr => Primitive(Str),
+ ty::TyBool => Primitive(PrimitiveType::Bool),
+ ty::TyChar => Primitive(PrimitiveType::Char),
+ ty::TyInt(int_ty) => Primitive(int_ty.into()),
+ ty::TyUint(uint_ty) => Primitive(uint_ty.into()),
+ ty::TyFloat(float_ty) => Primitive(float_ty.into()),
+ ty::TyStr => Primitive(PrimitiveType::Str),
ty::TyBox(t) => {
let box_did = cx.tcx_opt().and_then(|tcx| {
tcx.lang_items.owned_box()
pub struct Impl {
pub unsafety: hir::Unsafety,
pub generics: Generics,
- pub provided_trait_methods: HashSet<String>,
+ pub provided_trait_methods: FnvHashSet<String>,
pub trait_: Option<Type>,
pub for_: Type,
pub items: Vec<Item>,
.map(|meth| meth.name.to_string())
.collect()
})
- }).unwrap_or(HashSet::new());
+ }).unwrap_or(FnvHashSet());
ret.push(Item {
name: None,
}
};
let did = match primitive {
- Isize => tcx.lang_items.isize_impl(),
- I8 => tcx.lang_items.i8_impl(),
- I16 => tcx.lang_items.i16_impl(),
- I32 => tcx.lang_items.i32_impl(),
- I64 => tcx.lang_items.i64_impl(),
- Usize => tcx.lang_items.usize_impl(),
- U8 => tcx.lang_items.u8_impl(),
- U16 => tcx.lang_items.u16_impl(),
- U32 => tcx.lang_items.u32_impl(),
- U64 => tcx.lang_items.u64_impl(),
- F32 => tcx.lang_items.f32_impl(),
- F64 => tcx.lang_items.f64_impl(),
- Char => tcx.lang_items.char_impl(),
- Bool => None,
- Str => tcx.lang_items.str_impl(),
- Slice => tcx.lang_items.slice_impl(),
- Array => tcx.lang_items.slice_impl(),
- PrimitiveTuple => None,
- PrimitiveRawPointer => tcx.lang_items.const_ptr_impl(),
+ PrimitiveType::Isize => tcx.lang_items.isize_impl(),
+ PrimitiveType::I8 => tcx.lang_items.i8_impl(),
+ PrimitiveType::I16 => tcx.lang_items.i16_impl(),
+ PrimitiveType::I32 => tcx.lang_items.i32_impl(),
+ PrimitiveType::I64 => tcx.lang_items.i64_impl(),
+ PrimitiveType::Usize => tcx.lang_items.usize_impl(),
+ PrimitiveType::U8 => tcx.lang_items.u8_impl(),
+ PrimitiveType::U16 => tcx.lang_items.u16_impl(),
+ PrimitiveType::U32 => tcx.lang_items.u32_impl(),
+ PrimitiveType::U64 => tcx.lang_items.u64_impl(),
+ PrimitiveType::F32 => tcx.lang_items.f32_impl(),
+ PrimitiveType::F64 => tcx.lang_items.f64_impl(),
+ PrimitiveType::Char => tcx.lang_items.char_impl(),
+ PrimitiveType::Bool => None,
+ PrimitiveType::Str => tcx.lang_items.str_impl(),
+ PrimitiveType::Slice => tcx.lang_items.slice_impl(),
+ PrimitiveType::Array => tcx.lang_items.slice_impl(),
+ PrimitiveType::Tuple => None,
+ PrimitiveType::RawPointer => tcx.lang_items.const_ptr_impl(),
};
if let Some(did) = did {
if !did.is_local() {
let is_generic = match def {
Def::PrimTy(p) => match p {
- hir::TyStr => return Primitive(Str),
- hir::TyBool => return Primitive(Bool),
- hir::TyChar => return Primitive(Char),
- hir::TyInt(ast::IntTy::Is) => return Primitive(Isize),
- hir::TyInt(ast::IntTy::I8) => return Primitive(I8),
- hir::TyInt(ast::IntTy::I16) => return Primitive(I16),
- hir::TyInt(ast::IntTy::I32) => return Primitive(I32),
- hir::TyInt(ast::IntTy::I64) => return Primitive(I64),
- hir::TyUint(ast::UintTy::Us) => return Primitive(Usize),
- hir::TyUint(ast::UintTy::U8) => return Primitive(U8),
- hir::TyUint(ast::UintTy::U16) => return Primitive(U16),
- hir::TyUint(ast::UintTy::U32) => return Primitive(U32),
- hir::TyUint(ast::UintTy::U64) => return Primitive(U64),
- hir::TyFloat(ast::FloatTy::F32) => return Primitive(F32),
- hir::TyFloat(ast::FloatTy::F64) => return Primitive(F64),
+ hir::TyStr => return Primitive(PrimitiveType::Str),
+ hir::TyBool => return Primitive(PrimitiveType::Bool),
+ hir::TyChar => return Primitive(PrimitiveType::Char),
+ hir::TyInt(int_ty) => return Primitive(int_ty.into()),
+ hir::TyUint(uint_ty) => return Primitive(uint_ty.into()),
+ hir::TyFloat(float_ty) => return Primitive(float_ty.into()),
},
Def::SelfTy(..) if path.segments.len() == 1 => {
return Generic(keywords::SelfType.name().to_string());
use rustc::ty::{self, TyCtxt};
use rustc::hir::map as hir_map;
use rustc::lint;
+use rustc::util::nodemap::{FnvHashMap, FnvHashSet};
use rustc_trans::back::link;
use rustc_resolve as resolve;
use rustc_metadata::cstore::CStore;
use errors::emitter::ColorConfig;
use std::cell::{RefCell, Cell};
-use std::collections::{HashMap, HashSet};
use std::rc::Rc;
use visit_ast::RustdocVisitor;
NotTyped(&'a session::Session)
}
-pub type ExternalPaths = HashMap<DefId, (Vec<String>, clean::TypeKind)>;
+pub type ExternalPaths = FnvHashMap<DefId, (Vec<String>, clean::TypeKind)>;
pub struct DocContext<'a, 'tcx: 'a> {
pub map: &'a hir_map::Map<'tcx>,
pub maybe_typed: MaybeTyped<'a, 'tcx>,
pub input: Input,
- pub populated_crate_impls: RefCell<HashSet<ast::CrateNum>>,
+ pub populated_crate_impls: RefCell<FnvHashSet<ast::CrateNum>>,
pub deref_trait_did: Cell<Option<DefId>>,
// Note that external items for which `doc(hidden)` applies to are shown as
// non-reachable while local items aren't. This is because we're reusing
/// Later on moved into `html::render::CACHE_KEY`
pub renderinfo: RefCell<RenderInfo>,
/// Later on moved through `clean::Crate` into `html::render::CACHE_KEY`
- pub external_traits: RefCell<HashMap<DefId, clean::Trait>>,
+ pub external_traits: RefCell<FnvHashMap<DefId, clean::Trait>>,
}
impl<'b, 'tcx> DocContext<'b, 'tcx> {
resolutions,
&arenas,
&name,
- |tcx, _, analysis, result| {
+ |tcx, _, analysis, _, result| {
if let Err(_) = result {
sess.fatal("Compilation failed, aborting rustdoc");
}
map: &tcx.map,
maybe_typed: Typed(tcx),
input: input,
- populated_crate_impls: RefCell::new(HashSet::new()),
+ populated_crate_impls: RefCell::new(FnvHashSet()),
deref_trait_did: Cell::new(None),
access_levels: RefCell::new(access_levels),
- external_traits: RefCell::new(HashMap::new()),
+ external_traits: RefCell::new(FnvHashMap()),
renderinfo: RefCell::new(Default::default()),
};
debug!("crate: {:?}", ctxt.map.krate());
use syntax::abi::Abi;
use rustc::hir;
-use clean;
+use clean::{self, PrimitiveType};
use core::DocAccessLevels;
use html::item_type::ItemType;
use html::escape::Escape;
}
clean::Tuple(ref typs) => {
match &typs[..] {
- &[] => primitive_link(f, clean::PrimitiveTuple, "()"),
+ &[] => primitive_link(f, PrimitiveType::Tuple, "()"),
&[ref one] => {
- primitive_link(f, clean::PrimitiveTuple, "(")?;
+ primitive_link(f, PrimitiveType::Tuple, "(")?;
write!(f, "{},", one)?;
- primitive_link(f, clean::PrimitiveTuple, ")")
+ primitive_link(f, PrimitiveType::Tuple, ")")
}
many => {
- primitive_link(f, clean::PrimitiveTuple, "(")?;
+ primitive_link(f, PrimitiveType::Tuple, "(")?;
write!(f, "{}", CommaSep(&many))?;
- primitive_link(f, clean::PrimitiveTuple, ")")
+ primitive_link(f, PrimitiveType::Tuple, ")")
}
}
}
clean::Vector(ref t) => {
- primitive_link(f, clean::Slice, &format!("["))?;
+ primitive_link(f, PrimitiveType::Slice, &format!("["))?;
write!(f, "{}", t)?;
- primitive_link(f, clean::Slice, &format!("]"))
+ primitive_link(f, PrimitiveType::Slice, &format!("]"))
}
clean::FixedVector(ref t, ref s) => {
- primitive_link(f, clean::PrimitiveType::Array, "[")?;
+ primitive_link(f, PrimitiveType::Array, "[")?;
write!(f, "{}", t)?;
- primitive_link(f, clean::PrimitiveType::Array,
+ primitive_link(f, PrimitiveType::Array,
&format!("; {}]", Escape(s)))
}
clean::Never => f.write_str("!"),
clean::RawPointer(m, ref t) => {
match **t {
clean::Generic(_) | clean::ResolvedPath {is_generic: true, ..} => {
- primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer,
+ primitive_link(f, clean::PrimitiveType::RawPointer,
&format!("*{}{}", RawMutableSpace(m), t))
}
_ => {
- primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer,
+ primitive_link(f, clean::PrimitiveType::RawPointer,
&format!("*{}", RawMutableSpace(m)))?;
write!(f, "{}", t)
}
clean::Vector(ref bt) => { // BorrowedRef{ ... Vector(T) } is &[T]
match **bt {
clean::Generic(_) =>
- primitive_link(f, clean::Slice,
+ primitive_link(f, PrimitiveType::Slice,
&format!("&{}{}[{}]", lt, m, **bt)),
_ => {
- primitive_link(f, clean::Slice, &format!("&{}{}[", lt, m))?;
+ primitive_link(f, PrimitiveType::Slice,
+ &format!("&{}{}[", lt, m))?;
write!(f, "{}", **bt)?;
- primitive_link(f, clean::Slice, "]")
+ primitive_link(f, PrimitiveType::Slice, "]")
}
}
}
Macro,
}
-impl ItemType {
- pub fn from_item(item: &clean::Item) -> ItemType {
+impl<'a> From<&'a clean::Item> for ItemType {
+ fn from(item: &'a clean::Item) -> ItemType {
let inner = match item.inner {
clean::StrippedItem(box ref item) => item,
ref inner@_ => inner,
clean::StrippedItem(..) => unreachable!(),
}
}
+}
- pub fn from_type_kind(kind: clean::TypeKind) -> ItemType {
+impl From<clean::TypeKind> for ItemType {
+ fn from(kind: clean::TypeKind) -> ItemType {
match kind {
clean::TypeStruct => ItemType::Struct,
clean::TypeEnum => ItemType::Enum,
clean::TypeTypedef => ItemType::Typedef,
}
}
+}
+impl ItemType {
pub fn css_class(&self) -> &'static str {
match *self {
ItemType::Module => "mod",
use std::ascii::AsciiExt;
use std::cell::RefCell;
use std::cmp::Ordering;
-use std::collections::{BTreeMap, HashMap, HashSet};
+use std::collections::BTreeMap;
use std::default::Default;
use std::error;
use std::fmt::{self, Display, Formatter};
use rustc::middle::stability;
use rustc::session::config::get_unstable_features_setting;
use rustc::hir;
+use rustc::util::nodemap::{FnvHashMap, FnvHashSet};
use clean::{self, Attributes, GetDefId};
use doctree;
/// `true`.
pub include_sources: bool,
/// The local file sources we've emitted and their respective url-paths.
- pub local_sources: HashMap<PathBuf, String>,
+ pub local_sources: FnvHashMap<PathBuf, String>,
/// All the passes that were run on this crate.
- pub passes: HashSet<String>,
+ pub passes: FnvHashSet<String>,
/// The base-URL of the issue tracker for when an item has been tagged with
/// an issue number.
pub issue_tracker_base_url: Option<String>,
/// Mapping of typaram ids to the name of the type parameter. This is used
/// when pretty-printing a type (so pretty printing doesn't have to
/// painfully maintain a context like this)
- pub typarams: HashMap<DefId, String>,
+ pub typarams: FnvHashMap<DefId, String>,
/// Maps a type id to all known implementations for that type. This is only
/// recognized for intra-crate `ResolvedPath` types, and is used to print
///
/// The values of the map are a list of implementations and documentation
/// found on that implementation.
- pub impls: HashMap<DefId, Vec<Impl>>,
+ pub impls: FnvHashMap<DefId, Vec<Impl>>,
/// Maintains a mapping of local crate node ids to the fully qualified name
/// and "short type description" of that node. This is used when generating
/// URLs when a type is being linked to. External paths are not located in
/// this map because the `External` type itself has all the information
/// necessary.
- pub paths: HashMap<DefId, (Vec<String>, ItemType)>,
+ pub paths: FnvHashMap<DefId, (Vec<String>, ItemType)>,
/// Similar to `paths`, but only holds external paths. This is only used for
/// generating explicit hyperlinks to other crates.
- pub external_paths: HashMap<DefId, (Vec<String>, ItemType)>,
+ pub external_paths: FnvHashMap<DefId, (Vec<String>, ItemType)>,
/// This map contains information about all known traits of this crate.
/// Implementations of a crate should inherit the documentation of the
/// parent trait if no extra documentation is specified, and default methods
/// should show up in documentation about trait implementations.
- pub traits: HashMap<DefId, clean::Trait>,
+ pub traits: FnvHashMap<DefId, clean::Trait>,
/// When rendering traits, it's often useful to be able to list all
/// implementors of the trait, and this mapping is exactly, that: a mapping
/// of trait ids to the list of known implementors of the trait
- pub implementors: HashMap<DefId, Vec<Implementor>>,
+ pub implementors: FnvHashMap<DefId, Vec<Implementor>>,
/// Cache of where external crate documentation can be found.
- pub extern_locations: HashMap<ast::CrateNum, (String, ExternalLocation)>,
+ pub extern_locations: FnvHashMap<ast::CrateNum, (String, ExternalLocation)>,
/// Cache of where documentation for primitives can be found.
- pub primitive_locations: HashMap<clean::PrimitiveType, ast::CrateNum>,
+ pub primitive_locations: FnvHashMap<clean::PrimitiveType, ast::CrateNum>,
// Note that external items for which `doc(hidden)` applies to are shown as
// non-reachable while local items aren't. This is because we're reusing
parent_stack: Vec<DefId>,
parent_is_trait_impl: bool,
search_index: Vec<IndexItem>,
- seen_modules: HashSet<DefId>,
+ seen_modules: FnvHashSet<DefId>,
seen_mod: bool,
stripped_mod: bool,
deref_trait_did: Option<DefId>,
/// Later on moved into `CACHE_KEY`.
#[derive(Default)]
pub struct RenderInfo {
- pub inlined: HashSet<DefId>,
+ pub inlined: FnvHashSet<DefId>,
pub external_paths: ::core::ExternalPaths,
- pub external_typarams: HashMap<DefId, String>,
+ pub external_typarams: FnvHashMap<DefId, String>,
pub deref_trait_did: Option<DefId>,
}
thread_local!(static CACHE_KEY: RefCell<Arc<Cache>> = Default::default());
thread_local!(pub static CURRENT_LOCATION_KEY: RefCell<Vec<String>> =
RefCell::new(Vec::new()));
-thread_local!(static USED_ID_MAP: RefCell<HashMap<String, usize>> =
+thread_local!(static USED_ID_MAP: RefCell<FnvHashMap<String, usize>> =
RefCell::new(init_ids()));
-fn init_ids() -> HashMap<String, usize> {
+fn init_ids() -> FnvHashMap<String, usize> {
[
"main",
"search",
*s.borrow_mut() = if embedded {
init_ids()
} else {
- HashMap::new()
+ FnvHashMap()
};
});
}
pub fn run(mut krate: clean::Crate,
external_html: &ExternalHtml,
dst: PathBuf,
- passes: HashSet<String>,
+ passes: FnvHashSet<String>,
css_file_extension: Option<PathBuf>,
renderinfo: RenderInfo) -> Result<(), Error> {
let src_root = match krate.src.parent() {
src_root: src_root,
passes: passes,
include_sources: true,
- local_sources: HashMap::new(),
+ local_sources: FnvHashMap(),
issue_tracker_base_url: None,
layout: layout::Layout {
logo: "".to_string(),
} = renderinfo;
let external_paths = external_paths.into_iter()
- .map(|(k, (v, t))| (k, (v, ItemType::from_type_kind(t))))
+ .map(|(k, (v, t))| (k, (v, ItemType::from(t))))
.collect();
let mut cache = Cache {
- impls: HashMap::new(),
+ impls: FnvHashMap(),
external_paths: external_paths,
- paths: HashMap::new(),
- implementors: HashMap::new(),
+ paths: FnvHashMap(),
+ implementors: FnvHashMap(),
stack: Vec::new(),
parent_stack: Vec::new(),
search_index: Vec::new(),
parent_is_trait_impl: false,
- extern_locations: HashMap::new(),
- primitive_locations: HashMap::new(),
- seen_modules: HashSet::new(),
+ extern_locations: FnvHashMap(),
+ primitive_locations: FnvHashMap(),
+ seen_modules: FnvHashSet(),
seen_mod: false,
stripped_mod: false,
access_levels: krate.access_levels.clone(),
orphan_methods: Vec::new(),
- traits: mem::replace(&mut krate.external_traits, HashMap::new()),
+ traits: mem::replace(&mut krate.external_traits, FnvHashMap()),
deref_trait_did: deref_trait_did,
typarams: external_typarams,
};
/// Build the search index from the collected metadata
fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String {
- let mut nodeid_to_pathid = HashMap::new();
+ let mut nodeid_to_pathid = FnvHashMap();
let mut crate_items = Vec::with_capacity(cache.search_index.len());
let mut crate_paths = Vec::<Json>::new();
/// Returns a documentation-level item type from the item.
fn item_type(item: &clean::Item) -> ItemType {
- ItemType::from_item(item)
+ ItemType::from(item)
}
/// Takes a path to a source file and cleans the path to it. This canonicalizes
// Register any generics to their corresponding string. This is used
// when pretty-printing types
- match item.inner {
- clean::StructItem(ref s) => self.generics(&s.generics),
- clean::EnumItem(ref e) => self.generics(&e.generics),
- clean::FunctionItem(ref f) => self.generics(&f.generics),
- clean::TypedefItem(ref t, _) => self.generics(&t.generics),
- clean::TraitItem(ref t) => self.generics(&t.generics),
- clean::ImplItem(ref i) => self.generics(&i.generics),
- clean::TyMethodItem(ref i) => self.generics(&i.generics),
- clean::MethodItem(ref i) => self.generics(&i.generics),
- clean::ForeignFunctionItem(ref f) => self.generics(&f.generics),
- _ => {}
+ if let Some(generics) = item.inner.generics() {
+ self.generics(generics);
}
if !self.seen_mod {
// these modules are recursed into, but not rendered normally
// (a flag on the context).
if !self.render_redirect_pages {
- self.render_redirect_pages = self.maybe_ignore_item(&item);
+ self.render_redirect_pages = maybe_ignore_item(&item);
}
if item.is_mod() {
// BTreeMap instead of HashMap to get a sorted output
let mut map = BTreeMap::new();
for item in &m.items {
- if self.maybe_ignore_item(item) { continue }
+ if maybe_ignore_item(item) { continue }
let short = item_type(item).css_class();
let myname = match item.name {
}
return map;
}
-
- fn maybe_ignore_item(&self, it: &clean::Item) -> bool {
- match it.inner {
- clean::StrippedItem(..) => true,
- clean::ModuleItem(ref m) => {
- it.doc_value().is_none() && m.items.is_empty()
- && it.visibility != Some(clean::Public)
- },
- _ => false,
- }
- }
}
impl<'a> Item<'a> {
if let clean::DefaultImplItem(..) = items[*i].inner {
return false;
}
- !cx.maybe_ignore_item(&items[*i])
+ !maybe_ignore_item(&items[*i])
}).collect::<Vec<usize>>();
// the order of item types in the listing
Ok(())
}
+fn maybe_ignore_item(it: &clean::Item) -> bool {
+ match it.inner {
+ clean::StrippedItem(..) => true,
+ clean::ModuleItem(ref m) => {
+ it.doc_value().is_none() && m.items.is_empty()
+ && it.visibility != Some(clean::Public)
+ },
+ _ => false,
+ }
+}
+
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
let mut stability = vec![];
#[derive(Copy, Clone)]
enum AssocItemLink<'a> {
Anchor(Option<&'a str>),
- GotoSource(DefId, &'a HashSet<String>),
+ GotoSource(DefId, &'a FnvHashSet<String>),
}
impl<'a> AssocItemLink<'a> {
// except according to those terms.
use std::cell::{RefCell, Cell};
-use std::collections::{HashMap, HashSet};
use std::env;
use std::ffi::OsString;
use std::io::prelude::*;
use rustc::session::config::{get_unstable_features_setting, OutputType,
OutputTypes, Externs};
use rustc::session::search_paths::{SearchPaths, PathKind};
+use rustc::util::nodemap::{FnvHashMap, FnvHashSet};
use rustc_back::dynamic_lib::DynamicLibrary;
use rustc_back::tempdir::TempDir;
use rustc_driver::{driver, Compilation};
map: &map,
maybe_typed: core::NotTyped(&sess),
input: input,
- external_traits: RefCell::new(HashMap::new()),
- populated_crate_impls: RefCell::new(HashSet::new()),
+ external_traits: RefCell::new(FnvHashMap()),
+ populated_crate_impls: RefCell::new(FnvHashSet()),
deref_trait_did: Cell::new(None),
access_levels: Default::default(),
renderinfo: Default::default(),
//! Rust AST Visitor. Extracts useful information and massages it into a form
//! usable for clean
-use std::collections::HashSet;
use std::mem;
use syntax::abi;
use rustc::hir::map as hir_map;
use rustc::hir::def::Def;
use rustc::middle::privacy::AccessLevel;
+use rustc::util::nodemap::FnvHashSet;
use rustc::hir;
pub module: Module,
pub attrs: hir::HirVec<ast::Attribute>,
pub cx: &'a core::DocContext<'a, 'tcx>,
- view_item_stack: HashSet<ast::NodeId>,
+ view_item_stack: FnvHashSet<ast::NodeId>,
inlining_from_glob: bool,
}
impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
pub fn new(cx: &'a core::DocContext<'a, 'tcx>) -> RustdocVisitor<'a, 'tcx> {
// If the root is reexported, terminate all recursion.
- let mut stack = HashSet::new();
+ let mut stack = FnvHashSet();
stack.insert(ast::CRATE_NODE_ID);
RustdocVisitor {
module: Module::new(None),
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use mem;
use ops::Range;
+use iter::FusedIterator;
/// Extension methods for ASCII-subset only operations on string slices.
///
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ExactSizeIterator for EscapeDefault {}
+#[unstable(feature = "fused", issue = "35602")]
+impl FusedIterator for EscapeDefault {}
+
static ASCII_LOWERCASE_MAP: [u8; 256] = [
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use super::*;
use char::from_u32;
use cmp::max;
use fmt::{self, Debug};
use hash::{Hash, Hasher, BuildHasher, SipHasher13};
-use iter::FromIterator;
+use iter::{FromIterator, FusedIterator};
use mem::{self, replace};
use ops::{Deref, Index};
use rand::{self, Rng};
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<K, V> FusedIterator for IntoIter<K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Drain<'a, K, V> {
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
#[inline] fn len(&self) -> usize { self.inner.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K, V> FusedIterator for Drain<'a, K, V> {}
impl<'a, K, V> Entry<'a, K, V> {
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(test)]
mod test_map {
- use prelude::v1::*;
-
use super::HashMap;
use super::Entry::{Occupied, Vacant};
use cell::RefCell;
use borrow::Borrow;
use fmt;
use hash::{Hash, BuildHasher};
-use iter::{Chain, FromIterator};
+use iter::{Chain, FromIterator, FusedIterator};
use ops::{BitOr, BitAnd, BitXor, Sub};
use super::Recover;
impl<'a, K> ExactSizeIterator for Iter<'a, K> {
fn len(&self) -> usize { self.iter.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K> FusedIterator for Iter<'a, K> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K> Iterator for IntoIter<K> {
impl<K> ExactSizeIterator for IntoIter<K> {
fn len(&self) -> usize { self.iter.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<K> FusedIterator for IntoIter<K> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K> Iterator for Drain<'a, K> {
impl<'a, K> ExactSizeIterator for Drain<'a, K> {
fn len(&self) -> usize { self.iter.len() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, K> FusedIterator for Drain<'a, K> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, S> Clone for Intersection<'a, T, S> {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, S> FusedIterator for Intersection<'a, T, S>
+ where T: Eq + Hash, S: BuildHasher
+{}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, S> Clone for Difference<'a, T, S> {
fn clone(&self) -> Difference<'a, T, S> {
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, S> FusedIterator for Difference<'a, T, S>
+ where T: Eq + Hash, S: BuildHasher
+{}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, S> Clone for SymmetricDifference<'a, T, S> {
fn clone(&self) -> SymmetricDifference<'a, T, S> {
fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, S> FusedIterator for SymmetricDifference<'a, T, S>
+ where T: Eq + Hash, S: BuildHasher
+{}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, S> Clone for Union<'a, T, S> {
fn clone(&self) -> Union<'a, T, S> { Union { iter: self.iter.clone() } }
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a, T, S> FusedIterator for Union<'a, T, S>
+ where T: Eq + Hash, S: BuildHasher
+{}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, S> Iterator for Union<'a, T, S>
where T: Eq + Hash, S: BuildHasher
#[cfg(test)]
mod test_set {
- use prelude::v1::*;
-
use super::HashSet;
#[test]
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
-#[unsafe_no_drop_flag]
+#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawTable<K, V> {
capacity: usize,
size: usize,
impl<K, V> Drop for RawTable<K, V> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
- if self.capacity == 0 || self.capacity == mem::POST_DROP_USIZE {
+ if self.capacity == 0 {
return;
}
#![stable(feature = "env", since = "1.0.0")]
-use prelude::v1::*;
-
use error::Error;
use ffi::{OsStr, OsString};
use fmt;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use super::*;
use iter::repeat;
// reconsider what crate these items belong in.
use any::TypeId;
-use boxed::Box;
use cell;
use char;
use fmt::{self, Debug, Display};
-use marker::{Send, Sync, Reflect};
+use marker::Reflect;
use mem::transmute;
use num;
-use raw::TraitObject;
use str;
-use string::{self, String};
+use string;
/// Base functionality for all errors in Rust.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn downcast_ref<T: Error + 'static>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let to: TraitObject = transmute(self);
-
- // Extract the data pointer
- Some(&*(to.data as *const T))
+ Some(&*(self as *const Error as *const T))
}
} else {
None
pub fn downcast_mut<T: Error + 'static>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let to: TraitObject = transmute(self);
-
- // Extract the data pointer
- Some(&mut *(to.data as *const T as *mut T))
+ Some(&mut *(self as *mut Error as *mut T))
}
} else {
None
pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Error>> {
if self.is::<T>() {
unsafe {
- // Get the raw representation of the trait object
- let raw = Box::into_raw(self);
- let to: TraitObject =
- transmute::<*mut Error, TraitObject>(raw);
-
- // Extract the data pointer
- Ok(Box::from_raw(to.data as *mut T))
+ let raw: *mut Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use super::Error;
use fmt;
// except according to those terms.
use ascii;
-use borrow::{Cow, ToOwned, Borrow};
-use boxed::Box;
-use convert::{Into, From};
-use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
+use borrow::{Cow, Borrow};
+use cmp::Ordering;
use error::Error;
use fmt::{self, Write};
use io;
-use iter::Iterator;
use libc;
use mem;
use memchr;
use ops;
-use option::Option::{self, Some, None};
use os::raw::c_char;
-use result::Result::{self, Ok, Err};
use slice;
use str::{self, Utf8Error};
-use string::String;
-use vec::Vec;
/// A type representing an owned C-compatible string
///
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> CString {
+ v.reserve_exact(1);
v.push(0);
CString { inner: v.into_boxed_slice() }
}
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use super::*;
use os::raw::c_char;
use borrow::Cow::{Borrowed, Owned};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use borrow::{Borrow, Cow, ToOwned};
+use borrow::{Borrow, Cow};
use fmt::{self, Debug};
use mem;
-use string::String;
use ops;
use cmp;
use hash::{Hash, Hasher};
-use vec::Vec;
use sys::os_str::{Buf, Slice};
use sys_common::{AsInner, IntoInner, FromInner};
use path::{Path, PathBuf};
use sys::fs as fs_imp;
use sys_common::{AsInnerMut, FromInner, AsInner, IntoInner};
-use vec::Vec;
use time::SystemTime;
/// A reference to an open file on the filesystem.
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use io::prelude::*;
use fs::{self, File, OpenOptions};
//! Buffering wrappers for I/O traits
-use prelude::v1::*;
use io::prelude::*;
use marker::Reflect;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use io::prelude::*;
use io::{self, BufReader, BufWriter, LineWriter, SeekFrom};
use sync::atomic::{AtomicUsize, Ordering};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use io::prelude::*;
use cmp;
mod tests {
use io::prelude::*;
use io::{Cursor, SeekFrom};
- use vec::Vec;
#[test]
fn test_vec_writer() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use boxed::Box;
-use convert::Into;
use error;
use fmt;
-use marker::{Send, Sync};
-use option::Option::{self, Some, None};
use result;
use sys;
#[cfg(test)]
mod test {
- use prelude::v1::*;
use super::{Error, ErrorKind};
use error;
use fmt;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use boxed::Box;
use cmp;
use io::{self, SeekFrom, Read, Write, Seek, BufRead, Error, ErrorKind};
use fmt;
use mem;
-use string::String;
-use vec::Vec;
// =============================================================================
// Forwarding implementations
#[cfg(test)]
mod tests {
use io::prelude::*;
- use vec::Vec;
use test;
#[bench]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use cell::Cell;
use ptr;
use sync::Arc;
use rustc_unicode::str as core_str;
use error as std_error;
use fmt;
-use iter::{Iterator};
-use marker::Sized;
-use ops::{Drop, FnOnce};
-use option::Option::{self, Some, None};
-use result::Result::{Ok, Err};
use result;
-use string::String;
use str;
-use vec::Vec;
use memchr;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use io::prelude::*;
use io;
use super::Cursor;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use io::prelude::*;
use cell::{RefCell, BorrowState};
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use io::prelude::*;
use io::{copy, sink, empty, repeat};
#![feature(float_from_str_radix)]
#![feature(fn_traits)]
#![feature(fnbox)]
+#![feature(fused)]
#![feature(hashmap_hasher)]
#![feature(heap_api)]
#![feature(inclusive_range)]
#![feature(optin_builtin_traits)]
#![feature(panic_unwind)]
#![feature(placement_in_syntax)]
+#![feature(prelude_import)]
#![feature(question_mark)]
#![feature(rand)]
#![feature(raw)]
#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
-#![feature(unsafe_no_drop_flag, filling_drop)]
+#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unwind_attributes)]
#![feature(vec_push_all)]
#![feature(zero_one)]
#![allow(unused_features)] // std may use features in a platform-specific way
#![cfg_attr(not(stage0), deny(warnings))]
+#[prelude_import]
+#[allow(unused)]
+use prelude::v1::*;
+
#[cfg(test)] extern crate test;
// We want to reexport a few macros from core but libcore has already been
let end_align = (ptr as usize + len) & (usize_bytes - 1);
let mut offset;
if end_align > 0 {
- offset = len - cmp::min(usize_bytes - end_align, len);
+ offset = if end_align >= len { 0 } else { len - end_align };
if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
return Some(offset + index);
}
fn no_match_reversed() {
assert_eq!(None, memrchr(b'a', b"xyz"));
}
+
+ #[test]
+ fn each_alignment_reversed() {
+ let mut data = [1u8; 64];
+ let needle = 2;
+ let pos = 40;
+ data[pos] = needle;
+ for start in 0..16 {
+ assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
+ }
+ }
}
#[cfg(test)]
fn no_match_reversed() {
assert_eq!(None, memrchr(b'a', b"xyz"));
}
+
+ #[test]
+ fn each_alignment() {
+ let mut data = [1u8; 64];
+ let needle = 2;
+ let pos = 40;
+ data[pos] = needle;
+ for start in 0..16 {
+ assert_eq!(Some(pos - start), memchr(needle, &data[start..]));
+ }
+ }
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use fmt;
use hash;
use io;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use net::*;
use net::test::{tsa, sa6, sa4};
// Tests for this module
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use net::*;
use net::Ipv6MulticastScope::*;
use net::test::{tsa, sa6, sa4};
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use io::{self, Error, ErrorKind};
use sys_common::net as net_imp;
//! This module is "publicly exported" through the `FromStr` implementations
//! below.
-use prelude::v1::*;
-
use error::Error;
use fmt;
use net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use io::prelude::*;
use fmt;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use io::ErrorKind;
use io::prelude::*;
use net::*;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use env;
use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs};
use sync::atomic::{AtomicUsize, Ordering};
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use io::ErrorKind;
use net::*;
use net::test::{next_test_ip4, next_test_ip6};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::num::Wrapping;
-#[cfg(test)] use cmp::PartialEq;
#[cfg(test)] use fmt;
-#[cfg(test)] use marker::Copy;
#[cfg(test)] use ops::{Add, Sub, Mul, Div, Rem};
/// Helper function for testing numeric operations
use u32;
use u64;
use usize;
- use string::ToString;
use ops::Mul;
#[test]
mod bench {
extern crate test;
use self::test::Bencher;
- use prelude::v1::*;
#[bench]
fn bench_pow_function(b: &mut Bencher) {
#![stable(feature = "std_panic", since = "1.9.0")]
use any::Any;
-use boxed::Box;
use cell::UnsafeCell;
use ops::{Deref, DerefMut};
use panicking;
//! * Executing a panic up to doing the actual implementation
//! * Shims around "try"
-use prelude::v1::*;
use io::prelude::*;
use any::Any;
#![stable(feature = "rust1", since = "1.0.0")]
use ascii::*;
-use borrow::{Borrow, ToOwned, Cow};
+use borrow::{Borrow, Cow};
use cmp;
use error::Error;
use fmt;
use fs;
use hash::{Hash, Hasher};
use io;
-use iter;
+use iter::{self, FusedIterator};
use mem;
use ops::{self, Deref};
-use string::String;
-use vec::Vec;
use ffi::{OsStr, OsString};
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Iter<'a> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Components<'a> {
type Item = Component<'a>;
}
}
+#[unstable(feature = "fused", issue = "35602")]
+impl<'a> FusedIterator for Components<'a> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> cmp::PartialEq for Components<'a> {
fn eq(&self, other: &Components<'a>) -> bool {
#[cfg(test)]
mod tests {
use super::*;
- use string::{ToString, String};
- use vec::Vec;
macro_rules! t(
($path:expr, iter: $iter:expr) => (
#![stable(feature = "process", since = "1.0.0")]
-use prelude::v1::*;
use io::prelude::*;
use ffi::OsStr;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use io::prelude::*;
use io::ErrorKind;
#[cfg(not(test))]
#[lang = "start"]
fn lang_start(main: *const u8, argc: isize, argv: *const *const u8) -> isize {
- use borrow::ToOwned;
use mem;
use panic;
use sys;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::{Arc, Barrier};
use sync::mpsc::{channel, TryRecvError};
use thread;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use sync::atomic::{AtomicUsize, Ordering};
use sync::{mutex, MutexGuard, PoisonError};
use sys_common::condvar as sys;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::mpsc::channel;
use sync::{Condvar, Mutex, Arc};
use thread;
use thread::{self, Thread};
use sync::atomic::{AtomicBool, Ordering};
use sync::Arc;
-use marker::{Sync, Send};
use mem;
-use clone::Clone;
use time::Instant;
struct Inner {
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use env;
use super::*;
use thread;
#[cfg(test)]
mod sync_tests {
- use prelude::v1::*;
-
use env;
use thread;
use super::*;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::mpsc::channel;
use super::{Queue, Data, Empty, Inconsistent};
use sync::Arc;
#[cfg(test)]
#[allow(unused_imports)]
mod tests {
- use prelude::v1::*;
-
use thread;
use sync::mpsc::*;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::Arc;
use super::Queue;
use thread;
pub use self::Failure::*;
use self::Blocker::*;
-use vec::Vec;
use core::mem;
use core::ptr;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use cell::UnsafeCell;
use fmt;
use marker;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::mpsc::channel;
use sync::{Arc, Mutex, Condvar};
use sync::atomic::{AtomicUsize, Ordering};
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use panic;
use sync::mpsc::channel;
use thread;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use cell::UnsafeCell;
use fmt;
use marker;
mod tests {
#![allow(deprecated)] // rand
- use prelude::v1::*;
-
use rand::{self, Rng};
use sync::mpsc::channel;
use thread;
#[test]
fn test_rwlock_try_write() {
- use mem::drop;
-
let lock = RwLock::new(0isize);
let read_guard = lock.read().unwrap();
#![allow(dead_code)] // different code on OSX/linux/etc
-use vec::Vec;
-
/// One-time global initialization.
pub unsafe fn init(argc: isize, argv: *const *const u8) { imp::init(argc, argv) }
target_os = "solaris",
target_os = "emscripten"))]
mod imp {
- use prelude::v1::*;
-
use libc::c_char;
use mem;
use ffi::CStr;
target_os = "ios",
target_os = "windows"))]
mod imp {
- use vec::Vec;
-
pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
}
//! Documentation can be found on the `rt::at_exit` function.
use alloc::boxed::FnBox;
-use boxed::Box;
use ptr;
use sys_common::mutex::Mutex;
-use vec::Vec;
type Queue = Vec<Box<FnBox()>>;
let i: usize = inner[.. (inner.len() - rest.len())].parse().unwrap();
inner = &rest[i..];
rest = &rest[..i];
+ if rest.starts_with("_$") {
+ rest = &rest[1..];
+ }
while !rest.is_empty() {
- if rest.starts_with("$") {
+ if rest.starts_with(".") {
+ if let Some('.') = rest[1..].chars().next() {
+ writer.write_all(b"::")?;
+ rest = &rest[2..];
+ } else {
+ writer.write_all(b".")?;
+ rest = &rest[1..];
+ }
+ } else if rest.starts_with("$") {
macro_rules! demangle {
- ($($pat:expr, => $demangled:expr),*) => ({
+ ($($pat:expr => $demangled:expr),*) => ({
$(if rest.starts_with($pat) {
try!(writer.write_all($demangled));
rest = &rest[$pat.len()..];
// see src/librustc/back/link.rs for these mappings
demangle! (
- "$SP$", => b"@",
- "$BP$", => b"*",
- "$RF$", => b"&",
- "$LT$", => b"<",
- "$GT$", => b">",
- "$LP$", => b"(",
- "$RP$", => b")",
- "$C$", => b",",
+ "$SP$" => b"@",
+ "$BP$" => b"*",
+ "$RF$" => b"&",
+ "$LT$" => b"<",
+ "$GT$" => b">",
+ "$LP$" => b"(",
+ "$RP$" => b")",
+ "$C$" => b",",
// in theory we can demangle any Unicode code point, but
// for simplicity we just catch the common ones.
- "$u7e$", => b"~",
- "$u20$", => b" ",
- "$u27$", => b"'",
- "$u5b$", => b"[",
- "$u5d$", => b"]",
- "$u7b$", => b"{",
- "$u7d$", => b"}"
+ "$u7e$" => b"~",
+ "$u20$" => b" ",
+ "$u27$" => b"'",
+ "$u5b$" => b"[",
+ "$u5d$" => b"]",
+ "$u7b$" => b"{",
+ "$u7d$" => b"}",
+ "$u3b$" => b";",
+ "$u2b$" => b"+",
+ "$u22$" => b"\""
)
} else {
- let idx = match rest.find('$') {
+ let idx = match rest.char_indices().find(|&(_, c)| c == '$' || c == '.') {
None => rest.len(),
- Some(i) => i,
+ Some((i, _)) => i,
};
writer.write_all(rest[..idx].as_bytes())?;
rest = &rest[idx..];
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use sys_common;
macro_rules! t { ($a:expr, $b:expr) => ({
let mut m = Vec::new();
t!("_ZN8$RF$testE", "&test");
t!("_ZN8$BP$test4foobE", "*test::foob");
t!("_ZN9$u20$test4foobE", " test::foob");
+ t!("_ZN35Bar$LT$$u5b$u32$u3b$$u20$4$u5d$$GT$E", "Bar<[u32; 4]>");
}
#[test]
t!("ZN13test$u20$test4foobE", "test test::foob");
t!("ZN12test$RF$test4foobE", "test&test::foob");
}
+
+ #[test]
+ fn demangle_elements_beginning_with_underscore() {
+ t!("_ZN13_$LT$test$GT$E", "<test>");
+ t!("_ZN28_$u7b$$u7b$closure$u7d$$u7d$E", "{{closure}}");
+ t!("_ZN15__STATIC_FMTSTRE", "__STATIC_FMTSTR");
+ }
+
+ #[test]
+ fn demangle_trait_impls() {
+ t!("_ZN71_$LT$Test$u20$$u2b$$u20$$u27$static$u20$as$u20$foo..Bar$LT$Test$GT$$GT$3barE",
+ "<Test + 'static as foo::Bar<Test>>::bar");
+ }
}
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use io;
use io::ErrorKind;
use io::Read;
#[cfg(test)]
pub mod test {
- use prelude::v1::*;
use path::{Path, PathBuf};
use env;
use rand::{self, Rng};
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use io::prelude::*;
use super::*;
use io;
#![allow(missing_docs)]
-use boxed::Box;
use sync::Once;
use sys;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use marker::Sync;
use sys::mutex as imp;
/// An OS-based mutual exclusion lock.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use cmp;
use ffi::CString;
use fmt;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use super::*;
use collections::HashMap;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use fmt;
use marker;
use ops::Deref;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use cell::RefCell;
use sync::Arc;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use alloc::boxed::FnBox;
use libc;
use sys::stack_overflow;
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
use cell::RefCell;
-use string::String;
use thread::Thread;
use thread::LocalKeyState;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use super::{Key, StaticKey};
fn assert_sync<T: Sync>() {}
use ops;
use slice;
use str;
-use string::String;
use sys_common::AsInner;
-use vec::Vec;
const UTF8_REPLACEMENT_CHARACTER: &'static [u8] = b"\xEF\xBF\xBD";
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use borrow::Cow;
use super::*;
use ffi::{OsStr, OsString};
use mem;
-use prelude::v1::*;
use sys::os_str::Buf;
use sys_common::{FromInner, IntoInner, AsInner};
use libc;
-use prelude::v1::*;
use ascii;
use ffi::OsStr;
use fmt;
#[cfg(test)]
mod test {
- use prelude::v1::*;
use thread;
use io;
use io::prelude::*;
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use io;
use os::unix::io::{FromRawFd, RawFd, AsRawFd, IntoRawFd};
use process;
#![unstable(reason = "not public", issue = "0", feature = "fd")]
-use prelude::v1::*;
-
use io::{self, Read};
use libc::{self, c_int, size_t, c_void};
use mem;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use os::unix::prelude::*;
use ffi::{CString, CStr, OsString, OsStr};
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
#[cfg(target_os = "linux")]
fn get_path(fd: c_int) -> Option<PathBuf> {
- use string::ToString;
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
readlink(&p).ok()
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use ffi::CStr;
use io;
use libc::{self, c_int, size_t, sockaddr, socklen_t};
#![allow(unused_imports)] // lots of cfg code here
-use prelude::v1::*;
use os::unix::prelude::*;
use error::Error as StdError;
use borrow::Cow;
use fmt::{self, Debug};
-use vec::Vec;
use str;
-use string::String;
use mem;
use sys_common::{AsInner, IntoInner};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use cmp;
use io;
use libc::{self, c_int};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use os::unix::prelude::*;
use collections::hash_map::{HashMap, Entry};
#[cfg(test)]
mod tests {
use super::*;
- use prelude::v1::*;
use ffi::OsStr;
use mem;
pub use self::imp::OsRng;
-#[cfg(all(unix, not(target_os = "ios"), not(target_os = "openbsd")))]
+use mem;
+
+fn next_u32(mut fill_buf: &mut FnMut(&mut [u8])) -> u32 {
+ let mut buf: [u8; 4] = [0; 4];
+ fill_buf(&mut buf);
+ unsafe { mem::transmute::<[u8; 4], u32>(buf) }
+}
+
+fn next_u64(mut fill_buf: &mut FnMut(&mut [u8])) -> u64 {
+ let mut buf: [u8; 8] = [0; 8];
+ fill_buf(&mut buf);
+ unsafe { mem::transmute::<[u8; 8], u64>(buf) }
+}
+
+#[cfg(all(unix,
+ not(target_os = "ios"),
+ not(target_os = "openbsd"),
+ not(target_os = "freebsd")))]
mod imp {
use self::OsRngInner::*;
+ use super::{next_u32, next_u64};
use fs::File;
use io;
use libc;
- use mem;
use rand::Rng;
use rand::reader::ReaderRng;
use sys::os::errno;
}
}
- fn getrandom_next_u32() -> u32 {
- let mut buf: [u8; 4] = [0; 4];
- getrandom_fill_bytes(&mut buf);
- unsafe { mem::transmute::<[u8; 4], u32>(buf) }
- }
-
- fn getrandom_next_u64() -> u64 {
- let mut buf: [u8; 8] = [0; 8];
- getrandom_fill_bytes(&mut buf);
- unsafe { mem::transmute::<[u8; 8], u64>(buf) }
- }
-
#[cfg(all(target_os = "linux",
any(target_arch = "x86_64",
target_arch = "x86",
impl Rng for OsRng {
fn next_u32(&mut self) -> u32 {
match self.inner {
- OsGetrandomRng => getrandom_next_u32(),
+ OsGetrandomRng => next_u32(&mut getrandom_fill_bytes),
OsReaderRng(ref mut rng) => rng.next_u32(),
}
}
fn next_u64(&mut self) -> u64 {
match self.inner {
- OsGetrandomRng => getrandom_next_u64(),
+ OsGetrandomRng => next_u64(&mut getrandom_fill_bytes),
OsReaderRng(ref mut rng) => rng.next_u64(),
}
}
#[cfg(target_os = "openbsd")]
mod imp {
+ use super::{next_u32, next_u64};
+
use io;
use libc;
- use mem;
use sys::os::errno;
use rand::Rng;
impl Rng for OsRng {
fn next_u32(&mut self) -> u32 {
- let mut v = [0; 4];
- self.fill_bytes(&mut v);
- unsafe { mem::transmute(v) }
+ next_u32(&mut |v| self.fill_bytes(v))
}
fn next_u64(&mut self) -> u64 {
- let mut v = [0; 8];
- self.fill_bytes(&mut v);
- unsafe { mem::transmute(v) }
+ next_u64(&mut |v| self.fill_bytes(v))
}
fn fill_bytes(&mut self, v: &mut [u8]) {
// getentropy(2) permits a maximum buffer size of 256 bytes
#[cfg(target_os = "ios")]
mod imp {
+ use super::{next_u32, next_u64};
+
use io;
- use mem;
use ptr;
use rand::Rng;
use libc::{c_int, size_t};
impl Rng for OsRng {
fn next_u32(&mut self) -> u32 {
- let mut v = [0; 4];
- self.fill_bytes(&mut v);
- unsafe { mem::transmute(v) }
+ next_u32(&mut |v| self.fill_bytes(v))
}
fn next_u64(&mut self) -> u64 {
- let mut v = [0; 8];
- self.fill_bytes(&mut v);
- unsafe { mem::transmute(v) }
+ next_u64(&mut |v| self.fill_bytes(v))
}
fn fill_bytes(&mut self, v: &mut [u8]) {
let ret = unsafe {
}
}
}
+
+#[cfg(target_os = "freebsd")]
+mod imp {
+ use super::{next_u32, next_u64};
+
+ use io;
+ use libc;
+ use rand::Rng;
+ use ptr;
+
+ pub struct OsRng {
+ // dummy field to ensure that this struct cannot be constructed outside
+ // of this module
+ _dummy: (),
+ }
+
+ impl OsRng {
+ /// Create a new `OsRng`.
+ pub fn new() -> io::Result<OsRng> {
+ Ok(OsRng { _dummy: () })
+ }
+ }
+
+ impl Rng for OsRng {
+ fn next_u32(&mut self) -> u32 {
+ next_u32(&mut |v| self.fill_bytes(v))
+ }
+ fn next_u64(&mut self) -> u64 {
+ next_u64(&mut |v| self.fill_bytes(v))
+ }
+ fn fill_bytes(&mut self, v: &mut [u8]) {
+ let mib = [libc::CTL_KERN, libc::KERN_ARND];
+ // kern.arandom permits a maximum buffer size of 256 bytes
+ for s in v.chunks_mut(256) {
+ let mut s_len = s.len();
+ let ret = unsafe {
+ libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint,
+ s.as_mut_ptr() as *mut _, &mut s_len,
+ ptr::null(), 0)
+ };
+ if ret == -1 || s_len != s.len() {
+ panic!("kern.arandom sysctl failed! (returned {}, s.len() {}, oldlenp {})",
+ ret, s.len(), s_len);
+ }
+ }
+ }
+ }
+}
// We roughly maintain the deadlocking behavior by panicking to ensure
// that this lock acquisition does not succeed.
//
- // We also check whether there this lock is already write locked. This
+ // We also check whether this lock is already write locked. This
// is only possible if it was write locked by the current thread and
// the implementation allows recursive locking. The POSIX standard
- // doesn't require recursivly locking a rwlock to deadlock, but we can't
+ // doesn't require recursively locking a rwlock to deadlock, but we can't
// allow that because it could lead to aliasing issues.
if r == libc::EAGAIN {
panic!("rwlock maximum reader count exceeded");
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use io;
use libc;
use sys::fd::FileDesc;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use alloc::boxed::FnBox;
use cmp;
use ffi::CStr;
target_os = "solaris"))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
- use prelude::v1::*;
-
use libc;
use libc::mmap;
use libc::{PROT_NONE, MAP_PRIVATE, MAP_ANON, MAP_FAILED, MAP_FIXED};
//! manner we pay a semi-large one-time cost up front for detecting whether a
//! function is available but afterwards it's just a load and a jump.
-use prelude::v1::*;
-
use ffi::CString;
use sync::atomic::{AtomicUsize, Ordering};
use sys::c;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use os::windows::prelude::*;
use ffi::{CString, OsStr};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use os::windows::prelude::*;
use ffi::OsString;
#![unstable(issue = "0", feature = "windows_handle")]
-use prelude::v1::*;
-
use cmp;
use io::{ErrorKind, Read};
use io;
#![allow(missing_docs, bad_style)]
-use prelude::v1::*;
-
use ffi::{OsStr, OsString};
use io::{self, ErrorKind};
use os::windows::ffi::{OsStrExt, OsStringExt};
//! CriticalSection is used and we keep track of who's holding the mutex to
//! detect recursive locks.
-use prelude::v1::*;
-
use cell::UnsafeCell;
use mem;
use sync::atomic::{AtomicUsize, Ordering};
#![unstable(issue = "0", feature = "windows_net")]
-use prelude::v1::*;
-
use cmp;
use io::{self, Read};
use libc::{c_int, c_void, c_ulong};
#![allow(bad_style)]
-use prelude::v1::*;
use os::windows::prelude::*;
use error::Error as StdError;
use borrow::Cow;
use fmt::{self, Debug};
use sys_common::wtf8::{Wtf8, Wtf8Buf};
-use string::String;
-use result::Result;
-use option::Option;
use mem;
use sys_common::{AsInner, IntoInner};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
use os::windows::prelude::*;
use ffi::OsStr;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use ascii::*;
use collections::HashMap;
use collections;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
use ffi::{OsStr, OsString};
use super::make_command_line;
#![unstable(issue = "0", feature = "windows_stdio")]
-use prelude::v1::*;
use io::prelude::*;
use cmp;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use alloc::boxed::FnBox;
use io;
use ffi::CStr;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
use ptr;
use sys::c;
use sys_common::mutex::Mutex;
// Due to rust-lang/rust#18804, make sure this is not generic!
#[cfg(target_os = "linux")]
unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
- use prelude::v1::*;
use mem;
use libc;
use sys_common::thread_local as os;
#[doc(hidden)]
pub mod os {
- use prelude::v1::*;
-
use cell::{Cell, UnsafeCell};
use marker;
use ptr;
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use sync::mpsc::{channel, Sender};
use cell::{Cell, UnsafeCell};
use super::LocalKeyState;
#[cfg(test)]
mod dynamic_tests {
- use prelude::v1::*;
-
use cell::RefCell;
use collections::HashMap;
#![stable(feature = "rust1", since = "1.0.0")]
-use prelude::v1::*;
-
use any::Any;
use cell::UnsafeCell;
use ffi::{CStr, CString};
#[cfg(test)]
mod tests {
- use prelude::v1::*;
-
use any::Any;
use sync::mpsc::{channel, Sender};
use result;
#[test]
fn test_spawn_sched() {
- use clone::Clone;
-
let (tx, rx) = channel();
fn f(i: i32, tx: Sender<()>) {
pub fn expand_type(t: P<ast::Ty>, fld: &mut MacroExpander) -> P<ast::Ty> {
let t = match t.node.clone() {
ast::TyKind::Mac(mac) => {
- if fld.cx.ecfg.features.unwrap().type_macros {
- expand_mac_invoc(mac, None, Vec::new(), t.span, fld)
- } else {
- feature_gate::emit_feature_err(
- &fld.cx.parse_sess.span_diagnostic,
- "type_macros",
- t.span,
- feature_gate::GateIssue::Language,
- "type macros are experimental");
-
- DummyResult::raw_ty(t.span)
- }
+ expand_mac_invoc(mac, None, Vec::new(), t.span, fld)
}
_ => t
};
// Allows using `box` in patterns; RFC 469
(active, box_patterns, "1.0.0", Some(29641)),
- // Allows using the unsafe_no_drop_flag attribute (unlikely to
- // switch to Accepted; see RFC 320)
- (active, unsafe_no_drop_flag, "1.0.0", None),
-
// Allows using the unsafe_destructor_blind_to_params attribute;
// RFC 1238
(active, dropck_parametricity, "1.3.0", Some(28498)),
// Allows associated type defaults
(active, associated_type_defaults, "1.2.0", Some(29661)),
- // Allows macros to appear in the type position.
- (active, type_macros, "1.3.0", Some(27245)),
-
// allow `repr(simd)`, and importing the various simd intrinsics
(active, repr_simd, "1.4.0", Some(27731)),
(removed, quad_precision_float, "1.0.0", None),
(removed, struct_inherit, "1.0.0", None),
(removed, test_removed_feature, "1.0.0", None),
- (removed, visible_private_types, "1.0.0", None)
+ (removed, visible_private_types, "1.0.0", None),
+ (removed, unsafe_no_drop_flag, "1.0.0", None)
);
declare_features! (
// mean anything
(accepted, test_accepted_feature, "1.0.0", None),
(accepted, tuple_indexing, "1.0.0", None),
+ // Allows macros to appear in the type position.
+ (accepted, type_macros, "1.13.0", Some(27245)),
(accepted, while_let, "1.0.0", None),
// Allows `#[deprecated]` attribute
(accepted, deprecated, "1.9.0", Some(29935))
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
- ("rustc_no_mir", Whitelisted, Gated("rustc_attrs",
- "the `#[rustc_no_mir]` attribute \
- is just used to make tests pass \
- and will never be stable",
- cfg_fn!(rustc_attrs))),
("rustc_inherit_overflow_checks", Whitelisted, Gated("rustc_attrs",
"the `#[rustc_inherit_overflow_checks]` \
attribute is just used to control \
attribute is just used for the Rust test \
suite",
cfg_fn!(omit_gdb_pretty_printer_section))),
- ("unsafe_no_drop_flag", Whitelisted, Gated("unsafe_no_drop_flag",
- "unsafe_no_drop_flag has unstable semantics \
- and may be removed in the future",
- cfg_fn!(unsafe_no_drop_flag))),
("unsafe_destructor_blind_to_params",
Normal,
Gated("dropck_parametricity",
#![feature(associated_consts)]
#![feature(const_fn)]
-#![feature(filling_drop)]
#![feature(libc)]
#![feature(rustc_private)]
#![feature(staged_api)]
if !self.eat(&token::OpenDelim(token::Brace)) {
let sp = self.span;
let tok = self.this_token_to_string();
- return Err(self.span_fatal_help(sp,
- &format!("expected `{{`, found `{}`", tok),
- "place this code inside a block"));
+ let mut e = self.span_fatal(sp, &format!("expected `{{`, found `{}`", tok));
+
+ // Check to see if the user has written something like
+ //
+ // if (cond)
+ // bar;
+ //
+        // This is valid in other languages, but not in Rust.
+ match self.parse_stmt_without_recovery(false) {
+ Ok(Some(stmt)) => {
+ let mut stmt_span = stmt.span;
+ // expand the span to include the semicolon, if it exists
+ if self.eat(&token::Semi) {
+ stmt_span.hi = self.last_span.hi;
+ }
+ e.span_help(stmt_span, "try placing this code inside a block");
+ }
+ Err(mut e) => {
+ self.recover_stmt_(SemiColonMode::Break);
+ e.cancel();
+ }
+ _ => ()
+ }
+ return Err(e);
}
self.parse_block_tail(lo, BlockCheckMode::Default)
use std::fmt::{self, Display, Debug};
use std::iter::FromIterator;
use std::ops::Deref;
-use std::{ptr, slice, vec};
+use std::{mem, ptr, slice, vec};
use serialize::{Encodable, Decodable, Encoder, Decoder};
pub fn map<F>(mut self, f: F) -> P<T> where
F: FnOnce(T) -> T,
{
+ let p: *mut T = &mut *self.ptr;
+
+ // Leak self in case of panic.
+ // FIXME(eddyb) Use some sort of "free guard" that
+ // only deallocates, without dropping the pointee,
+    // in case the call to `f` below ends in a panic.
+ mem::forget(self);
+
unsafe {
- let p = &mut *self.ptr;
- // FIXME(#5016) this shouldn't need to drop-fill to be safe.
- ptr::write(p, f(ptr::read_and_drop(p)));
+ ptr::write(p, f(ptr::read(p)));
+
+ // Recreate self from the raw pointer.
+ P {
+ ptr: Box::from_raw(p)
+ }
}
- self
}
}
-Subproject commit c3eb3c7608f439231d0c1340af6b720f113b4bf4
+Subproject commit eee68eafa7e8e4ce996b49f5551636639a6c331a
-Subproject commit c37d3747da75c280237dc2d6b925078e69555499
+Subproject commit 755bc3db4ff795865ea31b5b4f38ac920d8acacb
"serialize 0.0.0",
]
+[metadata]
+"checksum gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)" = "3da3a2cbaeb01363c8e3704fd9fd0eb2ceb17c6f27abd4c1ef040fb57d20dc79"
"libc 0.0.0",
]
+[metadata]
+"checksum gcc 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)" = "806e63121fbf30760b060a5fc2d1e9f47e1bd356d183e8870367c6c12cc9d5ed"
#define SUBTARGET_PPC
#endif
+#ifdef LLVM_COMPONENT_SYSTEMZ
+#define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
+#else
+#define SUBTARGET_SYSTEMZ
+#endif
+
#define GEN_SUBTARGETS \
SUBTARGET_X86 \
SUBTARGET_ARM \
SUBTARGET_AARCH64 \
SUBTARGET_MIPS \
- SUBTARGET_PPC
+ SUBTARGET_PPC \
+ SUBTARGET_SYSTEMZ
#define SUBTARGET(x) namespace llvm { \
extern const SubtargetFeatureKV x##FeatureKV[]; \
));
}
+extern "C" LLVMRustMetadataRef LLVMRustDIBuilderCreateLexicalBlockFile(
+ LLVMRustDIBuilderRef Builder,
+ LLVMRustMetadataRef Scope,
+ LLVMRustMetadataRef File) {
+ return wrap(Builder->createLexicalBlockFile(
+ unwrapDI<DIDescriptor>(Scope),
+ unwrapDI<DIFile>(File)));
+}
+
extern "C" LLVMRustMetadataRef LLVMRustDIBuilderCreateStaticVariable(
LLVMRustDIBuilderRef Builder,
LLVMRustMetadataRef Context,
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots, the contents should be changed so git updates the mtime.
-2016-08-17
+2016-08-23
# tarball for a stable release you'll likely see `1.x.0-$date` where `1.x.0` was
# released on `$date`
-rustc: beta-2016-07-06
-rustc_key: 411fd48b
-cargo: nightly-2016-07-05
+rustc: beta-2016-08-17
+rustc_key: 195e6261
+cargo: nightly-2016-08-21
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]])
// CHECK-LABEL: @no_op_slice_adjustment
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
// We used to generate an extra alloca and memcpy for the block's trailing expression value, so
// check that we copy directly to the return value slot
-// CHECK: [[SRC:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %x to
-// CHECK: [[DST:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %sret_slot to i8*
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[DST]], i8* [[SRC]],
+// CHECK: %2 = insertvalue { i8*, [[USIZE]] } undef, i8* %0, 0
+// CHECK: %3 = insertvalue { i8*, [[USIZE]] } %2, [[USIZE]] %1, 1
+// CHECK: ret { i8*, [[USIZE]] } %3
{ x }
}
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
static X: i32 = 5;
// CHECK-LABEL: @raw_ptr_to_raw_ptr_noop
// CHECK-NOT: alloca
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn raw_ptr_to_raw_ptr_noop() -> *const i32{
&X as *const i32
}
// CHECK-LABEL: @reference_to_raw_ptr_noop
// CHECK-NOT: alloca
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn reference_to_raw_ptr_noop() -> *const i32 {
&X
}
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
// Below, these constants are defined as enum variants that by itself would
// have a lower alignment than the enum type. Ensure that we mark them
// CHECK: @STATIC = {{.*}}, align 4
// This checks the constants from inline_enum_const
-// CHECK: @const{{[0-9]+}} = {{.*}}, align 2
+// CHECK: @ref{{[0-9]+}} = {{.*}}, align 2
// This checks the constants from {low,high}_align_const, they share the same
// constant, but the alignment differs, so the higher one should be used
-// CHECK: @const{{[0-9]+}} = {{.*}}, align 4
+// CHECK: [[LOW_HIGH:@ref[0-9]+]] = {{.*}}, align 4
+// CHECK: [[LOW_HIGH_REF:@const[0-9]+]] = {{.*}} [[LOW_HIGH]]
#[derive(Copy, Clone)]
// CHECK-LABEL: @static_enum_const
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn static_enum_const() -> E<i16, i32> {
STATIC
}
// CHECK-LABEL: @inline_enum_const
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn inline_enum_const() -> E<i8, i16> {
- E::A(0)
+ *&E::A(0)
}
// CHECK-LABEL: @low_align_const
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn low_align_const() -> E<i16, [i16; 3]> {
// Check that low_align_const and high_align_const use the same constant
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]+}}, i8* {{.*}} [[LOW_HIGH:@const[0-9]+]]
- E::A(0)
+// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]]
+ *&E::A(0)
}
// CHECK-LABEL: @high_align_const
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn high_align_const() -> E<i16, i32> {
// Check that low_align_const and high_align_const use the same constant
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{[0-9]}}, i8* {{.*}} [[LOW_HIGH]]
- E::A(0)
+// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]]
+ *&E::A(0)
}
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
struct SomeUniqueName;
// CHECK-LABEL: @droppy
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn droppy() {
// Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so
// that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the
// regular function exit. We used to have problems with quadratic growths of drop calls in such
// functions.
-// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
-// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
-// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
+// CHECK-NOT: invoke{{.*}}drop{{.*}}SomeUniqueName
// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK-NOT: call{{.*}}drop{{.*}}SomeUniqueName
+// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
+// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
+// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
+// CHECK-NOT: {{(call|invoke).*}}drop{{.*}}SomeUniqueName
// The next line checks for the } that ends the function definition
// CHECK-LABEL: {{^[}]}}
let _s = SomeUniqueName;
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
pub struct Bytes {
a: u8,
// CHECK-LABEL: @borrow
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
+ &x; // keep variable in an alloca
x
}
// CHECK-LABEL: @_box
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull
*x
// compile-flags: -C no-prepopulate-passes
-#![feature(rustc_attrs)]
#![crate_type = "lib"]
use std::marker::PhantomData;
// CHECK-LABEL: @mir
#[no_mangle]
-#[rustc_mir]
fn mir(){
// CHECK-NOT: getelementptr
// CHECK-NOT: store{{.*}}undef
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(naked_functions, rustc_attrs)]
+#![feature(naked_functions)]
// CHECK: Function Attrs: naked uwtable
// CHECK-NEXT: define internal void @naked_empty()
// CHECK: Function Attrs: naked uwtable
#[no_mangle]
#[naked]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
// CHECK-NEXT: define internal void @naked_with_args(i{{[0-9]+}})
fn naked_with_args(a: isize) {
// CHECK: %a = alloca i{{[0-9]+}}
// CHECK: ret void
+ &a; // keep variable in an alloca
}
// CHECK: Function Attrs: naked uwtable
// CHECK-NEXT: define internal i{{[0-9]+}} @naked_with_args_and_return(i{{[0-9]+}})
#[no_mangle]
#[naked]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
fn naked_with_args_and_return(a: isize) -> isize {
// CHECK: %a = alloca i{{[0-9]+}}
// CHECK: ret i{{[0-9]+}} %{{[0-9]+}}
+ &a; // keep variable in an alloca
a
}
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]])
// CHECK-LABEL: @ref_dst
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn ref_dst(s: &[u8]) {
// We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
// directly to the alloca for "x"
-// CHECK: [[SRC:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %s to i8*
-// CHECK: [[DST:%[0-9]+]] = bitcast { i8*, [[USIZE]] }* %x to i8*
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[DST]], i8* [[SRC]],
+// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0
+// CHECK: store i8* %0, i8** [[X0]]
+// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1
+// CHECK: store [[USIZE]] %1, [[USIZE]]* [[X1]]
+
let x = &*s;
+ &x; // keep variable in an alloca
}
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
-#![feature(rustc_attrs)]
pub struct Bytes {
a: u8,
// The array is stored as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) {
-// CHECK: %y = alloca [4 x i8]
+// CHECK: %arg1 = alloca [4 x i8]
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: store i32 %1, i32* [[TMP]]
-// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8*
+// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
*x = y;
// The struct is stored as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
-#[rustc_no_mir] // FIXME #27840 MIR has different codegen.
pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) {
-// CHECK: %y = alloca %Bytes
+// CHECK: %arg1 = alloca %Bytes
// CHECK: [[TMP:%.+]] = alloca i32
// CHECK: store i32 %1, i32* [[TMP]]
-// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8*
+// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8*
// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*
// CHECK: call void @llvm.memcpy.{{.*}}(i8* [[Y8]], i8* [[TMP8]], i{{[0-9]+}} 4, i32 1, i1 false)
*x = y;
#![plugin(lint_plugin_test)]
#![forbid(test_lint)]
//~^ NOTE lint level defined here
-//~| NOTE `forbid` lint level set here
+//~| NOTE `forbid` level set here
fn lintme() { } //~ ERROR item is named 'lintme'
-#[allow(test_lint)] //~ ERROR allow(test_lint) overruled by outer forbid(test_lint)
+#[allow(test_lint)]
+//~^ ERROR allow(test_lint) overruled by outer forbid(test_lint)
+//~| NOTE overruled by previous forbid
pub fn main() {
lintme();
}
//~| ERROR E0017
//~| NOTE statics require immutable values
//~| ERROR E0388
+ //~| NOTE cannot write data in a static definition
static CONST_REF: &'static mut i32 = &mut C; //~ ERROR E0017
//~| NOTE statics require immutable values
//~| ERROR E0017
}
fn main() {
- let trait_obj: &SomeTrait = SomeTrait; //~ ERROR E0425
- //~^ ERROR E0038
- let &invalid = trait_obj; //~ ERROR E0033
+ let trait_obj: &SomeTrait = SomeTrait;
+ //~^ ERROR E0425
+ //~| ERROR E0038
+ //~| method `foo` has no receiver
+ //~| NOTE the trait `SomeTrait` cannot be made into an object
+
+ let &invalid = trait_obj;
+ //~^ ERROR E0033
+ //~| NOTE type `&SomeTrait` cannot be dereferenced
}
impl Trait for Foo {
fn bar<'a,'b>(x: &'a str, y: &'b str) { //~ ERROR E0195
+ //~^ lifetimes do not match trait
}
}
}
fn main() {
- some_func(5i32); //~ ERROR E0277
+ some_func(5i32);
+ //~^ ERROR the trait bound `i32: Foo` is not satisfied
+ //~| NOTE trait `i32: Foo` not satisfied
+ //~| NOTE required by `some_func`
}
let mut fancy = FancyNum{ num: 5 };
let fancy_ref = &(&mut fancy);
fancy_ref.num = 6; //~ ERROR E0389
+ //~^ NOTE assignment into an immutable reference
println!("{}", fancy_ref.num);
}
struct Foo { a: bool };
let f = Foo(); //~ ERROR E0423
+ //~^ struct called like a function
}
fn bar(self) {}
fn foo() {
- self.bar(); //~ ERROR E0424
+ self.bar();
+ //~^ ERROR `self` is not available in a static method [E0424]
+ //~| NOTE not available in static method
+ //~| NOTE maybe a `self` argument is missing?
}
}
fn main () {
loop {
- break 'a; //~ ERROR E0426
+ break 'a;
+ //~^ ERROR E0426
+ //~| NOTE undeclared label `'a`
}
}
fn main () {
let foo = 42u32;
const FOO : u32 = foo; //~ ERROR E0435
+ //~| NOTE non-constant used with constant
}
impl Foo for i32 {
type Bar = bool; //~ ERROR E0437
+ //~| NOTE not a member of trait `Foo`
}
fn main () {
impl Foo for i32 {
const BAR: bool = true; //~ ERROR E0438
+ //~| NOTE not a member of trait `Foo`
}
fn main () {
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[repr(simd)]
+struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
+
+extern "platform-intrinsic" {
+ fn x86_mm_adds_ep16(x: i16x8, y: i16x8) -> i16x8; //~ ERROR E0441
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[repr(simd)]
+struct i8x16(i8, i8, i8, i8, i8, i8, i8, i8,
+ i8, i8, i8, i8, i8, i8, i8, i8);
+#[repr(simd)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+struct i64x2(i64, i64);
+
+extern "platform-intrinsic" {
+ fn x86_mm_adds_epi16(x: i8x16, y: i32x4) -> i64x2;
+ //~^ ERROR E0442
+ //~| ERROR E0442
+ //~| ERROR E0442
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[repr(simd)]
+struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
+#[repr(simd)]
+struct i64x8(i64, i64, i64, i64, i64, i64, i64, i64);
+
+extern "platform-intrinsic" {
+ fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i64x8; //~ ERROR E0443
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[repr(simd)]
+struct f64x2(f64, f64);
+
+extern "platform-intrinsic" {
+ fn x86_mm_movemask_pd(x: f64x2, y: f64x2, z: f64x2) -> i32; //~ ERROR E0444
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {
+ fn dummy(&self) { }
+}
+
+pub trait Bar : Foo {}
+//~^ ERROR private trait in public interface [E0445]
+//~| NOTE private trait can't be public
+pub struct Bar2<T: Foo>(pub T);
+//~^ ERROR private trait in public interface [E0445]
+//~| NOTE private trait can't be public
+pub fn foo<T: Foo> (t: T) {}
+//~^ ERROR private trait in public interface [E0445]
+//~| NOTE private trait can't be public
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod Foo {
+ struct Bar(u32);
+
+ pub fn bar() -> Bar { //~ ERROR E0446
+ Bar(0)
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Bar;
+
+trait Foo {
+ fn foo();
+}
+
+pub impl Bar {} //~ ERROR E0449
+
+pub impl Foo for Bar { //~ ERROR E0449
+ pub fn foo() {} //~ ERROR E0449
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod Bar {
+ pub struct Foo( bool, pub i32, f32, bool);
+ //~^ NOTE private field declared here
+ //~| NOTE private field declared here
+ //~| NOTE private field declared here
+}
+
+fn main() {
+ let f = Bar::Foo(false,1,0.1, true); //~ ERROR E0450
+ //~^ NOTE cannot construct with a private field
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod Bar {
+ pub struct Foo {
+ pub a: isize,
+ b: isize,
+ }
+}
+
+fn main() {
+ let f = Bar::Foo{ a: 0, b: 0 }; //~ ERROR E0451
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(foo = "")] //~ ERROR E0452
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![forbid(non_snake_case)]
+//~^ NOTE `forbid` level set here
+
+#[allow(non_snake_case)]
+//~^ ERROR allow(non_snake_case) overruled by outer forbid(non_snake_case)
+//~| NOTE overruled by previous forbid
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[link(name = "")] extern {}
+//~^ ERROR E0454
+//~| NOTE empty name given
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[link(kind = "wonderful_unicorn")] extern {} //~ ERROR E0458
+ //~^ ERROR E0459
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[link(kind = "dylib")] extern {} //~ ERROR E0459
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(plugin)]
+#![plugin(cookie_monster)] //~ ERROR E0463
+extern crate cake_is_a_lie;
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite> {
+ child: Box<Wedding<'kiss> + 'SnowWhite>, //~ ERROR E0478
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+
+const A: AtomicUsize = ATOMIC_USIZE_INIT;
+static B: &'static AtomicUsize = &A; //~ ERROR E0492
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo {
+ a: u32
+}
+
+impl Drop for Foo {
+ fn drop(&mut self) {}
+}
+
+const F : Foo = Foo { a : 0 }; //~ ERROR E0493
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo {
+ a: u32
+}
+
+static S : Foo = Foo { a : 0 };
+static A : &'static u32 = &S.a; //~ ERROR E0494
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a> {
+ a: &'a i32,
+}
+
+impl<'a> Foo<'a> {
+ fn f<'a>(x: &'a i32) { //~ ERROR E0496
+ }
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let mut i = 0;
+ let mut x = &mut i;
+ let mut a = &mut i; //~ ERROR E0499
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn inside_closure(x: &mut i32) {
+}
+
+fn outside_closure(x: &mut i32) {
+}
+
+fn foo(a: &mut i32) {
+ let bar = || {
+ inside_closure(a)
+ };
+ outside_closure(a); //~ ERROR E0501
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn bar(x: &mut i32) {}
+fn foo(a: &mut i32) {
+ let ref y = a;
+ bar(a); //~ ERROR E0502
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let mut value = 3;
+ let _borrow = &mut value;
+ let _sum = value + 1; //~ ERROR E0503
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let fancy_num = FancyNum { num: 5 };
+ let fancy_ref = &fancy_num;
+
+ let x = move || {
+ println!("child function: {}", fancy_num.num); //~ ERROR E0504
+ };
+
+ x();
+ println!("main function: {}", fancy_ref.num);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Value {}
+
+fn eat(val: Value) {}
+
+fn main() {
+ let x = Value{};
+ {
+ let _ref_to_val: &Value = &x;
+ eat(x); //~ ERROR E0505
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let mut fancy_num = FancyNum { num: 5 };
+ let fancy_ref = &fancy_num;
+ fancy_num = FancyNum { num: 6 }; //~ ERROR E0506
+
+ println!("Num: {}, Ref: {}", fancy_num.num, fancy_ref.num);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell::RefCell;
+
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(self) {}
+}
+
+fn main() {
+ let x = RefCell::new(TheDarkKnight);
+
+ x.borrow().nothing_is_true(); //~ ERROR E0507
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ let _value = array[0]; //~ ERROR E0508
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct FancyNum {
+ num: usize
+}
+
+struct DropStruct {
+ fancy: FancyNum
+}
+
+impl Drop for DropStruct {
+ fn drop(&mut self) {
+ }
+}
+
+fn main() {
+ let drop_struct = DropStruct{fancy: FancyNum{num: 5}};
+ let fancy_field = drop_struct.fancy; //~ ERROR E0509
+ println!("Fancy: {}", fancy_field.num);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(platform_intrinsics)]
+
+extern "platform-intrinsic" {
+ fn simd_add<T>(a: T, b: T) -> T;
+}
+
+fn main() {
+ unsafe { simd_add(0, 1); } //~ ERROR E0511
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn takes_u8(_: u8) {}
+
+fn main() {
+ unsafe { takes_u8(::std::mem::transmute(0u16)); } //~ ERROR E0512
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x: typeof(92) = 92; //~ ERROR E0516
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[repr(C)] //~ ERROR E0517
+type Foo = u8;
+
+#[repr(packed)] //~ ERROR E0517
+enum Foo2 {Bar, Baz}
+
+#[repr(u8)] //~ ERROR E0517
+struct Foo3 {bar: bool, baz: bool}
+
+#[repr(C)] //~ ERROR E0517
+impl Foo3 {
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline(always)] //~ ERROR E0518
+struct Foo;
+
+#[inline(never)] //~ ERROR E0518
+impl Foo {
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(specialization)]
+
+trait SpaceLlama {
+ fn fly(&self);
+}
+
+impl<T> SpaceLlama for T {
+ default fn fly(&self) {}
+}
+
+impl<T: Clone> SpaceLlama for T {
+ fn fly(&self) {}
+}
+
+impl SpaceLlama for i32 {
+ default fn fly(&self) {} //~ ERROR E0520
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(lang_items)]
+
+#[lang = "cookie"]
+fn cookie() -> ! { //~ E0522
+ loop {}
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(slice_patterns)]
+
+fn main() {
+ let r = &[1, 2, 3, 4];
+ match r {
+ &[a, b] => { //~ ERROR E0527
+ println!("a={}, b={}", a, b);
+ }
+ }
+}
fn ice<A>(a: A) {
let r = loop {};
r = r + a;
- //~^ ERROR E0277
+ //~^ ERROR the trait bound `(): Add<A>` is not satisfied
+ //~| NOTE trait `(): Add<A>` not satisfied
}
let _x = &mut a.x;
//~^ NOTE borrow of `a.x` occurs here
let _y = a.y; //~ ERROR cannot use
+ //~^ NOTE use of borrowed `a.x`
}
fn move_after_mut_borrow() {
let _x = &mut a.x.x;
//~^ NOTE borrow of `a.x.x` occurs here
let _y = a.y; //~ ERROR cannot use
+ //~^ NOTE use of borrowed `a.x.x`
}
fn move_after_mut_borrow_nested() {
//~^ ERROR casting
//~^^ HELP through a usize first
let _ = 3_i32 as bool;
- //~^ ERROR cannot cast as `bool`
+ //~^ ERROR cannot cast as `bool` [E0054]
+ //~| unsupported cast
//~| HELP compare with zero
let _ = E::A as bool;
- //~^ ERROR cannot cast as `bool`
+ //~^ ERROR cannot cast as `bool` [E0054]
+ //~| unsupported cast
//~| HELP compare with zero
let _ = 0x61u32 as char; //~ ERROR only `u8` can be cast
let _ = v as *const [u8]; //~ ERROR cannot cast
let _ = fat_v as *const Foo;
//~^ ERROR the trait bound `[u8]: std::marker::Sized` is not satisfied
+ //~| NOTE trait `[u8]: std::marker::Sized` not satisfied
//~| NOTE `[u8]` does not have a constant size known at compile-time
//~| NOTE required for the cast to the object type `Foo`
let _ = foo as *const str; //~ ERROR casting
let a : *const str = "hello";
let _ = a as *const Foo;
//~^ ERROR the trait bound `str: std::marker::Sized` is not satisfied
+ //~| NOTE trait `str: std::marker::Sized` not satisfied
//~| NOTE `str` does not have a constant size known at compile-time
//~| NOTE required for the cast to the object type `Foo`
const CONST_0: Debug+Sync = *(&0 as &(Debug+Sync));
//~^ ERROR `std::fmt::Debug + Sync + 'static: std::marker::Sized` is not satisfied
+//~| NOTE `std::fmt::Debug + Sync + 'static: std::marker::Sized` not satisfied
//~| NOTE does not have a constant size known at compile-time
//~| NOTE constant expressions must have a statically known size
const CONST_FOO: str = *"foo";
//~^ ERROR `str: std::marker::Sized` is not satisfied
+//~| NOTE `str: std::marker::Sized` not satisfied
//~| NOTE does not have a constant size known at compile-time
//~| NOTE constant expressions must have a statically known size
static STATIC_1: Debug+Sync = *(&1 as &(Debug+Sync));
//~^ ERROR `std::fmt::Debug + Sync + 'static: std::marker::Sized` is not satisfied
+//~| NOTE `std::fmt::Debug + Sync + 'static: std::marker::Sized` not satisfied
//~| NOTE does not have a constant size known at compile-time
//~| NOTE constant expressions must have a statically known size
static STATIC_BAR: str = *"bar";
//~^ ERROR `str: std::marker::Sized` is not satisfied
+//~| NOTE `str: std::marker::Sized` not satisfied
//~| NOTE does not have a constant size known at compile-time
//~| NOTE constant expressions must have a statically known size
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn _converge() -> ! { //~ ERROR computation may converge
+ 42
+}
+
+fn main() { }
+
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-pretty
-// compile-flags:-Zincremental=tmp/cfail-tests/enable-orbit-for-incr-comp -Zorbit=off
-// error-pattern:Automatically enabling `-Z orbit` because `-Z incremental` was specified
-
-#![deny(warnings)]
-
-fn main() {
- FAIL! // We just need some compilation error. What we really care about is
- // that the error pattern above is checked.
-}
fn main() {
send(before());
//~^ ERROR the trait bound `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` is not satisfied
+ //~| NOTE trait `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` not satisfied
//~| NOTE `std::rc::Rc<std::cell::Cell<i32>>` cannot be sent between threads safely
//~| NOTE required because it appears within the type `[closure
//~| NOTE required because it appears within the type `impl std::ops::Fn<(i32,)>`
send(after());
//~^ ERROR the trait bound `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` is not satisfied
+ //~| NOTE trait `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` not satisfied
//~| NOTE `std::rc::Rc<std::cell::Cell<i32>>` cannot be sent between threads safely
//~| NOTE required because it appears within the type `[closure
//~| NOTE required because it appears within the type `impl std::ops::Fn<(i32,)>`
fn cycle1() -> impl Clone {
send(cycle2().clone());
//~^ ERROR the trait bound `std::rc::Rc<std::string::String>: std::marker::Send` is not satisfied
+ //~| NOTE trait `std::rc::Rc<std::string::String>: std::marker::Send` not satisfied
//~| NOTE `std::rc::Rc<std::string::String>` cannot be sent between threads safely
//~| NOTE required because it appears within the type `impl std::clone::Clone`
//~| NOTE required by `send`
fn cycle2() -> impl Clone {
send(cycle1().clone());
//~^ ERROR the trait bound `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` is not satisfied
+ //~| NOTE trait `std::rc::Rc<std::cell::Cell<i32>>: std::marker::Send` not satisfied
//~| NOTE `std::rc::Rc<std::cell::Cell<i32>>` cannot be sent between threads safely
//~| NOTE required because it appears within the type `impl std::clone::Clone`
//~| NOTE required by `send`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use spam::{ham, eggs};
-//~^ ERROR unresolved import `spam::eggs`. There is no `eggs` in `spam`
+use spam::{ham, eggs}; //~ ERROR unresolved import `spam::eggs` [E0432]
+ //~^ no `eggs` in `spam`
mod spam {
pub fn ham() { }
// except according to those terms.
use zed::bar;
-use zed::baz;
-//~^ ERROR unresolved import `zed::baz`. There is no `baz` in `zed`
+use zed::baz; //~ ERROR unresolved import `zed::baz` [E0432]
+ //~^ no `baz` in `zed`. Did you mean to use `bar`?
mod zed {
pub fn bar() { println!("bar"); }
- use foo; //~ ERROR unresolved import
+ use foo; //~ ERROR unresolved import `foo` [E0432]
+ //~^ no `foo` in the root
}
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use baz::zed::bar;
-//~^ ERROR unresolved import `baz::zed::bar`. Could not find `zed` in `baz`
+use baz::zed::bar; //~ ERROR unresolved import `baz::zed::bar` [E0432]
+ //~^ Could not find `zed` in `baz`
mod baz {}
mod zed {
use foo::bar;
mod test {
- use bar::foo;
- //~^ ERROR unresolved import `bar::foo`. Maybe a missing `extern crate bar`?
+ use bar::foo; //~ ERROR unresolved import `bar::foo` [E0432]
+ //~^ Maybe a missing `extern crate bar`?
}
fn main() {}
// except according to those terms.
use a::f;
-use b::f;
-//~^ ERROR: unresolved import `b::f`. There is no `f` in `b`
+use b::f; //~ ERROR: unresolved import `b::f` [E0432]
+ //~^ no `f` in `b`
mod a { pub fn f() {} }
mod b { }
impl<'a> NoLifetime for Foo<'a> {
fn get<'p, T : Test<'a>>(&self) -> T {
//~^ ERROR E0195
+//~| lifetimes do not match trait
return *self as T;
}
}
// Testing that we don't fail abnormally after hitting the errors
-use unresolved::*; //~ ERROR unresolved import `unresolved::*`. Maybe a missing `extern crate unres
+use unresolved::*; //~ ERROR unresolved import `unresolved::*` [E0432]
+ //~^ Maybe a missing `extern crate unresolved`?
fn main() {}
}
fn main() {
- let f = Foo::Variant(42); //~ ERROR uses it like a function
+ let f = Foo::Variant(42);
+ //~^ ERROR uses it like a function
+ //~| struct called like a function
}
}
fn main() {
- let homura = Homura::Madoka; //~ ERROR uses it like a function
+ let homura = Homura::Madoka;
+ //~^ ERROR uses it like a function
+ //~| struct called like a function
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {}
+
+struct The;
+
+impl The {
+ fn iceman(c: Vec<[i32]>) {}
+ //~^ ERROR the trait bound `[i32]: std::marker::Sized` is not satisfied
+}
impl cat {
fn meow() {
if self.whiskers > 3 {
- //~^ ERROR: `self` is not available in a static method. Maybe a `self` argument is missing?
+ //~^ ERROR `self` is not available in a static method [E0424]
+ //~| NOTE not available in static method
+ //~| NOTE maybe a `self` argument is missing?
println!("MEOW");
}
}
($this:expr) => {
$this.width.unwrap()
//~^ ERROR cannot use `self.width` because it was mutably borrowed
+ //~| NOTE use of borrowed `*self`
}
);
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Bar {
+ type Bar;
+}
+
+struct ArrayPeano<T: Bar> {
+ data: T::Bar,
+}
+
+fn foo<T>(a: &ArrayPeano<T>) -> &[T] where T: Bar {
+ unsafe { std::mem::transmute(a) } //~ ERROR transmute called with differently sized types
+}
+
+impl Bar for () {
+ type Bar = ();
+}
+
+fn main() {
+ let x: ArrayPeano<()> = ArrayPeano { data: () };
+ foo(&x);
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use m::f as x; //~ ERROR unresolved import `m::f`. There is no `f` in `m`
+use m::f as x; //~ ERROR unresolved import `m::f` [E0432]
+ //~^ no `f` in `m`
mod m {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros)]
-
macro_rules! t {
() => ( String ; ); //~ ERROR macro expansion ignores token `;`
}
// except according to those terms.
type Alias = ();
-use Alias::*; //~ ERROR Not a module
-use std::io::Result::*; //~ ERROR Not a module
+use Alias::*;
+//~^ ERROR unresolved import `Alias::*` [E0432]
+//~| Not a module `Alias`
+use std::io::Result::*;
+//~^ ERROR unresolved import `std::io::Result::*` [E0432]
+//~| Not a module `Result`
trait T {}
use T::*; //~ ERROR items in traits are not importable
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use bar::Foo; //~ ERROR There is no `Foo` in `bar` [E0432]
+use bar::Foo; //~ ERROR unresolved import `bar::Foo` [E0432]
+ //~^ no `Foo` in `bar`
mod bar {
- use Foo; //~ ERROR There is no `Foo` in the crate root [E0432]
+ use Foo; //~ ERROR unresolved import `Foo` [E0432]
+ //~^ no `Foo` in the root
}
fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros, concat_idents)]
+#![feature(concat_idents)]
#[derive(Debug)] //~ NOTE in this expansion
struct Baz<T>(
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(conservative_impl_trait)]
+
+trait Foo {
+ fn foo(fn(u8) -> ()); //~ NOTE type in trait
+ fn bar(Option<u8>); //~ NOTE type in trait
+ fn baz((u8, u16)); //~ NOTE type in trait
+ fn qux() -> u8; //~ NOTE type in trait
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo(_: fn(u16) -> ()) {}
+ //~^ ERROR method `foo` has an incompatible type for trait
+ //~| NOTE expected u8
+ fn bar(_: Option<u16>) {}
+ //~^ ERROR method `bar` has an incompatible type for trait
+ //~| NOTE expected u8
+ fn baz(_: (u16, u16)) {}
+ //~^ ERROR method `baz` has an incompatible type for trait
+ //~| NOTE expected u8
+ fn qux() -> u16 { 5u16 }
+ //~^ ERROR method `qux` has an incompatible type for trait
+ //~| NOTE expected u8
+}
+
+fn main() {}
//~| NOTE: not a trait
//~| NOTE: aliases cannot be used for traits
-use ImportError; //~ ERROR unresolved
+use ImportError; //~ ERROR unresolved import `ImportError` [E0432]
+ //~^ no `ImportError` in the root
impl ImportError for () {} // check that this is not an additional error (c.f. #35142)
fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use self::*; //~ ERROR: unresolved import `self::*`. Cannot glob-import a module into itself.
+use self::*; //~ ERROR: unresolved import `self::*` [E0432]
+ //~^ Cannot glob-import a module into itself.
mod foo {
- use foo::*; //~ ERROR: unresolved import `foo::*`. Cannot glob-import a module into itself.
+ use foo::*; //~ ERROR: unresolved import `foo::*` [E0432]
+ //~^ Cannot glob-import a module into itself.
mod bar {
use super::bar::*;
- //~^ ERROR: unresolved import `super::bar::*`. Cannot glob-import a module into itself.
+ //~^ ERROR: unresolved import `super::bar::*` [E0432]
+ //~| Cannot glob-import a module into itself.
}
}
// except according to those terms.
#![forbid(deprecated)]
-//~^ NOTE `forbid` lint level set here
+//~^ NOTE `forbid` level set here
-#[allow(deprecated)] //~ ERROR allow(deprecated) overruled by outer forbid(deprecated)
+#[allow(deprecated)]
+//~^ ERROR allow(deprecated) overruled by outer forbid(deprecated)
+//~| NOTE overruled by previous forbid
fn main() {
}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Check we reject structs that mix a `Drop` impl with `#[repr(C)]`.
-//
-// As a special case, also check that we do not warn on such structs
-// if they also are declared with `#[unsafe_no_drop_flag]`
-
-#![feature(unsafe_no_drop_flag)]
-#![deny(drop_with_repr_extern)]
-//~^ NOTE lint level defined here
-//~| NOTE lint level defined here
-
-#[repr(C)] struct As { x: Box<i8> }
-#[repr(C)] enum Ae { Ae(Box<i8>), _None }
-
-struct Bs { x: Box<i8> }
-enum Be { Be(Box<i8>), _None }
-
-#[repr(C)] struct Cs { x: Box<i8> }
-//~^ NOTE the `#[repr(C)]` attribute is attached here
-
-impl Drop for Cs { fn drop(&mut self) { } }
-//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]`
-
-#[repr(C)] enum Ce { Ce(Box<i8>), _None }
-//~^ NOTE the `#[repr(C)]` attribute is attached here
-
-impl Drop for Ce { fn drop(&mut self) { } }
-//~^ ERROR implementing Drop adds hidden state to types, possibly conflicting with `#[repr(C)]`
-
-#[unsafe_no_drop_flag]
-#[repr(C)] struct Ds { x: Box<i8> }
-
-impl Drop for Ds { fn drop(&mut self) { } }
-
-#[unsafe_no_drop_flag]
-#[repr(C)] enum De { De(Box<i8>), _None }
-
-impl Drop for De { fn drop(&mut self) { } }
-
-fn main() {
- let a = As { x: Box::new(3) };
- let b = Bs { x: Box::new(3) };
- let c = Cs { x: Box::new(3) };
- let d = Ds { x: Box::new(3) };
-
- println!("{:?}", (*a.x, *b.x, *c.x, *d.x));
-
- let _a = Ae::Ae(Box::new(3));
- let _b = Be::Be(Box::new(3));
- let _c = Ce::Ce(Box::new(3));
- let _d = De::De(Box::new(3));
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros)]
-
// (typeof used because it's surprisingly hard to find an unparsed token after a stmt)
macro_rules! m {
() => ( i ; typeof ); //~ ERROR expected expression, found reserved keyword `typeof`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros)]
-
macro_rules! foo {
($a:expr) => $a; //~ ERROR macro rhs must be delimited
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ {
+ if (foo) => {} //~ ERROR expected `{`, found `=>`
+ }
+ {
+ if (foo)
+ bar; //~ ERROR expected `{`, found `bar`
+ //^ HELP try placing this code inside a block
+ }
+}
fn main() {
Index::index(&[] as &[i32], 2u32);
//~^ ERROR E0277
+ //~| NOTE not satisfied
//~| NOTE trait message
//~| NOTE required by
Index::index(&[] as &[i32], Foo(2u32));
//~^ ERROR E0277
+ //~| NOTE not satisfied
//~| NOTE on impl for Foo
//~| NOTE required by
Index::index(&[] as &[i32], Bar(2u32));
//~^ ERROR E0277
+ //~| NOTE not satisfied
//~| NOTE on impl for Bar
//~| NOTE required by
}
#[rustc_error]
fn main() {
Index::<u32>::index(&[1, 2, 3] as &[i32], 2u32); //~ ERROR E0277
+ //~| NOTE not satisfied
//~| NOTE a usize is required
//~| NOTE required by
}
//~^ ERROR
//~^^ NOTE a collection of type `std::option::Option<std::vec::Vec<u8>>` cannot be built from an iterator over elements of type `&u8`
//~^^^ NOTE required by `collect`
+ //~| NOTE trait `std::option::Option<std::vec::Vec<u8>>: MyFromIterator<&u8>` not satisfied
let x: String = foobar(); //~ ERROR
//~^ NOTE test error `std::string::String` with `u8` `_` `u32`
//~^^ NOTE required by `foobar`
+ //~| NOTE trait `std::string::String: Foo<u8, _, u32>` not satisfied
}
fn main() {
let x = &[1, 2, 3] as &[i32];
x[1i32]; //~ ERROR E0277
+ //~| NOTE trait `[i32]: std::ops::Index<i32>` not satisfied
//~| NOTE slice indices are of type `usize`
x[..1i32]; //~ ERROR E0277
+ //~| NOTE trait `[i32]: std::ops::Index<std::ops::RangeTo<i32>>` not satisfied
//~| NOTE slice indices are of type `usize`
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(pub_restricted, type_macros)]
+#![feature(pub_restricted)]
mod foo {
type T = ();
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(pub_restricted, type_macros)]
+#![feature(pub_restricted)]
macro_rules! define_struct {
($t:ty) => {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(pub_restricted, type_macros)]
+#![feature(pub_restricted)]
macro_rules! define_struct {
($t:ty) => {
fn test1() {
use bar::foo;
- //~^ ERROR unresolved import `bar::foo`. There is no `foo` in `bar`
+ //~^ ERROR unresolved import `bar::foo` [E0432]
+ //~| no `foo` in `bar`
}
fn test2() {
use bar::glob::foo;
- //~^ ERROR unresolved import `bar::glob::foo`. There is no `foo` in `bar::glob`
+ //~^ ERROR unresolved import `bar::glob::foo` [E0432]
+ //~| no `foo` in `bar::glob`
}
#[start] fn main(_: isize, _: *const *const u8) -> isize { 3 }
fn test1() {
use bar::gpriv;
- //~^ ERROR unresolved import `bar::gpriv`. There is no `gpriv` in `bar`
+ //~^ ERROR unresolved import `bar::gpriv` [E0432]
+ //~| no `gpriv` in `bar`
// This should pass because the compiler will insert a fake name binding
// for `gpriv`
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(question_mark, question_mark_carrier)]
+
+// Test that type inference fails where there are multiple possible return types
+// for the `?` operator.
+
+fn f(x: &i32) -> Result<i32, ()> {
+ Ok(*x)
+}
+
+fn g() -> Result<Vec<i32>, ()> {
+ let l = [1, 2, 3, 4];
+ l.iter().map(f).collect()? //~ ERROR type annotations required: cannot resolve
+}
+
+fn main() {
+ g();
+}
fn broken() {
let mut x = 3;
let mut _y = vec!(&mut x);
+ //~^ NOTE borrow of `x` occurs here
+ //~| NOTE borrow of `x` occurs here
+ //~| NOTE borrow of `x` occurs here
while x < 10 { //~ ERROR cannot use `x` because it was mutably borrowed
+ //~^ NOTE use of borrowed `x`
let mut z = x; //~ ERROR cannot use `x` because it was mutably borrowed
+ //~^ NOTE use of borrowed `x`
_y.push(&mut z); //~ ERROR `z` does not live long enough
+ //~^ NOTE does not live long enough
x += 1; //~ ERROR cannot assign
+ //~^ NOTE assignment to borrowed `x` occurs here
}
+ //~^ NOTE borrowed value only valid until here
}
+//~^ NOTE borrowed value must be valid until here
fn main() { }
mod a {
extern crate collections;
use collections::HashMap;
-//~^ ERROR unresolved import `collections::HashMap`. Did you mean `self::collections`?
+ //~^ ERROR unresolved import `collections::HashMap` [E0432]
+ //~| Did you mean `self::collections`?
mod b {
use collections::HashMap;
-//~^ ERROR unresolved import `collections::HashMap`. Did you mean `a::collections`?
+ //~^ ERROR unresolved import `collections::HashMap` [E0432]
+ //~| Did you mean `a::collections`?
mod c {
use collections::HashMap;
-//~^ ERROR unresolved import `collections::HashMap`. Did you mean `a::collections`?
+ //~^ ERROR unresolved import `collections::HashMap` [E0432]
+ //~| Did you mean `a::collections`?
mod d {
use collections::HashMap;
-//~^ ERROR unresolved import `collections::HashMap`. Did you mean `a::collections`?
+ //~^ ERROR unresolved import `collections::HashMap` [E0432]
+ //~| Did you mean `a::collections`?
}
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use super::f; //~ ERROR unresolved import `super::f`. There are too many initial `super`s.
+use super::f; //~ ERROR unresolved import `super::f` [E0432]
+ //~^ There are too many initial `super`s.
fn main() {
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(concat_idents, type_macros)]
+#![feature(concat_idents)]
pub fn main() {
struct Foo;
pub fn main() {
test_send::<rand::ThreadRng>();
- //~^ ERROR : std::marker::Send` is not satisfied
}
// suggest a where-clause, if needed
mem::size_of::<U>();
//~^ ERROR `U: std::marker::Sized` is not satisfied
+ //~| NOTE trait `U: std::marker::Sized` not satisfied
//~| HELP consider adding a `where U: std::marker::Sized` bound
//~| NOTE required by `std::mem::size_of`
mem::size_of::<Misc<U>>();
//~^ ERROR `U: std::marker::Sized` is not satisfied
+ //~| NOTE trait `U: std::marker::Sized` not satisfied
//~| HELP consider adding a `where U: std::marker::Sized` bound
//~| NOTE required because it appears within the type `Misc<U>`
//~| NOTE required by `std::mem::size_of`
<u64 as From<T>>::from;
//~^ ERROR `u64: std::convert::From<T>` is not satisfied
+ //~| NOTE trait `u64: std::convert::From<T>` not satisfied
//~| HELP consider adding a `where u64: std::convert::From<T>` bound
//~| NOTE required by `std::convert::From::from`
<u64 as From<<T as Iterator>::Item>>::from;
//~^ ERROR `u64: std::convert::From<<T as std::iter::Iterator>::Item>` is not satisfied
+ //~| NOTE trait `u64: std::convert::From<<T as std::iter::Iterator>::Item>` not satisfied
//~| HELP consider adding a `where u64:
//~| NOTE required by `std::convert::From::from`
<Misc<_> as From<T>>::from;
//~^ ERROR `Misc<_>: std::convert::From<T>` is not satisfied
+ //~| NOTE trait `Misc<_>: std::convert::From<T>` not satisfied
//~| NOTE required by `std::convert::From::from`
// ... and also not if the error is not related to the type
mem::size_of::<[T]>();
//~^ ERROR `[T]: std::marker::Sized` is not satisfied
+ //~| NOTE `[T]: std::marker::Sized` not satisfied
//~| NOTE `[T]` does not have a constant size
//~| NOTE required by `std::mem::size_of`
mem::size_of::<[&U]>();
//~^ ERROR `[&U]: std::marker::Sized` is not satisfied
+ //~| NOTE `[&U]: std::marker::Sized` not satisfied
//~| NOTE `[&U]` does not have a constant size
//~| NOTE required by `std::mem::size_of`
}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-macro_rules! Id {
- ($T:tt) => ($T);
-}
-
-struct Foo<T> {
- x: Id!(T)
- //~^ ERROR: type macros are experimental (see issue #27245)
-}
-
-fn main() {
- let foo = Foo { x: i32 };
-}
// ignore-tidy-linelength
-use foo::bar; //~ ERROR unresolved import `foo::bar`. Maybe a missing `extern crate foo`?
+use foo::bar; //~ ERROR unresolved import `foo::bar` [E0432]
+ //~^ Maybe a missing `extern crate foo`?
-use bar::Baz as x; //~ ERROR unresolved import `bar::Baz`. There is no `Baz` in `bar`. Did you mean to use `Bar`?
+use bar::Baz as x; //~ ERROR unresolved import `bar::Baz` [E0432]
+ //~^ no `Baz` in `bar`. Did you mean to use `Bar`?
-use food::baz; //~ ERROR unresolved import `food::baz`. There is no `baz` in `food`. Did you mean to use `bag`?
+use food::baz; //~ ERROR unresolved import `food::baz` [E0432]
+ //~^ no `baz` in `food`. Did you mean to use `bag`?
-use food::{beens as Foo}; //~ ERROR unresolved import `food::beens`. There is no `beens` in `food`. Did you mean to use `beans`?
+use food::{beens as Foo}; //~ ERROR unresolved import `food::beens` [E0432]
+ //~^ no `beens` in `food`. Did you mean to use `beans`?
mod bar {
pub struct Bar;
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-pub struct T;
-
-#[unsafe_no_drop_flag]
-//~^ ERROR unsafe_no_drop_flag has unstable semantics and may be removed
-pub struct S {
- pub x: T,
-}
-
-impl Drop for S {
- fn drop(&mut self) {}
-}
-
-pub fn main() {}
use use_from_trait_xc::Trait::CONST;
//~^ ERROR `CONST` is not directly importable
-use use_from_trait_xc::Foo::new;
+use use_from_trait_xc::Foo::new; //~ ERROR struct `Foo` is private
//~^ ERROR unresolved import `use_from_trait_xc::Foo::new`
-use use_from_trait_xc::Foo::C;
+use use_from_trait_xc::Foo::C; //~ ERROR struct `Foo` is private
//~^ ERROR unresolved import `use_from_trait_xc::Foo::C`
use use_from_trait_xc::Bar::new as bnew;
//~^ ERROR `C` is not directly importable
use Foo::new;
-//~^ ERROR unresolved import `Foo::new`. Not a module `Foo`
+//~^ ERROR unresolved import `Foo::new` [E0432]
+//~| Not a module `Foo`
use Foo::C2;
-//~^ ERROR unresolved import `Foo::C2`. Not a module `Foo`
+//~^ ERROR unresolved import `Foo::C2` [E0432]
+//~| Not a module `Foo`
pub trait Trait {
fn foo();
mod a {
mod b {
use self as A; //~ ERROR `self` imports are only allowed within a { } list
- //~^ ERROR unresolved import `self`. There is no `self` in the crate root
- use super as B; //~ ERROR unresolved import `super`. There is no `super` in the crate root
- use super::{self as C}; //~ERROR unresolved import `super`. There is no `super` in the crate
+ //~^ ERROR unresolved import `self` [E0432]
+ //~| no `self` in the root
+ use super as B;
+ //~^ ERROR unresolved import `super` [E0432]
+ //~| no `super` in the root
+ use super::{self as C};
+ //~^ ERROR unresolved import `super` [E0432]
+ //~| no `super` in the root
}
}
mod foo {
use self::{self};
- //~^ ERROR unresolved import `self`. There is no `self` in the crate root
+ //~^ ERROR unresolved import `self` [E0432]
+ //~| no `self` in the root
use super::{self};
- //~^ ERROR unresolved import `super`. There is no `super` in the crate root
+ //~^ ERROR unresolved import `super` [E0432]
+ //~| no `super` in the root
}
fn main() {}
}
#[rustc_variance]
-struct Foo<'a, T : Trait<'a>> { //~ ERROR ItemVariances(types=[+], regions=[-])
+struct Foo<'a, T : Trait<'a>> { //~ ERROR [-, +]
field: (T, &'a ())
}
#[rustc_variance]
-struct Bar<'a, T : Trait<'a>> { //~ ERROR ItemVariances(types=[o], regions=[o])
+struct Bar<'a, T : Trait<'a>> { //~ ERROR [o, o]
field: <T as Trait<'a>>::Type
}
// For better or worse, associated types are invariant, and hence we
// get an invariant result for `'a`.
#[rustc_variance]
-struct Foo<'a> { //~ ERROR regions=[o]
+struct Foo<'a> { //~ ERROR [o]
x: Box<Fn(i32) -> &'a i32 + 'static>
}
#![feature(rustc_attrs)]
#[rustc_variance]
-trait Foo: 'static { //~ ERROR types=[o]
+trait Foo: 'static { //~ ERROR [o]
}
#[rustc_variance]
-trait Bar<T> { //~ ERROR types=[o, o]
+trait Bar<T> { //~ ERROR [o, o]
fn do_it(&self)
where T: 'static;
}
// Regions that just appear in normal spots are contravariant:
#[rustc_variance]
-struct Test2<'a, 'b, 'c> { //~ ERROR regions=[-, -, -]
+struct Test2<'a, 'b, 'c> { //~ ERROR [-, -, -]
x: &'a isize,
y: &'b [isize],
c: &'c str
// Those same annotations in function arguments become covariant:
#[rustc_variance]
-struct Test3<'a, 'b, 'c> { //~ ERROR regions=[+, +, +]
+struct Test3<'a, 'b, 'c> { //~ ERROR [+, +, +]
x: extern "Rust" fn(&'a isize),
y: extern "Rust" fn(&'b [isize]),
c: extern "Rust" fn(&'c str),
// Mutability induces invariance:
#[rustc_variance]
-struct Test4<'a, 'b:'a> { //~ ERROR regions=[-, o]
+struct Test4<'a, 'b:'a> { //~ ERROR [-, o]
x: &'a mut &'b isize,
}
// contravariant context:
#[rustc_variance]
-struct Test5<'a, 'b:'a> { //~ ERROR regions=[+, o]
+struct Test5<'a, 'b:'a> { //~ ERROR [+, o]
x: extern "Rust" fn(&'a mut &'b isize),
}
// argument list occurs in an invariant context.
#[rustc_variance]
-struct Test6<'a, 'b:'a> { //~ ERROR regions=[-, o]
+struct Test6<'a, 'b:'a> { //~ ERROR [-, o]
x: &'a mut extern "Rust" fn(&'b isize),
}
// No uses at all is bivariant:
#[rustc_variance]
-struct Test7<'a> { //~ ERROR regions=[*]
+struct Test7<'a> { //~ ERROR [*]
//~^ ERROR parameter `'a` is never used
x: isize
}
// Try enums too.
#[rustc_variance]
-enum Test8<'a, 'b, 'c:'b> { //~ ERROR regions=[+, -, o]
+enum Test8<'a, 'b, 'c:'b> { //~ ERROR [+, -, o]
Test8A(extern "Rust" fn(&'a isize)),
Test8B(&'b [isize]),
Test8C(&'b mut &'c str),
#![feature(rustc_attrs)]
#[rustc_variance]
-enum Base<'a, 'b, 'c:'b, 'd> { //~ ERROR regions=[+, -, o, *]
+enum Base<'a, 'b, 'c:'b, 'd> { //~ ERROR [+, -, o, *]
//~^ ERROR parameter `'d` is never used
Test8A(extern "Rust" fn(&'a isize)),
Test8B(&'b [isize]),
}
#[rustc_variance]
-struct Derived1<'w, 'x:'y, 'y, 'z> { //~ ERROR regions=[*, o, -, +]
+struct Derived1<'w, 'x:'y, 'y, 'z> { //~ ERROR [*, o, -, +]
//~^ ERROR parameter `'w` is never used
f: Base<'z, 'y, 'x, 'w>
}
#[rustc_variance] // Combine - and + to yield o
-struct Derived2<'a, 'b:'a, 'c> { //~ ERROR regions=[o, o, *]
+struct Derived2<'a, 'b:'a, 'c> { //~ ERROR [o, o, *]
//~^ ERROR parameter `'c` is never used
f: Base<'a, 'a, 'b, 'c>
}
#[rustc_variance] // Combine + and o to yield o (just pay attention to 'a here)
-struct Derived3<'a:'b, 'b, 'c> { //~ ERROR regions=[o, -, *]
+struct Derived3<'a:'b, 'b, 'c> { //~ ERROR [o, -, *]
//~^ ERROR parameter `'c` is never used
f: Base<'a, 'b, 'a, 'c>
}
#[rustc_variance] // Combine + and * to yield + (just pay attention to 'a here)
-struct Derived4<'a, 'b, 'c:'b> { //~ ERROR regions=[+, -, o]
+struct Derived4<'a, 'b, 'c:'b> { //~ ERROR [+, -, o]
f: Base<'a, 'b, 'c, 'a>
}
// influence variance.
#[rustc_variance]
-trait Getter<T> { //~ ERROR types=[o, o]
+trait Getter<T> { //~ ERROR [o, o]
fn get(&self) -> T;
}
#[rustc_variance]
-trait Setter<T> { //~ ERROR types=[o, o]
+trait Setter<T> { //~ ERROR [o, o]
fn get(&self, T);
}
#[rustc_variance]
-struct TestStruct<U,T:Setter<U>> { //~ ERROR types=[+, +]
+struct TestStruct<U,T:Setter<U>> { //~ ERROR [+, +]
t: T, u: U
}
#[rustc_variance]
-enum TestEnum<U,T:Setter<U>> {//~ ERROR types=[*, +]
+enum TestEnum<U,T:Setter<U>> {//~ ERROR [*, +]
//~^ ERROR parameter `U` is never used
Foo(T)
}
#[rustc_variance]
-trait TestTrait<U,T:Setter<U>> { //~ ERROR types=[o, o, o]
+trait TestTrait<U,T:Setter<U>> { //~ ERROR [o, o, o]
fn getter(&self, u: U) -> T;
}
#[rustc_variance]
-trait TestTrait2<U> : Getter<U> { //~ ERROR types=[o, o]
+trait TestTrait2<U> : Getter<U> { //~ ERROR [o, o]
}
#[rustc_variance]
-trait TestTrait3<U> { //~ ERROR types=[o, o]
+trait TestTrait3<U> { //~ ERROR [o, o]
fn getter<T:Getter<U>>(&self);
}
#[rustc_variance]
-struct TestContraStruct<U,T:Setter<U>> { //~ ERROR types=[*, +]
+struct TestContraStruct<U,T:Setter<U>> { //~ ERROR [*, +]
//~^ ERROR parameter `U` is never used
t: T
}
#[rustc_variance]
-struct TestBox<U,T:Getter<U>+Setter<U>> { //~ ERROR types=[*, +]
+struct TestBox<U,T:Getter<U>+Setter<U>> { //~ ERROR [*, +]
//~^ ERROR parameter `U` is never used
t: T
}
trait T { fn foo(&self); }
#[rustc_variance]
-struct TOption<'a> { //~ ERROR regions=[-]
+struct TOption<'a> { //~ ERROR [-]
v: Option<Box<T + 'a>>,
}
#![feature(rustc_attrs)]
#[rustc_variance]
-struct TestImm<A, B> { //~ ERROR types=[+, +]
+struct TestImm<A, B> { //~ ERROR [+, +]
x: A,
y: B,
}
#[rustc_variance]
-struct TestMut<A, B:'static> { //~ ERROR types=[+, o]
+struct TestMut<A, B:'static> { //~ ERROR [+, o]
x: A,
y: &'static mut B,
}
#[rustc_variance]
-struct TestIndirect<A:'static, B:'static> { //~ ERROR types=[+, o]
+struct TestIndirect<A:'static, B:'static> { //~ ERROR [+, o]
m: TestMut<A, B>
}
#[rustc_variance]
-struct TestIndirect2<A:'static, B:'static> { //~ ERROR types=[o, o]
+struct TestIndirect2<A:'static, B:'static> { //~ ERROR [o, o]
n: TestMut<A, B>,
m: TestMut<B, A>
}
#[rustc_variance]
-trait Getter<A> { //~ ERROR types=[o, o]
+trait Getter<A> { //~ ERROR [o, o]
fn get(&self) -> A;
}
#[rustc_variance]
-trait Setter<A> { //~ ERROR types=[o, o]
+trait Setter<A> { //~ ERROR [o, o]
fn set(&mut self, a: A);
}
#[rustc_variance]
-trait GetterSetter<A> { //~ ERROR types=[o, o]
+trait GetterSetter<A> { //~ ERROR [o, o]
fn get(&self) -> A;
fn set(&mut self, a: A);
}
#[rustc_variance]
-trait GetterInTypeBound<A> { //~ ERROR types=[o, o]
+trait GetterInTypeBound<A> { //~ ERROR [o, o]
// Here, the use of `A` in the method bound *does* affect
// variance. Think of it as if the method requested a dictionary
// for `T:Getter<A>`. Since this dictionary is an input, it is
}
#[rustc_variance]
-trait SetterInTypeBound<A> { //~ ERROR types=[o, o]
+trait SetterInTypeBound<A> { //~ ERROR [o, o]
fn do_it<T:Setter<A>>(&self);
}
#[rustc_variance]
-struct TestObject<A, R> { //~ ERROR types=[o, o]
+struct TestObject<A, R> { //~ ERROR [o, o]
n: Box<Setter<A>+Send>,
m: Box<Getter<R>+Send>,
}
// not considered bivariant.
#[rustc_variance]
-struct InvariantMut<'a,A:'a,B:'a> { //~ ERROR types=[o, o], regions=[-]
+struct InvariantMut<'a,A:'a,B:'a> { //~ ERROR [-, o, o]
t: &'a mut (A,B)
}
#[rustc_variance]
-struct InvariantCell<A> { //~ ERROR types=[o]
+struct InvariantCell<A> { //~ ERROR [o]
t: Cell<A>
}
#[rustc_variance]
-struct InvariantIndirect<A> { //~ ERROR types=[o]
+struct InvariantIndirect<A> { //~ ERROR [o]
t: InvariantCell<A>
}
#[rustc_variance]
-struct Covariant<A> { //~ ERROR types=[+]
+struct Covariant<A> { //~ ERROR [+]
t: A, u: fn() -> A
}
#[rustc_variance]
-struct Contravariant<A> { //~ ERROR types=[-]
+struct Contravariant<A> { //~ ERROR [-]
t: fn(A)
}
#[rustc_variance]
-enum Enum<A,B,C> { //~ ERROR types=[+, -, o]
+enum Enum<A,B,C> { //~ ERROR [+, -, o]
Foo(Covariant<A>),
Bar(Contravariant<B>),
Zed(Covariant<C>,Contravariant<C>)
--- /dev/null
+// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:-g
+
+#![crate_type = "rlib"]
+
+#[macro_export]
+macro_rules! new_scope {
+ () => {
+ let x = 1;
+ }
+}
// min-lldb-version: 310
-// compile-flags:-g
+// compile-flags:-g -Zdebug-macros
// === GDB TESTS ===================================================================================
--- /dev/null
+// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-windows
+// ignore-android
+// min-lldb-version: 310
+
+// aux-build:macro-stepping.rs
+
+#![allow(unused)]
+
+#[macro_use]
+extern crate macro_stepping; // exports new_scope!()
+
+// compile-flags:-g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:run
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc1[...]
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc2[...]
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc3[...]
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc4[...]
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc5[...]
+// gdb-command:next
+// gdb-command:frame
+// gdb-check:[...]#loc6[...]
+
+// === LLDB TESTS ==================================================================================
+
+// lldb-command:set set stop-line-count-before 0
+// lldb-command:set set stop-line-count-after 1
+// Can't set both to zero or lldb will stop printing source at all. So it will output the current
+// line and the next. We deal with this by having at least 2 lines between the #loc's
+
+// lldb-command:run
+// lldb-command:next
+// lldb-command:frame select
+// lldb-check:[...]#loc1[...]
+// lldb-command:next
+// lldb-command:frame select
+// lldb-check:[...]#loc2[...]
+// lldb-command:next
+// lldb-command:frame select
+// lldb-check:[...]#loc3[...]
+// lldb-command:next
+// lldb-command:frame select
+// lldb-check:[...]#loc4[...]
+// lldb-command:next
+// lldb-command:frame select
+// lldb-check:[...]#loc5[...]
+
+macro_rules! foo {
+ () => {
+ let a = 1;
+ let b = 2;
+ let c = 3;
+ }
+}
+
+macro_rules! foo2 {
+ () => {
+ foo!();
+ let x = 1;
+ foo!();
+ }
+}
+
+fn main() {
+ zzz(); // #break
+
+ foo!(); // #loc1
+
+ foo2!(); // #loc2
+
+ let x = vec![42]; // #loc3
+
+ new_scope!(); // #loc4
+
+ println!("Hello {}", // #loc5
+ "world");
+
+ zzz(); // #loc6
+}
+
+fn zzz() {()}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the crate hash is not affected by reordering items.
+
+// revisions:rpass1 rpass2 rpass3
+// compile-flags: -Z query-dep-graph
+
+#![feature(rustc_attrs)]
+
+// Check that reordering otherwise identical items is not considered a
+// change at all.
+#[rustc_clean(label="Krate", cfg="rpass2")]
+
+// But removing an item, naturally, is.
+#[rustc_dirty(label="Krate", cfg="rpass3")]
+
+#[cfg(rpass1)]
+pub struct X {
+ pub x: u32,
+}
+
+pub struct Y {
+ pub x: u32,
+}
+
+#[cfg(rpass2)]
+pub struct X {
+ pub x: u32,
+}
+
+pub fn main() { }
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #35593. Check that we can reuse this trivially
+// equal example.
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+#![rustc_partition_reused(module="issue_35593", cfg="rpass2")]
+
+fn main() {
+ println!("hello world");
+}
// Here the only thing which changes is the string constant in `x`.
// Therefore, the compiler deduces (correctly) that typeck is not
// needed even for callers of `x`.
-//
-// It is not entirely clear why `TransCrateItem` invalidates `y` and
-// `z`, actually, I think it's because of the structure of
-// trans. -nmatsakis
fn main() { }
mod y {
use x;
- // FIXME(#35078) -- when body of `x` changes, we treat it as
- // though signature changed.
- #[rustc_dirty(label="TypeckItemBody", cfg="rpass2")]
- #[rustc_dirty(label="TransCrateItem", cfg="rpass2")]
+ #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+ #[rustc_clean(label="TransCrateItem", cfg="rpass2")]
pub fn y() {
x::x();
}
// Test that we can use a ! for an argument of type !
+// ignore-test FIXME(durka) can't be done with the current liveness code
// error-pattern:wowzers!
#![feature(never_type)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
// check that panics in destructors during assignment do not leave
// destroyed values lying around for other destructors to observe.
}
}
-#[rustc_mir]
fn foo(b: &mut Observer) {
*b.0 = FilledOnDrop(1);
}
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
// error-pattern:panic 1
// error-pattern:drop 2
}
}
-#[rustc_mir]
fn mir() {
let x = Droppable(2);
let y = Droppable(1);
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
// error-pattern:drop 1
// error-pattern:drop 2
use std::io::{self, Write};
}
}
-#[rustc_mir]
fn mir() {
let (mut xv, mut yv) = (false, false);
let x = Droppable(&mut xv, 1);
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
+
// error-pattern:drop 1
use std::io::{self, Write};
}
}
-#[rustc_mir]
fn mir<'a>(d: Droppable<'a>) {
loop {
let x = d;
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
+
// error-pattern:unwind happens
// error-pattern:drop 3
// error-pattern:drop 2
panic!("unwind happens");
}
-#[rustc_mir]
fn mir<'a>(d: Droppable<'a>) {
let (mut a, mut b) = (false, false);
let y = Droppable(&mut a, 2);
// except according to those terms.
// error-pattern:index out of bounds: the len is 5 but the index is 10
-#![feature(rustc_attrs)]
const C: [u32; 5] = [0; 5];
-#[rustc_mir]
fn test() -> u32 {
C[10]
}
// except according to those terms.
// error-pattern:index out of bounds: the len is 5 but the index is 10
-#![feature(rustc_attrs)]
const C: &'static [u8; 5] = b"hello";
-#[rustc_mir]
fn test() -> u8 {
C[10]
}
// except according to those terms.
// error-pattern:index out of bounds: the len is 5 but the index is 10
-#![feature(rustc_attrs)]
const C: &'static [u8; 5] = b"hello";
-#[rustc_mir]
fn mir() -> u8 {
C[10]
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
// error-pattern:converging_fn called
// error-pattern:0 dropped
// error-pattern:exit
write!(io::stderr(), "converging_fn called\n");
}
-#[rustc_mir]
fn mir(d: Droppable) {
converging_fn();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
// error-pattern:complex called
// error-pattern:dropped
// error-pattern:exit
}
-#[rustc_mir]
fn mir() -> u64 {
let x = Droppable;
return complex();
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
+
// error-pattern:diverging_fn called
fn diverging_fn() -> ! {
panic!("diverging_fn called")
}
-#[rustc_mir]
fn mir() {
diverging_fn();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
// error-pattern:diverging_fn called
// error-pattern:0 dropped
panic!("diverging_fn called")
}
-#[rustc_mir]
fn mir(d: Droppable) {
diverging_fn();
}
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
+
// compile-flags: -Z no-landing-pads
// error-pattern:converging_fn called
use std::io::{self, Write};
panic!("converging_fn called")
}
-#[rustc_mir]
fn mir(d: Droppable) {
let x = Droppable;
converging_fn();
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
+
// compile-flags: -Z no-landing-pads
// error-pattern:diverging_fn called
use std::io::{self, Write};
panic!("diverging_fn called")
}
-#[rustc_mir]
fn mir(d: Droppable) {
let x = Droppable;
diverging_fn();
extern crate syntax;
use rustc::mir::transform::{self, MirPass, MirSource};
-use rustc::mir::repr::{Mir, Literal};
+use rustc::mir::repr::{Mir, Literal, Location};
use rustc::mir::visit::MutVisitor;
use rustc::ty::TyCtxt;
use rustc::middle::const_val::ConstVal;
struct Visitor;
impl<'tcx> MutVisitor<'tcx> for Visitor {
- fn visit_literal(&mut self, literal: &mut Literal<'tcx>) {
+ fn visit_literal(&mut self, literal: &mut Literal<'tcx>, _: Location) {
if let Literal::Value { ref mut value } = *literal {
if let ConstVal::Integral(ConstInt::I32(ref mut i @ 11)) = *value {
*i = 42;
// except according to those terms.
#![feature(rustc_private)]
+#![allow(dead_code)]
extern crate serialize;
#![feature(plugin)]
#![plugin(lint_group_plugin_test)]
+#![allow(dead_code)]
fn lintme() { } //~ WARNING item is named 'lintme'
fn pleaselintme() { } //~ WARNING item is named 'pleaselintme'
// ignore-pretty: Random space appears with the pretty test
// compile-flags: -Z extra-plugins=lint_plugin_test
+#![allow(dead_code)]
+
fn lintme() { } //~ WARNING item is named 'lintme'
#[allow(test_lint)]
#![feature(plugin)]
#![plugin(lint_plugin_test)]
+#![allow(dead_code)]
fn lintme() { } //~ WARNING item is named 'lintme'
// aux-build:dummy_mir_pass.rs
// ignore-stage1
-#![feature(plugin, rustc_attrs)]
+#![feature(plugin)]
#![plugin(dummy_mir_pass)]
-#[rustc_mir]
fn math() -> i32 {
11
}
// no-prefer-dynamic
#![allow(dead_code)]
-#![feature(const_fn, rustc_attrs)]
+#![feature(const_fn)]
// check dtor calling order when casting enums.
}
}
-#[rustc_no_mir] // FIXME #27840 MIR miscompiles this.
fn main() {
assert_eq!(FLAG.load(Ordering::SeqCst), 0);
{
assert_eq!(e as u32, 2);
assert_eq!(FLAG.load(Ordering::SeqCst), 0);
}
- assert_eq!(FLAG.load(Ordering::SeqCst), 1);
+ assert_eq!(FLAG.load(Ordering::SeqCst), 0);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:expected item
-
// pretty-expanded FIXME #23616
#![feature(custom_attribute, test)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern:expected item
-
// pretty-expanded FIXME #23616
#![feature(custom_attribute, test)]
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(unsafe_no_drop_flag)]
-
-use std::mem::size_of;
-
-#[unsafe_no_drop_flag]
-struct Test<T> {
- a: T
-}
-
-impl<T> Drop for Test<T> {
- fn drop(&mut self) { }
-}
-
-pub fn main() {
- assert_eq!(size_of::<isize>(), size_of::<Test<isize>>());
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(unsafe_no_drop_flag)]
-
-#[unsafe_no_drop_flag]
pub struct ZeroLengthThingWithDestructor;
impl Drop for ZeroLengthThingWithDestructor {
fn drop(&mut self) {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(dead_code)]
#[derive] //~ WARNING empty trait list in `derive`
struct Foo;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn assert_sizeof() -> ! {
+ unsafe {
+ ::std::mem::transmute::<f64, [u8; 8]>(panic!())
+ }
+}
+
+fn main() { }
+
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z force-dropflag-checks=on
-// ignore-emscripten
-
-// Quick-and-dirty test to ensure -Z force-dropflag-checks=on works as
-// expected. Note that the inlined drop-flag is slated for removal
-// (RFC 320); when that happens, the -Z flag and this test should
-// simply be removed.
-//
-// See also drop-flag-skip-sanity-check.rs.
-
-use std::env;
-use std::process::Command;
-
-fn main() {
- let args: Vec<String> = env::args().collect();
- if args.len() > 1 && args[1] == "test" {
- return test();
- }
-
- let mut p = Command::new(&args[0]).arg("test").spawn().unwrap();
- // The invocation should fail due to the drop-flag sanity check.
- assert!(!p.wait().unwrap().success());
-}
-
-#[derive(Debug)]
-struct Corrupted {
- x: u8
-}
-
-impl Drop for Corrupted {
- fn drop(&mut self) { println!("dropping"); }
-}
-
-fn test() {
- {
- let mut c1 = Corrupted { x: 1 };
- let mut c2 = Corrupted { x: 2 };
- unsafe {
- let p1 = &mut c1 as *mut Corrupted as *mut u8;
- let p2 = &mut c2 as *mut Corrupted as *mut u8;
- for i in 0..std::mem::size_of::<Corrupted>() {
- // corrupt everything, *including the drop flag.
- //
- // (We corrupt via two different means to safeguard
- // against the hypothetical assignment of the
- // dtor_needed/dtor_done values to v and v+k. that
- // happen to match with one of the corruption values
- // below.)
- *p1.offset(i as isize) += 2;
- *p2.offset(i as isize) += 3;
- }
- }
- // Here, at the end of the scope of `c1` and `c2`, the
- // drop-glue should detect the corruption of (at least one of)
- // the drop-flags.
- }
- println!("We should never get here.");
-}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z force-dropflag-checks=off
-// ignore-emscripten no threads support
-
-// Quick-and-dirty test to ensure -Z force-dropflag-checks=off works as
-// expected. Note that the inlined drop-flag is slated for removal
-// (RFC 320); when that happens, the -Z flag and this test should
-// simply be removed.
-//
-// See also drop-flag-sanity-check.rs.
-
-use std::env;
-use std::process::Command;
-
-fn main() {
- let args: Vec<String> = env::args().collect();
- if args.len() > 1 && args[1] == "test" {
- return test();
- }
-
- let s = Command::new(&args[0]).arg("test").status().unwrap();
- // Invocatinn should succeed as drop-flag sanity check is skipped.
- assert!(s.success());
-}
-
-#[derive(Debug)]
-struct Corrupted {
- x: u8
-}
-
-impl Drop for Corrupted {
- fn drop(&mut self) { println!("dropping"); }
-}
-
-fn test() {
- {
- let mut c1 = Corrupted { x: 1 };
- let mut c2 = Corrupted { x: 2 };
- unsafe {
- let p1 = &mut c1 as *mut Corrupted as *mut u8;
- let p2 = &mut c2 as *mut Corrupted as *mut u8;
- for i in 0..std::mem::size_of::<Corrupted>() {
- // corrupt everything, *including the drop flag.
- //
- // (We corrupt via two different means to safeguard
- // against the hypothetical assignment of the
- // dtor_needed/dtor_done values to v and v+k. that
- // happen to match with one of the corruption values
- // below.)
- *p1.offset(i as isize) += 2;
- *p2.offset(i as isize) += 3;
- }
- }
- // Here, at the end of the scope of `c1` and `c2`, the
- // drop-glue should detect the corruption of (at least one of)
- // the drop-flags.
- }
-}
}
}
-#[rustc_mir]
fn dynamic_init(a: &Allocator, c: bool) {
let _x;
if c {
}
}
-#[rustc_mir]
fn dynamic_drop(a: &Allocator, c: bool) {
let x = a.alloc();
if c {
};
}
-#[rustc_mir]
fn assignment2(a: &Allocator, c0: bool, c1: bool) {
let mut _v = a.alloc();
let mut _w = a.alloc();
}
}
-#[rustc_mir]
fn assignment1(a: &Allocator, c0: bool) {
let mut _v = a.alloc();
let mut _w = a.alloc();
//
// ignore-pretty
-#![deny(enum_size_variance)]
+#![warn(variant_size_differences)]
#![allow(dead_code)]
enum Enum1 { }
enum Enum4 { H(isize), I(isize), J }
-enum Enum5 { //~ ERROR three times larger
- L(isize, isize, isize, isize), //~ NOTE this variant is the largest
+enum Enum5 {
+ L(isize, isize, isize, isize), //~ WARNING three times larger
M(isize),
N
}
Q(isize)
}
-#[allow(enum_size_variance)]
+#[allow(variant_size_differences)]
enum Enum7 {
R(isize, isize, isize, isize),
S(isize),
// sanity in that we generate an if-else chain giving the correct
// results.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn foo(x: bool, y: bool) -> u32 {
match (x, y) {
(false, _) => 0,
}
fn parent() {
- let file = File::open("Makefile").unwrap();
+ let file = File::open(file!()).unwrap();
let tcp1 = TcpListener::bind("127.0.0.1:0").unwrap();
let tcp2 = tcp1.try_clone().unwrap();
let addr = tcp1.local_addr().unwrap();
// compiler is hidden.
rusti::move_val_init(&mut y, x);
- // In particular, it may be tracked via a drop-flag embedded
- // in the value, or via a null pointer, or via
- // mem::POST_DROP_USIZE, or (most preferably) via a
- // stack-local drop flag.
- //
- // (This test used to build-in knowledge of how it was
- // tracked, and check that the underlying stack slot had been
- // set to `mem::POST_DROP_USIZE`.)
-
// But what we *can* observe is how many times the destructor
// for `D` is invoked, and what the last value we saw was
// during such a destructor call. We do so after the end of
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-#![feature(unsafe_no_drop_flag)]
-
static mut drop_count: usize = 0;
-#[unsafe_no_drop_flag]
struct Foo {
dropped: bool
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cell::RefCell;
+
+fn main() {
+ let x = RefCell::new(0);
+ if *x.borrow() == 0 {} else {}
+}
// except according to those terms.
-#![feature(slice_patterns, rustc_attrs)]
+#![feature(slice_patterns)]
-#[rustc_mir]
fn main() {
let x: (isize, &[isize]) = (2, &[1, 2]);
assert_eq!(match x {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(non_snake_case)]
+#![allow(dead_code)]
+#![allow(unused_variables)]
+
#[derive(Copy, Clone)]
enum Foo {
Bar,
fn main() {
let mut foo = Foo { a: 137 };
- FUNC(&mut foo); //~ ERROR bad
+ FUNC(&mut foo);
assert_eq!(foo.a, 5);
}
impl A for E {
fn b<F,G>(&self, _x: F) -> F { panic!() }
- //~^ ERROR in method `b`, type parameter 0 has 1 bound, but
}
pub fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x = 'a';
+
+ let y = match x {
+ 'a'...'b' if false => "one",
+ 'a' => "two",
+ 'a'...'b' => "three",
+ _ => panic!("what?"),
+ };
+
+ assert_eq!(y, "two");
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(unused_variables)]
+#![allow(dead_code)]
+
macro_rules! piece(
($piece:pat) => ($piece);
);
#[cfg(target_pointer_width = "64")]
pub fn main() {
assert_eq!(mem::size_of::<Cat>(), 8 as usize);
- assert_eq!(mem::size_of::<Kitty>(), 16 as usize);
+ assert_eq!(mem::size_of::<Kitty>(), 8 as usize);
}
#[cfg(target_pointer_width = "32")]
pub fn main() {
assert_eq!(mem::size_of::<Cat>(), 4 as usize);
- assert_eq!(mem::size_of::<Kitty>(), 8 as usize);
+ assert_eq!(mem::size_of::<Kitty>(), 4 as usize);
}
// except according to those terms.
// ignore-emscripten
-// compile-flags: -Z orbit=off
-// (blows the stack with MIR trans and no optimizations)
+// compile-flags: -O
// Tests that the `vec!` macro does not overflow the stack when it is
// given data larger than the stack.
+// FIXME(eddyb) Improve unoptimized codegen to avoid the temporary,
+// and thus run successfully even when compiled at -C opt-level=0.
+
const LEN: usize = 1 << 15;
use std::thread::Builder;
fn main() {
assert!(Builder::new().stack_size(LEN / 2).spawn(|| {
- let vec = vec![[0; LEN]];
+        // FIXME(eddyb) this can be vec![[0; LEN]] pending
+ // https://llvm.org/bugs/show_bug.cgi?id=28987
+ let vec = vec![unsafe { std::mem::zeroed::<[u8; LEN]>() }];
assert_eq!(vec.len(), 1);
}).unwrap().join().is_ok());
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn const_mir() -> f32 { 9007199791611905.0 }
-#[rustc_no_mir]
-fn const_old() -> f32 { 9007199791611905.0 }
-
fn main() {
let original = "9007199791611905.0"; // (1<<53)+(1<<29)+1
let expected = "9007200000000000";
assert_eq!(const_mir().to_string(), expected);
- assert_eq!(const_old().to_string(), expected);
assert_eq!(original.parse::<f32>().unwrap().to_string(), expected);
}
struct Bar<T: ?Sized>(T);
-#[rustc_mir]
fn unsize_fat_ptr<'a>(x: &'a Bar<Foo + Send + 'a>) -> &'a Bar<Foo + 'a> {
x
}
-#[rustc_mir]
fn unsize_nested_fat_ptr(x: Arc<Foo + Send>) -> Arc<Foo> {
x
}
-#[rustc_mir]
fn main() {
let x: Box<Bar<Foo + Send>> = Box::new(Bar([1,2]));
assert_eq!(unsize_fat_ptr(&*x).0.get(), [1, 2]);
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+#![feature(fn_traits)]
+
+struct Test;
+
+impl FnOnce<(u32, u32)> for Test {
+ type Output = u32;
+
+ extern "rust-call" fn call_once(self, (a, b): (u32, u32)) -> u32 {
+ a + b
+ }
+}
+
+fn main() {
+ assert_eq!(Test(1u32, 2u32), 3u32);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+
+struct Foo<T: ?Sized> {
+ a: i64,
+ b: bool,
+ c: T,
+}
+
+fn main() {
+ let foo: &Foo<i32> = &Foo { a: 1, b: false, c: 2i32 };
+ let foo_unsized: &Foo<Send> = foo;
+ assert_eq!(mem::size_of_val(foo), mem::size_of_val(foo_unsized));
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::ops::Deref;
+
+fn main() {
+ if env_var("FOOBAR").as_ref().map(Deref::deref).ok() == Some("yes") {
+ panic!()
+ }
+
+ let env_home: Result<String, ()> = Ok("foo-bar-baz".to_string());
+ let env_home = env_home.as_ref().map(Deref::deref).ok();
+
+ if env_home == Some("") { panic!() }
+}
+
+#[inline(never)]
+fn env_var(s: &str) -> Result<String, VarError> {
+ Err(VarError::NotPresent)
+}
+
+pub enum VarError {
+ NotPresent,
+ NotUnicode(String),
+}
#![feature(advanced_slice_patterns)]
#![feature(slice_patterns)]
-#![feature(rustc_attrs)]
use std::ops::Add;
[a, b, b, a]
}
-#[rustc_mir]
fn main() {
assert_eq!(foo([1, 2, 3]), (1, 3, 6));
// pretty-expanded FIXME #23616
-#![allow(unreachable_code)]
+#![allow(dead_code)]
fn test() {
let _v: isize;
_v = 1;
return;
- _v = 2; //~ WARNING: unreachable statement
+ _v = 2;
}
pub fn main() {
#![feature(advanced_slice_patterns)]
#![feature(slice_patterns)]
-#![feature(rustc_attrs)]
-#[rustc_mir]
fn match_vecs<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str {
match (l1, l2) {
(&[], &[]) => "both empty",
}
}
-#[rustc_mir]
fn match_vecs_cons<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str {
match (l1, l2) {
(&[], &[]) => "both empty",
}
}
-#[rustc_mir]
fn match_vecs_snoc<'a, T>(l1: &'a [T], l2: &'a [T]) -> &'static str {
match (l1, l2) {
(&[], &[]) => "both empty",
}
}
-#[rustc_mir]
fn match_nested_vecs_cons<'a, T>(l1: Option<&'a [T]>, l2: Result<&'a [T], ()>) -> &'static str {
match (l1, l2) {
(Some(&[]), Ok(&[])) => "Some(empty), Ok(empty)",
}
}
-#[rustc_mir]
fn match_nested_vecs_snoc<'a, T>(l1: Option<&'a [T]>, l2: Result<&'a [T], ()>) -> &'static str {
match (l1, l2) {
(Some(&[]), Ok(&[])) => "Some(empty), Ok(empty)",
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
#[repr(C, u32)]
enum CEnum {
Hello = 30,
World = 60
}
-#[rustc_mir]
fn test1(c: CEnum) -> i32 {
let c2 = CEnum::Hello;
match (c, c2) {
fn drop(&mut self) {}
}
-#[rustc_mir]
fn test2() -> Pakd {
Pakd { a: 42, b: 42, c: 42, d: 42, e: () }
}
#[derive(PartialEq, Debug)]
struct TupleLike(u64, u32);
-#[rustc_mir]
fn test3() -> TupleLike {
TupleLike(42, 42)
}
-#[rustc_mir]
fn test4(x: fn(u64, u32) -> TupleLike) -> (TupleLike, TupleLike) {
let y = TupleLike;
(x(42, 84), y(42, 84))
}
-#[rustc_mir]
fn test5(x: fn(u32) -> Option<u32>) -> (Option<u32>, Option<u32>) {
let y = Some;
(x(42), y(42))
// Tests that the result of type ascription has adjustments applied
-#![feature(rustc_attrs, type_ascription)]
+#![feature(type_ascription)]
-#[rustc_mir]
fn main() {
let x = [1, 2, 3];
// The RHS should coerce to &[i32]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
use std::mem;
use std::ops::{
AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign,
main_mir();
}
-#[rustc_mir]
fn main_mir() {
let mut x = Int(1);
}
impl AddAssign for Int {
- #[rustc_mir]
fn add_assign(&mut self, rhs: Int) {
self.0 += rhs.0;
}
}
impl BitAndAssign for Int {
- #[rustc_mir]
fn bitand_assign(&mut self, rhs: Int) {
self.0 &= rhs.0;
}
}
impl BitOrAssign for Int {
- #[rustc_mir]
fn bitor_assign(&mut self, rhs: Int) {
self.0 |= rhs.0;
}
}
impl BitXorAssign for Int {
- #[rustc_mir]
fn bitxor_assign(&mut self, rhs: Int) {
self.0 ^= rhs.0;
}
}
impl DivAssign for Int {
- #[rustc_mir]
fn div_assign(&mut self, rhs: Int) {
self.0 /= rhs.0;
}
}
impl MulAssign for Int {
- #[rustc_mir]
fn mul_assign(&mut self, rhs: Int) {
self.0 *= rhs.0;
}
}
impl RemAssign for Int {
- #[rustc_mir]
fn rem_assign(&mut self, rhs: Int) {
self.0 %= rhs.0;
}
}
impl ShlAssign<u8> for Int {
- #[rustc_mir]
fn shl_assign(&mut self, rhs: u8) {
self.0 <<= rhs;
}
}
impl ShlAssign<u16> for Int {
- #[rustc_mir]
fn shl_assign(&mut self, rhs: u16) {
self.0 <<= rhs;
}
}
impl ShrAssign<u8> for Int {
- #[rustc_mir]
fn shr_assign(&mut self, rhs: u8) {
self.0 >>= rhs;
}
}
impl ShrAssign<u16> for Int {
- #[rustc_mir]
fn shr_assign(&mut self, rhs: u16) {
self.0 >>= rhs;
}
}
impl SubAssign for Int {
- #[rustc_mir]
fn sub_assign(&mut self, rhs: Int) {
self.0 -= rhs.0;
}
}
impl AddAssign<i32> for Slice {
- #[rustc_mir]
fn add_assign(&mut self, rhs: i32) {
for lhs in &mut self.0 {
*lhs += rhs;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
use std::ops::{Deref, DerefMut};
pub struct MyRef(u32);
}
-#[rustc_mir]
fn deref(x: &MyRef) -> &u32 {
x
}
-#[rustc_mir]
fn deref_mut(x: &mut MyRef) -> &mut u32 {
x
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs, box_syntax)]
+#![feature(box_syntax)]
-#[rustc_mir]
fn test() -> Box<i32> {
box 42
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn test1(x: i8) -> i32 {
match x {
1...10 => 0,
const U: Option<i8> = Some(10);
const S: &'static str = "hello";
-#[rustc_mir]
fn test2(x: i8) -> i32 {
match Some(x) {
U => 0,
}
}
-#[rustc_mir]
fn test3(x: &'static str) -> i32 {
match x {
S => 0,
None
}
-#[rustc_mir]
fn test4(x: u64) -> i32 {
let opt = Opt::Some{ v: x };
match opt {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
trait Trait {
type Type;
}
type Type = u32;
}
-#[rustc_mir]
fn foo<'a>(t: <&'a () as Trait>::Type) -> <&'a () as Trait>::Type {
t
}
-#[rustc_mir]
fn main() {
assert_eq!(foo(4), 4);
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
pub extern "C" fn tuple2() -> (u16, u8) {
(1, 2)
}
(1, 2, 3)
}
-#[rustc_mir]
pub fn test2() -> u8 {
tuple2().1
}
-#[rustc_mir]
pub fn test3() -> u8 {
tuple3().2
}
// Tests the coercion casts are handled properly
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn main() {
// This should produce only a reification of f,
// not a fn -> fn cast as well
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs, coerce_unsized, unsize)]
+#![feature(coerce_unsized, unsize)]
use std::ops::CoerceUnsized;
use std::marker::Unsize;
-#[rustc_mir]
fn identity_coercion(x: &(Fn(u32)->u32 + Send)) -> &Fn(u32)->u32 {
x
}
-#[rustc_mir]
fn fn_coercions(f: &fn(u32) -> u32) ->
(unsafe fn(u32) -> u32,
&(Fn(u32) -> u32+Send))
(*f, f)
}
-#[rustc_mir]
fn simple_array_coercion(x: &[u8; 3]) -> &[u8] { x }
fn square(a: u32) -> u32 { a * a }
impl<'a, T: ?Sized+Unsize<U>, U: ?Sized>
CoerceUnsized<TrivPtrWrapper<'a, U>> for TrivPtrWrapper<'a, T> {}
-#[rustc_mir]
fn coerce_ptr_wrapper(p: PtrWrapper<[u8; 3]>) -> PtrWrapper<[u8]> {
p
}
-#[rustc_mir]
fn coerce_triv_ptr_wrapper(p: TrivPtrWrapper<[u8; 3]>) -> TrivPtrWrapper<[u8]> {
p
}
-#[rustc_mir]
fn coerce_fat_ptr_wrapper(p: PtrWrapper<Fn(u32) -> u32+Send>)
-> PtrWrapper<Fn(u32) -> u32> {
p
}
-#[rustc_mir]
fn coerce_ptr_wrapper_poly<'a, T, Trait: ?Sized>(p: PtrWrapper<'a, T>)
-> PtrWrapper<'a, Trait>
where PtrWrapper<'a, T>: CoerceUnsized<PtrWrapper<'a, Trait>>
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
#[derive(PartialEq, Debug)]
struct Point {
const TUPLE2: (&'static str, &'static str) = ("hello","world");
const PAIR_NEWTYPE: (Newtype<i32>, Newtype<i32>) = (Newtype(42), Newtype(42));
-#[rustc_mir]
fn mir() -> (Point, (i32, i32), (&'static str, &'static str), (Newtype<i32>, Newtype<i32>)) {
let struct1 = STRUCT;
let tuple1 = TUPLE1;
const NEWTYPE: Newtype<&'static str> = Newtype("foobar");
-#[rustc_mir]
fn test_promoted_newtype_str_ref() {
let x = &NEWTYPE;
assert_eq!(x, &Newtype("foobar"));
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z orbit
-// Tests that -Z orbit affects functions from other crates.
-
-#![feature(unsafe_no_drop_flag)]
-
-#[unsafe_no_drop_flag]
-struct Foo;
-
-impl Drop for Foo {
- fn drop(&mut self) {
- panic!("MIR trans is not enabled for mem::forget");
- }
-}
-
-fn main() {
- let x = Foo;
- std::mem::forget(x);
-}
// test that ordinary fat pointer operations work.
-#![feature(rustc_attrs)]
-
struct Wrapper<T: ?Sized>(u32, T);
struct FatPtrContainer<'a> {
ptr: &'a [u8]
}
-#[rustc_mir]
fn fat_ptr_project(a: &Wrapper<[u8]>) -> &[u8] {
&a.1
}
-#[rustc_mir]
fn fat_ptr_simple(a: &[u8]) -> &[u8] {
a
}
-#[rustc_mir]
fn fat_ptr_via_local(a: &[u8]) -> &[u8] {
let x = a;
x
}
-#[rustc_mir]
fn fat_ptr_from_struct(s: FatPtrContainer) -> &[u8] {
s.ptr
}
-#[rustc_mir]
fn fat_ptr_to_struct(a: &[u8]) -> FatPtrContainer {
FatPtrContainer { ptr: a }
}
-#[rustc_mir]
fn fat_ptr_store_to<'a>(a: &'a [u8], b: &mut &'a [u8]) {
*b = a;
}
-#[rustc_mir]
fn fat_ptr_constant() -> &'static str {
"HELLO"
}
}
}
-#[rustc_mir]
fn fat_ptr_move_then_drop(a: Box<[DropMe]>) {
let b = a;
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const TEST_DATA: [u8; 32 * 1024 * 1024] = [42; 32 * 1024 * 1024];
+
+// Check that the promoted copy of TEST_DATA doesn't
+// leave an alloca from an unused temp behind, which,
+// without optimizations, can still blow the stack.
+fn main() {
+ println!("{}", TEST_DATA.len());
+}
// #30527 - We were not generating arms with guards in certain cases.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn match_with_guard(x: Option<i8>) -> i8 {
match x {
Some(xyz) if xyz > 100 => 0,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(libc, rustc_attrs)]
+#![feature(libc)]
extern crate libc;
const STR: &'static str = "hello";
const BSTR: &'static [u8; 5] = b"hello";
-#[rustc_mir]
fn from_ptr()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, *const ()) {
let f = 1_usize as *const libc::FILE;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11)
}
-#[rustc_mir]
fn from_1()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1usize()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_usize as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1isize()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_isize as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1u8()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_u8 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1i8()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_i8 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1u16()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_u16 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1i16()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_i16 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1u32()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_u32 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1i32()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_i32 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1u64()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_u64 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_1i64()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, *const libc::FILE) {
let c1 = 1_i64 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13)
}
-#[rustc_mir]
fn from_bool()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64) {
let c1 = true as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10)
}
-#[rustc_mir]
fn from_1f32()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64) {
let c1 = 1.0_f32 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12)
}
-#[rustc_mir]
fn from_1f64()
-> (isize, usize, i8, i16, i32, i64, u8, u16, u32, u64, f32, f64) {
let c1 = 1.0f64 as isize;
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12)
}
-#[rustc_mir]
fn other_casts()
-> (*const u8, *const isize, *const u8, *const u8) {
let c1 = func as *const u8;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// compile-flags: -Z force-overflow-checks=off -Z orbit
+// compile-flags: -Z force-overflow-checks=off
// Test that with MIR trans, overflow checks can be
// turned off, even when they're from core::ops::*.
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
// ignore-pretty : (#23623) problems when ending with // comments
// check raw fat pointer ops in mir
ne: true
};
-#[rustc_mir]
fn compare_su8(a: *const S<[u8]>, b: *const S<[u8]>) -> ComparisonResults {
ComparisonResults {
lt: a < b,
}
}
-#[rustc_mir]
fn compare_au8(a: *const [u8], b: *const [u8]) -> ComparisonResults {
ComparisonResults {
lt: a < b,
}
}
-#[rustc_mir]
fn compare_foo<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> ComparisonResults {
ComparisonResults {
lt: a < b,
}
}
-#[rustc_mir]
fn simple_eq<'a>(a: *const (Foo+'a), b: *const (Foo+'a)) -> bool {
let result = a == b;
result
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-// aux-build:mir_external_refs.rs
+// aux-build:mir_external_refs.rs
extern crate mir_external_refs as ext;
u
}
-#[rustc_mir]
fn t1() -> fn()->u8 {
regular
}
-#[rustc_mir]
fn t2() -> fn(u8)->E {
E::U
}
-#[rustc_mir]
fn t3() -> fn(u8)->S {
S
}
-#[rustc_mir]
fn t4() -> fn()->u8 {
S::hey
}
-#[rustc_mir]
fn t5() -> fn(&S)-> u8 {
<S as X>::hoy
}
-#[rustc_mir]
fn t6() -> fn()->u8{
ext::regular_fn
}
-#[rustc_mir]
fn t7() -> fn(u8)->ext::E {
ext::E::U
}
-#[rustc_mir]
fn t8() -> fn(u8)->ext::S {
ext::S
}
-#[rustc_mir]
fn t9() -> fn()->u8 {
ext::S::hey
}
-#[rustc_mir]
fn t10() -> fn(&ext::S)->u8 {
<ext::S as ext::X>::hoy
}
-#[rustc_mir]
fn t11() -> fn(u8)->u8 {
parametric
}
-#[rustc_mir]
fn t12() -> u8 {
C
}
-#[rustc_mir]
fn t13() -> [u8; 5] {
C2
}
-#[rustc_mir]
fn t13_2() -> [u8; 3] {
C3
}
-#[rustc_mir]
fn t14() -> fn()-> u8 {
<S as X>::hoy2
}
-#[rustc_mir]
fn t15() -> fn(&S)-> u8 {
S::hey2
}
-#[rustc_mir]
fn t16() -> fn(u32, u32)->u64 {
F::f
}
-#[rustc_mir]
fn t17() -> fn(u32, u64)->u64 {
F::f
}
-#[rustc_mir]
fn t18() -> fn(u64, u64)->u64 {
F::f
}
-#[rustc_mir]
fn t19() -> fn(u64, u32)->u64 {
F::f
}
-#[rustc_mir]
fn t20() -> fn(u64, u32)->(u64, u32) {
<u32 as T<_, _>>::staticmeth
}
-#[rustc_mir]
fn t21() -> Unit {
Unit
}
-#[rustc_mir]
fn t22() -> Option<u8> {
None
}
-#[rustc_mir]
fn t23() -> (CEnum, CEnum) {
(CEnum::A, CEnum::B)
}
-#[rustc_mir]
fn t24() -> fn(u8) -> S {
C4
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn foo((x, y): (i8, i8)) {
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
use std::marker::PhantomData;
pub trait DataBind {
pub offsets: <Global<[u32; 2]> as DataBind>::Data,
}
-#[rustc_mir]
fn create_data() -> Data {
let mut d = Data { offsets: [1, 2] };
d.offsets[0] = 3;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn test1(f: f32) -> bool {
// test that we properly promote temporaries to allocas when a temporary is assigned to
// multiple times (assignment is still happening once ∀ possible dataflows).
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-#[rustc_mir]
fn into_inner() -> [u64; 1024] {
let mut x = 10 + 20;
[x; 1024]
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-#[rustc_mir]
fn into_inner(x: u64) -> [u64; 1024] {
[x; 2*4*8*16]
}
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
fn converging_fn() -> u64 {
43
}
-#[rustc_mir]
fn mir() -> u64 {
let x;
loop {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs, fn_traits)]
+#![feature(fn_traits)]
-#[rustc_mir]
fn test1(a: isize, b: (i32, i32), c: &[i32]) -> (isize, (i32, i32), &[i32]) {
// Test passing a number of arguments including a fat pointer.
// Also returning via an out pointer
callee(a, b, c)
}
-#[rustc_mir]
fn test2(a: isize) -> isize {
// Test passing a single argument.
// Not using out pointer.
fn inherent_method(&self, a: isize) -> isize { a }
}
-#[rustc_mir]
fn test3(x: &Foo, a: isize) -> isize {
// Test calling inherent method
x.inherent_method(a)
}
impl Bar for Foo {}
-#[rustc_mir]
fn test4(x: &Foo, a: isize) -> isize {
// Test calling extension method
x.extension_method(a)
}
-#[rustc_mir]
fn test5(x: &Bar, a: isize) -> isize {
// Test calling method on trait object
x.extension_method(a)
}
-#[rustc_mir]
fn test6<T: Bar>(x: &T, a: isize) -> isize {
// Test calling extension method on generic callee
x.extension_method(a)
fn one() -> isize { 1 }
}
-#[rustc_mir]
fn test7() -> isize {
// Test calling trait static method
<isize as One>::one()
fn two() -> isize { 2 }
}
-#[rustc_mir]
fn test8() -> isize {
// Test calling impl static method
Two::two()
x + y.0 * y.1
}
-#[rustc_mir]
fn test9() -> u32 {
simple_extern(41, (42, 43))
}
-#[rustc_mir]
fn test_closure<F>(f: &F, x: i32, y: i32) -> i32
where F: Fn(i32, i32) -> i32
{
f(x, y)
}
-#[rustc_mir]
fn test_fn_object(f: &Fn(i32, i32) -> i32, x: i32, y: i32) -> i32 {
f(x, y)
}
-#[rustc_mir]
fn test_fn_impl(f: &&Fn(i32, i32) -> i32, x: i32, y: i32) -> i32 {
// This call goes through the Fn implementation for &Fn provided in
// core::ops::impls. It expands to a static Fn::call() that calls the
f(x, y)
}
-#[rustc_mir]
fn test_fn_direct_call<F>(f: &F, x: i32, y: i32) -> i32
where F: Fn(i32, i32) -> i32
{
f.call((x, y))
}
-#[rustc_mir]
fn test_fn_const_call<F>(f: &F) -> i32
where F: Fn(i32, i32) -> i32
{
f.call((100, -1))
}
-#[rustc_mir]
fn test_fn_nil_call<F>(f: &F) -> i32
where F: Fn() -> i32
{
f()
}
-#[rustc_mir]
fn test_fn_transmute_zst(x: ()) -> [(); 1] {
fn id<T>(x: T) -> T {x}
})
}
-#[rustc_mir]
fn test_fn_ignored_pair() -> ((), ()) {
((), ())
}
-#[rustc_mir]
fn test_fn_ignored_pair_0() {
test_fn_ignored_pair().0
}
-#[rustc_mir]
fn id<T>(x: T) -> T { x }
-#[rustc_mir]
fn ignored_pair_named() -> (Foo, Foo) {
(Foo, Foo)
}
-#[rustc_mir]
fn test_fn_ignored_pair_named() -> (Foo, Foo) {
id(ignored_pair_named())
}
-#[rustc_mir]
fn test_fn_nested_pair(x: &((f32, f32), u32)) -> (f32, f32) {
let y = *x;
let z = y.0;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
#[link(name = "rust_test_helpers")]
extern {
fn rust_interesting_average(_: i64, ...) -> f64;
}
-#[rustc_mir]
fn test<T, U>(a: i64, b: i64, c: i64, d: i64, e: i64, f: T, g: U) -> i64 {
unsafe {
rust_interesting_average(6, a, a as f64,
where A: Iterator, B: Iterator<Item=A::Item>
{
// This is the function we care about
- #[rustc_mir]
fn next(&mut self) -> Option<A::Item> {
match self.state {
State::Both => match self.a.next() {
// A simple spike test for MIR version of trans.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn sum(x: i32, y: i32) -> i32 {
x + y
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
enum Abc {
A(u8),
B(i8),
D,
}
-#[rustc_mir]
fn foo(x: Abc) -> i32 {
match x {
Abc::C => 3,
}
}
-#[rustc_mir]
fn foo2(x: Abc) -> bool {
match x {
Abc::D => true,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
pub fn foo(x: i8) -> i32 {
match x {
1 => 0,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
-#[rustc_mir]
fn mir() -> (){
let x = 1;
let mut y = 0;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
-
fn nil() {}
-#[rustc_mir]
fn mir(){
nil()
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem::size_of;
+
+struct Test<T> {
+ a: T
+}
+
+impl<T> Drop for Test<T> {
+ fn drop(&mut self) { }
+}
+
+pub fn main() {
+ assert_eq!(size_of::<isize>(), size_of::<Test<isize>>());
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(repr_simd, platform_intrinsics, concat_idents,
- type_macros, test)]
+#![feature(repr_simd, platform_intrinsics, concat_idents, test)]
#![allow(non_camel_case_types)]
extern crate test;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(question_mark, question_mark_carrier)]
+
+use std::ops::Carrier;
+
+enum MyResult<T, U> {
+ Awesome(T),
+ Terrible(U)
+}
+
+impl<U, V> Carrier for MyResult<U, V> {
+ type Success = U;
+ type Error = V;
+
+ fn from_success(u: U) -> MyResult<U, V> {
+ MyResult::Awesome(u)
+ }
+
+ fn from_error(e: V) -> MyResult<U, V> {
+ MyResult::Terrible(e)
+ }
+
+ fn translate<T>(self) -> T
+ where T: Carrier<Success=U, Error=V>
+ {
+ match self {
+ MyResult::Awesome(u) => T::from_success(u),
+ MyResult::Terrible(e) => T::from_error(e),
+ }
+ }
+}
+
+fn f(x: i32) -> Result<i32, String> {
+ if x == 0 {
+ Ok(42)
+ } else {
+ let y = g(x)?;
+ Ok(y)
+ }
+}
+
+fn g(x: i32) -> MyResult<i32, String> {
+ let _y = f(x - 1)?;
+ MyResult::Terrible("Hello".to_owned())
+}
+
+fn h() -> MyResult<i32, String> {
+ let a: Result<i32, &'static str> = Err("Hello");
+ let b = a?;
+ MyResult::Awesome(b)
+}
+
+fn i() -> MyResult<i32, String> {
+ let a: MyResult<i32, &'static str> = MyResult::Terrible("Hello");
+ let b = a?;
+ MyResult::Awesome(b)
+}
+
+fn main() {
+ assert!(f(0) == Ok(42));
+ assert!(f(10) == Err("Hello".to_owned()));
+ let _ = h();
+ let _ = i();
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros)]
-
use std::ops::*;
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(type_macros)]
-
macro_rules! Tuple {
{ $A:ty,$B:ty } => { ($A, $B) }
}
// except according to those terms.
-#![allow(unreachable_code)]
#![allow(unused_variables)]
+#![allow(dead_code)]
fn id(x: bool) -> bool { x }
fn call_id() {
let c = panic!();
- id(c); //~ WARNING unreachable statement
+ id(c);
}
fn call_id_3() { id(return) && id(return); }
#![feature(advanced_slice_patterns)]
#![feature(slice_patterns)]
-#![feature(rustc_attrs)]
use std::fmt::Debug;
-#[rustc_mir(graphviz="mir.gv")]
fn foldl<T, U, F>(values: &[T],
initial: U,
mut function: F)
}
}
-#[rustc_mir]
fn foldr<T, U, F>(values: &[T],
initial: U,
mut function: F)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(slice_patterns, rustc_attrs)]
+#![feature(slice_patterns)]
-#[rustc_mir]
pub fn main() {
let x = &[1, 2, 3, 4, 5];
let x: &[isize] = &[1, 2, 3, 4, 5];
#![feature(advanced_slice_patterns)]
#![feature(slice_patterns)]
-#![feature(rustc_attrs)]
-#[rustc_mir]
fn a() {
let x = [1];
match x {
}
}
-#[rustc_mir]
fn b() {
let x = [1, 2, 3];
match x {
}
-#[rustc_mir]
fn b_slice() {
let x : &[_] = &[1, 2, 3];
match x {
}
}
-#[rustc_mir]
fn c() {
let x = [1];
match x {
}
}
-#[rustc_mir]
fn d() {
let x = [1, 2, 3];
let branch = match x {
assert_eq!(branch, 1);
}
-#[rustc_mir]
fn e() {
let x: &[isize] = &[1, 2, 3];
let a = match *x {
#![feature(slice_patterns)]
-#![feature(rustc_attrs)]
struct Foo {
string: &'static str
}
-#[rustc_mir]
pub fn main() {
let x = [
Foo { string: "foo" },
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs, unsafe_no_drop_flag)]
-
-// ignore-pretty : (#23623) problems when ending with // comments
-
static mut destructions : isize = 3;
-#[rustc_no_mir] // FIXME #29855 MIR doesn't handle all drops correctly.
pub fn foo() {
- #[unsafe_no_drop_flag]
struct Foo;
impl Drop for Foo {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
#![feature(slice_patterns)]
-#[rustc_mir]
fn main() {
let x = [(), ()];
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[metadata]
+"checksum aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67077478f0a03952bed2e6786338d400d40c25e9836e08ad50af96607317fd03"
+"checksum env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aba65b63ffcc17ffacd6cf5aa843da7c5a25e3bd4bbe0b7def8b214e411250e5"
+"checksum libc 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "95ca44454e7cfe7f8a2095a41a10c79d96a177c0b1672cbf1a30d901a9c16ee5"
+"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
+"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
+"checksum mempool 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f997e65fe3eb7a6f8557a7a477de9ed5c511850c85363d13f7b0145b526ed36a"
+"checksum regex 0.1.62 (registry+https://github.com/rust-lang/crates.io-index)" = "22bdab319e36735729aa280752c9293b29ec0053a6810679d697515f80a986fe"
+"checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9"
+"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
self.fatal_proc_rec("compilation failed!", &proc_res);
}
+ let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
+ if !expected_errors.is_empty() {
+ self.check_expected_errors(expected_errors, &proc_res);
+ }
+
let proc_res = self.exec_compiled_test();
if !proc_res.status.success() {
fn check_expected_errors(&self,
expected_errors: Vec<errors::Error>,
proc_res: &ProcRes) {
- if proc_res.status.success() {
+ if proc_res.status.success() &&
+ expected_errors.iter().any(|x| x.kind == Some(ErrorKind::Error)) {
self.fatal_proc_rec("process did not return an error status", proc_res);
}
match self.config.mode {
CompileFail |
ParseFail |
+ RunPass |
Incremental => {
// If we are extracting and matching errors in the new
// fashion, then you want JSON mode. Old-skool error
args.push(dir_opt);
}
RunFail |
- RunPass |
RunPassValgrind |
Pretty |
DebugInfoGdb |
#[cfg(unix)]
pub fn check(path: &Path, bad: &mut bool) {
use std::fs;
+ use std::process::{Command, Stdio};
use std::os::unix::prelude::*;
super::walk(path,
let metadata = t!(fs::symlink_metadata(&file), &file);
if metadata.mode() & 0o111 != 0 {
- println!("binary checked into source: {}", file.display());
- *bad = true;
+ let rel_path = file.strip_prefix(path).unwrap();
+ let git_friendly_path = rel_path.to_str().unwrap().replace("\\", "/");
+ let ret_code = Command::new("git")
+ .arg("ls-files")
+ .arg(&git_friendly_path)
+ .current_dir(path)
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .status()
+ .unwrap_or_else(|e| {
+ panic!("could not run git ls-files: {}", e);
+ });
+ if ret_code.success() {
+ println!("binary checked into source: {}", file.display());
+ *bad = true;
+ }
}
})
}