Remove the alternate stack with sigaltstack before unmapping it.
Also reuse the existing signal stack if one is already set; this is
especially useful when working with sanitizers that configure the
alternate stack themselves.
This change depends on SS_DISABLE recently introduced in libc crate and updates
this git submodule accordingly.
the `-Z unstable-options` flag.
* [When running tests with `--test`, rustdoc will pass `--cfg`
arguments to the compiler][1.7dt].
-* [The compiler is built with RPATH information by default][1.7rp].
+* [The compiler is built with RPATH information by default][1.7rpa].
This means that it will be possible to run `rustc` when installed in
unusual configurations without configuring the dynamic linker search
path explicitly.
-* [`rustc` passes `--enable-new-dtags` to GNU ld][1.7dt]. This makes
+* [`rustc` passes `--enable-new-dtags` to GNU ld][1.7dta]. This makes
any RPATH entries (emitted with `-C rpath`) *not* take precedence
over `LD_LIBRARY_PATH`.
[1.7cp]: https://github.com/rust-lang/cargo/pull/2224
[1.7d]: https://github.com/rust-lang/rust/pull/30724
[1.7dt]: https://github.com/rust-lang/rust/pull/30372
-[1.7dt]: https://github.com/rust-lang/rust/pull/30394
+[1.7dta]: https://github.com/rust-lang/rust/pull/30394
[1.7f]: https://github.com/rust-lang/rust/pull/30672
[1.7h]: https://github.com/rust-lang/rust/pull/30818
[1.7j]: https://github.com/rust-lang/rust/pull/30711
[1.7m]: https://github.com/rust-lang/rust/pull/30381
[1.7p]: https://github.com/rust-lang/rust/pull/30681
[1.7rp]: https://github.com/rust-lang/rust/pull/29498
-[1.7rp]: https://github.com/rust-lang/rust/pull/30353
+[1.7rpa]: https://github.com/rust-lang/rust/pull/30353
[1.7rr]: https://github.com/rust-lang/cargo/pull/2279
[1.7sf]: https://github.com/rust-lang/rust/pull/30389
[1.7utf8]: https://github.com/rust-lang/rust/pull/30740
rustc_data_structures rustc_front rustc_platform_intrinsics \
rustc_plugin rustc_metadata rustc_passes
HOST_CRATES := syntax syntax_ext $(RUSTC_CRATES) rustdoc fmt_macros
-TOOLS := compiletest rustdoc rustc rustbook error-index-generator
+TOOLS := compiletest rustdoc rustc rustbook error_index_generator
DEPS_core :=
DEPS_alloc := core libc alloc_system
TOOL_DEPS_rustdoc := rustdoc
TOOL_DEPS_rustc := rustc_driver
TOOL_DEPS_rustbook := std rustdoc
-TOOL_DEPS_error-index-generator := rustdoc syntax serialize
+TOOL_DEPS_error_index_generator := rustdoc syntax serialize
TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs
TOOL_SOURCE_rustdoc := $(S)src/driver/driver.rs
TOOL_SOURCE_rustc := $(S)src/driver/driver.rs
TOOL_SOURCE_rustbook := $(S)src/rustbook/main.rs
-TOOL_SOURCE_error-index-generator := $(S)src/error-index-generator/main.rs
+TOOL_SOURCE_error_index_generator := $(S)src/error_index_generator/main.rs
ONLY_RLIB_core := 1
ONLY_RLIB_libc := 1
doc \
driver \
etc \
- error-index-generator \
+ error_index_generator \
$(foreach crate,$(CRATES),lib$(crate)) \
libcollectionstest \
libcoretest \
# ./configure
RUSTBOOK = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(RUSTBOOK_EXE)
-# The error-index-generator executable...
-ERR_IDX_GEN_EXE = $(HBIN2_H_$(CFG_BUILD))/error-index-generator$(X_$(CFG_BUILD))
+# The error_index_generator executable...
+ERR_IDX_GEN_EXE = $(HBIN2_H_$(CFG_BUILD))/error_index_generator$(X_$(CFG_BUILD))
ERR_IDX_GEN = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(ERR_IDX_GEN_EXE)
ERR_IDX_GEN_MD = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(ERR_IDX_GEN_EXE) markdown
# Metadata used to generate the index is created as a side effect of
# the build so this depends on every crate being up to date.
doc/error-index.html: $(ERR_IDX_GEN_EXE) $(CSREQ$(2)_T_$(CFG_BUILD)_H_$(CFG_BUILD)) | doc/
- $(Q)$(call E, error-index-generator: $@)
+ $(Q)$(call E, error_index_generator: $@)
$(Q)$(ERR_IDX_GEN)
doc/error-index.md: $(ERR_IDX_GEN_EXE) $(CSREQ$(2)_T_$(CFG_BUILD)_H_$(CFG_BUILD)) | doc/
- $(Q)$(call E, error-index-generator: $@)
+ $(Q)$(call E, error_index_generator: $@)
$(Q)$(ERR_IDX_GEN_MD)
endef
-PREPARE_TOOLS = $(filter-out compiletest rustbook error-index-generator, $(TOOLS))
+PREPARE_TOOLS = $(filter-out compiletest rustbook error_index_generator, $(TOOLS))
# $(1) is tool
$(3) \
"$$(LLVM_LIBDIR_RUSTFLAGS_$(3))" \
"$$(LLVM_ALL_COMPONENTS_$(3))" \
- "$$(LLVM_CXXFLAGS_$(3))"
+ "$$(LLVM_CXXFLAGS_$(3))" \
+ '$$(CXX_$(3))'
@touch -r $$@.start_time $$@ && rm $$@.start_time
else
# FIXME #11094 - The above rule doesn't work right for multiple targets
## The details
The reason that we cannot use a binding after we’ve moved it is subtle, but
-important. When we write code like this:
+important.
+
+When we write code like this:
+
+```rust
+let x = 10;
+```
+
+Rust allocates memory for an integer `i32` on the [stack][sh], copies the bit
+pattern representing the value of 10 to the allocated memory and binds the
+variable name `x` to this memory region for future reference.
+
+Now consider the following code fragment:
```rust
let v = vec![1, 2, 3];
-let v2 = v;
+let mut v2 = v;
+```
+
+The first line allocates memory for the vector object `v` on the stack like
+it does for `x` above. But in addition to that it also allocates some memory
+on the [heap][sh] for the actual data (`[1, 2, 3]`). Rust copies the address
+of this heap allocation to an internal pointer, which is part of the vector
+object placed on the stack (let's call it the data pointer).
+
+It is worth pointing out (even at the risk of stating the obvious) that the
+vector object and its data live in separate memory regions instead of being a
+single contiguous memory allocation (due to reasons we will not go into at
+this point of time). These two parts of the vector (the one on the stack and
+one on the heap) must agree with each other at all times with regards to
+things like the length, capacity etc.
+
+When we move `v` to `v2`, Rust actually does a bitwise copy of the vector
+object `v` into the stack allocation represented by `v2`. This shallow copy
+does not create a copy of the heap allocation containing the actual data.
+Which means that there would be two pointers to the contents of the vector
+both pointing to the same memory allocation on the heap. It would violate
+Rust’s safety guarantees by introducing a data race if one could access both
+`v` and `v2` at the same time.
+
+For example if we truncated the vector to just two elements through `v2`:
+
+```rust
+# let v = vec![1, 2, 3];
+# let mut v2 = v;
+v2.truncate(2);
```
-The first line allocates memory for the vector object, `v`, and for the data it
-contains. The vector object is stored on the [stack][sh] and contains a pointer
-to the content (`[1, 2, 3]`) stored on the [heap][sh]. When we move `v` to `v2`,
-it creates a copy of that pointer, for `v2`. Which means that there would be two
-pointers to the content of the vector on the heap. It would violate Rust’s
-safety guarantees by introducing a data race. Therefore, Rust forbids using `v`
-after we’ve done the move.
+and `v` were still accessible, we'd end up with an invalid vector since `v`
+would not know that the heap data has been truncated. Now, the part of the
+vector `v` on the stack does not agree with the corresponding part on the
+heap. `v` still thinks there are three elements in the vector and will
+happily let us access the non-existent element `v[2]`, but as you might
+already know this is a recipe for disaster. Especially because it might lead
+to a segmentation fault or, worse, allow an unauthorized user to read from
+memory to which they don't have access.
+
+This is why Rust forbids using `v` after we’ve done the move.
[sh]: the-stack-and-the-heap.html
```
(Notice that unlike the `println!` macro we’ve used in the past, we use square
-brackets `[]` with `vec!` macro. Rust allows you to use either in either situation,
-this is just convention.)
+brackets `[]` with `vec!` macro. Rust allows you to use either in either
+situation, this is just convention.)
There’s an alternate form of `vec!` for repeating an initial value:
let v = vec![0; 10]; // ten zeroes
```
+Vectors store their contents as contiguous arrays of `T` on the heap. This means
+that they must be able to know the size of `T` at compile time (that is, how
+many bytes are needed to store a `T`?). The size of some things can't be known
+at compile time. For these you'll have to store a pointer to that thing:
+thankfully, the [`Box`][box] type works perfectly for this.
+
## Accessing elements
To get the value at a particular index in the vector, we use `[]`s:
API documentation][vec].
[vec]: ../std/vec/index.html
+[box]: ../std/boxed/index.html
[generic]: generics.html
[panic]: concurrency.html#panics
[get]: http://doc.rust-lang.org/std/vec/struct.Vec.html#method.get
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(rustc_private, rustdoc)]
-
-extern crate syntax;
-extern crate rustdoc;
-extern crate serialize as rustc_serialize;
-
-use std::collections::BTreeMap;
-use std::fs::{read_dir, File};
-use std::io::{Read, Write};
-use std::env;
-use std::path::Path;
-use std::error::Error;
-
-use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
-
-use rustdoc::html::markdown::Markdown;
-use rustc_serialize::json;
-
-enum OutputFormat {
- HTML(HTMLFormatter),
- Markdown(MarkdownFormatter),
- Unknown(String),
-}
-
-impl OutputFormat {
- fn from(format: &str) -> OutputFormat {
- match &*format.to_lowercase() {
- "html" => OutputFormat::HTML(HTMLFormatter),
- "markdown" => OutputFormat::Markdown(MarkdownFormatter),
- s => OutputFormat::Unknown(s.to_owned()),
- }
- }
-}
-
-trait Formatter {
- fn header(&self, output: &mut Write) -> Result<(), Box<Error>>;
- fn title(&self, output: &mut Write) -> Result<(), Box<Error>>;
- fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
- err_code: &str) -> Result<(), Box<Error>>;
- fn footer(&self, output: &mut Write) -> Result<(), Box<Error>>;
-}
-
-struct HTMLFormatter;
-struct MarkdownFormatter;
-
-impl Formatter for HTMLFormatter {
- fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
- try!(write!(output, r##"<!DOCTYPE html>
-<html>
-<head>
-<title>Rust Compiler Error Index</title>
-<meta charset="utf-8">
-<!-- Include rust.css after main.css so its rules take priority. -->
-<link rel="stylesheet" type="text/css" href="main.css"/>
-<link rel="stylesheet" type="text/css" href="rust.css"/>
-<style>
-.error-undescribed {{
- display: none;
-}}
-</style>
-</head>
-<body>
-"##));
- Ok(())
- }
-
- fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
- try!(write!(output, "<h1>Rust Compiler Error Index</h1>\n"));
- Ok(())
- }
-
- fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
- err_code: &str) -> Result<(), Box<Error>> {
- // Enclose each error in a div so they can be shown/hidden en masse.
- let desc_desc = match info.description {
- Some(_) => "error-described",
- None => "error-undescribed",
- };
- let use_desc = match info.use_site {
- Some(_) => "error-used",
- None => "error-unused",
- };
- try!(write!(output, "<div class=\"{} {}\">", desc_desc, use_desc));
-
- // Error title (with self-link).
- try!(write!(output,
- "<h2 id=\"{0}\" class=\"section-header\"><a href=\"#{0}\">{0}</a></h2>\n",
- err_code));
-
- // Description rendered as markdown.
- match info.description {
- Some(ref desc) => try!(write!(output, "{}", Markdown(desc))),
- None => try!(write!(output, "<p>No description.</p>\n")),
- }
-
- try!(write!(output, "</div>\n"));
- Ok(())
- }
-
- fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
- try!(write!(output, "</body>\n</html>"));
- Ok(())
- }
-}
-
-impl Formatter for MarkdownFormatter {
- #[allow(unused_variables)]
- fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
- Ok(())
- }
-
- fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
- try!(write!(output, "# Rust Compiler Error Index\n"));
- Ok(())
- }
-
- fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
- err_code: &str) -> Result<(), Box<Error>> {
- Ok(match info.description {
- Some(ref desc) => try!(write!(output, "## {}\n{}\n", err_code, desc)),
- None => (),
- })
- }
-
- #[allow(unused_variables)]
- fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
- Ok(())
- }
-}
-
-/// Load all the metadata files from `metadata_dir` into an in-memory map.
-fn load_all_errors(metadata_dir: &Path) -> Result<ErrorMetadataMap, Box<Error>> {
- let mut all_errors = BTreeMap::new();
-
- for entry in try!(read_dir(metadata_dir)) {
- let path = try!(entry).path();
-
- let mut metadata_str = String::new();
- try!(File::open(&path).and_then(|mut f| f.read_to_string(&mut metadata_str)));
-
- let some_errors: ErrorMetadataMap = try!(json::decode(&metadata_str));
-
- for (err_code, info) in some_errors {
- all_errors.insert(err_code, info);
- }
- }
-
- Ok(all_errors)
-}
-
-/// Output an HTML page for the errors in `err_map` to `output_path`.
-fn render_error_page<T: Formatter>(err_map: &ErrorMetadataMap, output_path: &Path,
- formatter: T) -> Result<(), Box<Error>> {
- let mut output_file = try!(File::create(output_path));
-
- try!(formatter.header(&mut output_file));
- try!(formatter.title(&mut output_file));
-
- for (err_code, info) in err_map {
- try!(formatter.error_code_block(&mut output_file, info, err_code));
- }
-
- formatter.footer(&mut output_file)
-}
-
-fn main_with_result(format: OutputFormat) -> Result<(), Box<Error>> {
- let build_arch = try!(env::var("CFG_BUILD"));
- let metadata_dir = get_metadata_dir(&build_arch);
- let err_map = try!(load_all_errors(&metadata_dir));
- match format {
- OutputFormat::Unknown(s) => panic!("Unknown output format: {}", s),
- OutputFormat::HTML(h) => try!(render_error_page(&err_map,
- Path::new("doc/error-index.html"),
- h)),
- OutputFormat::Markdown(m) => try!(render_error_page(&err_map,
- Path::new("doc/error-index.md"),
- m)),
- }
- Ok(())
-}
-
-fn parse_args() -> OutputFormat {
- for arg in env::args().skip(1) {
- return OutputFormat::from(&arg);
- }
- OutputFormat::from("html")
-}
-
-fn main() {
- if let Err(e) = main_with_result(parse_args()) {
- panic!("{}", e.description());
- }
-}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_private, rustdoc)]
+
+extern crate syntax;
+extern crate rustdoc;
+extern crate serialize as rustc_serialize;
+
+use std::collections::BTreeMap;
+use std::fs::{read_dir, File};
+use std::io::{Read, Write};
+use std::env;
+use std::path::Path;
+use std::error::Error;
+
+use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
+
+use rustdoc::html::markdown::Markdown;
+use rustc_serialize::json;
+
+enum OutputFormat {
+ HTML(HTMLFormatter),
+ Markdown(MarkdownFormatter),
+ Unknown(String),
+}
+
+impl OutputFormat {
+ fn from(format: &str) -> OutputFormat {
+ match &*format.to_lowercase() {
+ "html" => OutputFormat::HTML(HTMLFormatter),
+ "markdown" => OutputFormat::Markdown(MarkdownFormatter),
+ s => OutputFormat::Unknown(s.to_owned()),
+ }
+ }
+}
+
+trait Formatter {
+ fn header(&self, output: &mut Write) -> Result<(), Box<Error>>;
+ fn title(&self, output: &mut Write) -> Result<(), Box<Error>>;
+ fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+ err_code: &str) -> Result<(), Box<Error>>;
+ fn footer(&self, output: &mut Write) -> Result<(), Box<Error>>;
+}
+
+struct HTMLFormatter;
+struct MarkdownFormatter;
+
+impl Formatter for HTMLFormatter {
+ fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ try!(write!(output, r##"<!DOCTYPE html>
+<html>
+<head>
+<title>Rust Compiler Error Index</title>
+<meta charset="utf-8">
+<!-- Include rust.css after main.css so its rules take priority. -->
+<link rel="stylesheet" type="text/css" href="main.css"/>
+<link rel="stylesheet" type="text/css" href="rust.css"/>
+<style>
+.error-undescribed {{
+ display: none;
+}}
+</style>
+</head>
+<body>
+"##));
+ Ok(())
+ }
+
+ fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ try!(write!(output, "<h1>Rust Compiler Error Index</h1>\n"));
+ Ok(())
+ }
+
+ fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+ err_code: &str) -> Result<(), Box<Error>> {
+ // Enclose each error in a div so they can be shown/hidden en masse.
+ let desc_desc = match info.description {
+ Some(_) => "error-described",
+ None => "error-undescribed",
+ };
+ let use_desc = match info.use_site {
+ Some(_) => "error-used",
+ None => "error-unused",
+ };
+ try!(write!(output, "<div class=\"{} {}\">", desc_desc, use_desc));
+
+ // Error title (with self-link).
+ try!(write!(output,
+ "<h2 id=\"{0}\" class=\"section-header\"><a href=\"#{0}\">{0}</a></h2>\n",
+ err_code));
+
+ // Description rendered as markdown.
+ match info.description {
+ Some(ref desc) => try!(write!(output, "{}", Markdown(desc))),
+ None => try!(write!(output, "<p>No description.</p>\n")),
+ }
+
+ try!(write!(output, "</div>\n"));
+ Ok(())
+ }
+
+ fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ try!(write!(output, "</body>\n</html>"));
+ Ok(())
+ }
+}
+
+impl Formatter for MarkdownFormatter {
+ #[allow(unused_variables)]
+ fn header(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ Ok(())
+ }
+
+ fn title(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ try!(write!(output, "# Rust Compiler Error Index\n"));
+ Ok(())
+ }
+
+ fn error_code_block(&self, output: &mut Write, info: &ErrorMetadata,
+ err_code: &str) -> Result<(), Box<Error>> {
+ Ok(match info.description {
+ Some(ref desc) => try!(write!(output, "## {}\n{}\n", err_code, desc)),
+ None => (),
+ })
+ }
+
+ #[allow(unused_variables)]
+ fn footer(&self, output: &mut Write) -> Result<(), Box<Error>> {
+ Ok(())
+ }
+}
+
+/// Load all the metadata files from `metadata_dir` into an in-memory map.
+fn load_all_errors(metadata_dir: &Path) -> Result<ErrorMetadataMap, Box<Error>> {
+ let mut all_errors = BTreeMap::new();
+
+ for entry in try!(read_dir(metadata_dir)) {
+ let path = try!(entry).path();
+
+ let mut metadata_str = String::new();
+ try!(File::open(&path).and_then(|mut f| f.read_to_string(&mut metadata_str)));
+
+ let some_errors: ErrorMetadataMap = try!(json::decode(&metadata_str));
+
+ for (err_code, info) in some_errors {
+ all_errors.insert(err_code, info);
+ }
+ }
+
+ Ok(all_errors)
+}
+
+/// Output an HTML page for the errors in `err_map` to `output_path`.
+fn render_error_page<T: Formatter>(err_map: &ErrorMetadataMap, output_path: &Path,
+ formatter: T) -> Result<(), Box<Error>> {
+ let mut output_file = try!(File::create(output_path));
+
+ try!(formatter.header(&mut output_file));
+ try!(formatter.title(&mut output_file));
+
+ for (err_code, info) in err_map {
+ try!(formatter.error_code_block(&mut output_file, info, err_code));
+ }
+
+ formatter.footer(&mut output_file)
+}
+
+fn main_with_result(format: OutputFormat) -> Result<(), Box<Error>> {
+ let build_arch = try!(env::var("CFG_BUILD"));
+ let metadata_dir = get_metadata_dir(&build_arch);
+ let err_map = try!(load_all_errors(&metadata_dir));
+ match format {
+ OutputFormat::Unknown(s) => panic!("Unknown output format: {}", s),
+ OutputFormat::HTML(h) => try!(render_error_page(&err_map,
+ Path::new("doc/error-index.html"),
+ h)),
+ OutputFormat::Markdown(m) => try!(render_error_page(&err_map,
+ Path::new("doc/error-index.md"),
+ m)),
+ }
+ Ok(())
+}
+
+fn parse_args() -> OutputFormat {
+ for arg in env::args().skip(1) {
+ return OutputFormat::from(&arg);
+ }
+ OutputFormat::from("html")
+}
+
+fn main() {
+ if let Err(e) = main_with_result(parse_args()) {
+ panic!("{}", e.description());
+ }
+}
putenv('RUSTFLAGS', sys.argv[15])
putenv('LLVM_COMPONENTS', sys.argv[16])
putenv('LLVM_CXXFLAGS', sys.argv[17])
+putenv('CXX', sys.argv[18])
putenv('PYTHON', sys.executable)
os.putenv('TARGET', target_triple)
interesting_files = ['.rs', '.py', '.js', '.sh', '.c', '.h']
uninteresting_files = ['miniz.c', 'jquery', 'rust_android_dummy']
+stable_whitelist = {
+ 'src/bootstrap',
+ 'src/build_helper',
+ 'src/libcollectionstest',
+ 'src/libcore',
+ 'src/libstd',
+ 'src/rustc/std_shim',
+ 'src/test'
+}
def report_error_name_no(name, no, s):
file_counts = {ext: 0 for ext in interesting_files}
all_paths = set()
+needs_unstable_attr = set()
try:
for (dirpath, dirnames, filenames) in os.walk(src_dir):
else:
if "SNAP " in line:
report_warn("unmatched SNAP line: " + line)
+ search = re.search(r'^#!\[unstable', line)
+ if search:
+ needs_unstable_attr.discard(filename)
if cr_flag in line:
check_cr = False
check_cr = True
check_tab = True
check_linelength = True
+ if all(f not in filename for f in stable_whitelist) and \
+ re.search(r'src/.*/lib\.rs', filename):
+ needs_unstable_attr.add(filename)
# Put a reasonable limit on the amount of header data we use for
# the licenseck
update_counts(current_name)
assert len(current_contents) > 0
do_license_check(current_name, current_contents)
+ for f in needs_unstable_attr:
+ report_error_name_no(f, 1, "requires unstable attribute")
except UnicodeDecodeError as e:
report_err("UTF-8 decoding error " + str(e))
///
/// If the map did not have this key present, `None` is returned.
///
- /// If the map did have this key present, the key is not updated, the
- /// value is updated and the old value is returned.
- /// See the [module-level documentation] for more.
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
///
/// [module-level documentation]: index.html#insert-and-complex-keys
///
#![feature(nonzero)]
#![feature(num_bits_bytes)]
#![feature(pattern)]
+#![feature(placement_in)]
+#![feature(placement_new_protocol)]
#![feature(shared)]
#![feature(slice_bytes)]
#![feature(slice_patterns)]
#![stable(feature = "rust1", since = "1.0.0")]
-use alloc::boxed::Box;
+use alloc::boxed::{Box, IntermediateBox};
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hasher, Hash};
use core::iter::FromIterator;
use core::mem;
-use core::ptr::Shared;
+use core::ops::{BoxPlace, InPlace, Place, Placer};
+use core::ptr::{self, Shared};
/// A doubly-linked list.
#[stable(feature = "rust1", since = "1.0.0")]
second_part
}
+
+ /// Returns a place for insertion at the front of the list.
+ ///
+ /// Using this method with placement syntax is equivalent to [`push_front`]
+ /// (#method.push_front), but may be more efficient.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(collection_placement)]
+ /// #![feature(placement_in_syntax)]
+ ///
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::new();
+ /// list.front_place() <- 2;
+ /// list.front_place() <- 4;
+ /// assert!(list.iter().eq(&[4, 2]));
+ /// ```
+ #[unstable(feature = "collection_placement",
+ reason = "method name and placement protocol are subject to change",
+ issue = "30172")]
+ pub fn front_place(&mut self) -> FrontPlace<T> {
+ FrontPlace { list: self, node: IntermediateBox::make_place() }
+ }
+
+ /// Returns a place for insertion at the back of the list.
+ ///
+ /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back),
+ /// but may be more efficient.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(collection_placement)]
+ /// #![feature(placement_in_syntax)]
+ ///
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::new();
+ /// list.back_place() <- 2;
+ /// list.back_place() <- 4;
+ /// assert!(list.iter().eq(&[2, 4]));
+ /// ```
+ #[unstable(feature = "collection_placement",
+ reason = "method name and placement protocol are subject to change",
+ issue = "30172")]
+ pub fn back_place(&mut self) -> BackPlace<T> {
+ BackPlace { list: self, node: IntermediateBox::make_place() }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
}
}
+unsafe fn finalize<T>(node: IntermediateBox<Node<T>>) -> Box<Node<T>> {
+ let mut node = node.finalize();
+ ptr::write(&mut node.next, None);
+ ptr::write(&mut node.prev, Rawlink::none());
+ node
+}
+
+/// A place for insertion at the front of a `LinkedList`.
+///
+/// See [`LinkedList::front_place`](struct.LinkedList.html#method.front_place) for details.
+#[must_use = "places do nothing unless written to with `<-` syntax"]
+#[unstable(feature = "collection_placement",
+ reason = "struct name and placement protocol are subject to change",
+ issue = "30172")]
+pub struct FrontPlace<'a, T: 'a> {
+ list: &'a mut LinkedList<T>,
+ node: IntermediateBox<Node<T>>,
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> Placer<T> for FrontPlace<'a, T> {
+ type Place = Self;
+
+ fn make_place(self) -> Self {
+ self
+ }
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> Place<T> for FrontPlace<'a, T> {
+ fn pointer(&mut self) -> *mut T {
+ unsafe { &mut (*self.node.pointer()).value }
+ }
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> InPlace<T> for FrontPlace<'a, T> {
+ type Owner = ();
+
+ unsafe fn finalize(self) {
+ let FrontPlace { list, node } = self;
+ list.push_front_node(finalize(node));
+ }
+}
+
+/// A place for insertion at the back of a `LinkedList`.
+///
+/// See [`LinkedList::back_place`](struct.LinkedList.html#method.back_place) for details.
+#[must_use = "places do nothing unless written to with `<-` syntax"]
+#[unstable(feature = "collection_placement",
+ reason = "struct name and placement protocol are subject to change",
+ issue = "30172")]
+pub struct BackPlace<'a, T: 'a> {
+ list: &'a mut LinkedList<T>,
+ node: IntermediateBox<Node<T>>,
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> Placer<T> for BackPlace<'a, T> {
+ type Place = Self;
+
+ fn make_place(self) -> Self {
+ self
+ }
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> Place<T> for BackPlace<'a, T> {
+ fn pointer(&mut self) -> *mut T {
+ unsafe { &mut (*self.node.pointer()).value }
+ }
+}
+
+#[unstable(feature = "collection_placement",
+ reason = "placement protocol is subject to change",
+ issue = "30172")]
+impl<'a, T> InPlace<T> for BackPlace<'a, T> {
+ type Owner = ();
+
+ unsafe fn finalize(self) {
+ let BackPlace { list, node } = self;
+ list.push_back_node(finalize(node));
+ }
+}
+
// Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters.
#[allow(dead_code)]
fn assert_covariance() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! A growable list type with heap-allocated contents, written `Vec<T>` but
-//! pronounced 'vector.'
+//! A contiguous growable array type with heap-allocated contents, written
+//! `Vec<T>` but pronounced 'vector.'
//!
//! Vectors have `O(1)` indexing, amortized `O(1)` push (to the end) and
//! `O(1)` pop (from the end).
use super::range::RangeArgument;
-/// A growable list type, written `Vec<T>` but pronounced 'vector.'
+/// A contiguous growable array type, written `Vec<T>` but pronounced 'vector.'
///
/// # Examples
///
/// ```
/// use std::mem;
///
-/// let one = unsafe { mem::transmute_copy(&1) };
+/// #[repr(packed)]
+/// struct Foo {
+/// bar: u8,
+/// }
+///
+/// let foo_slice = [10u8];
+///
+/// unsafe {
+/// // Copy the data from 'foo_slice' and treat it as a 'Foo'
+/// let mut foo_struct: Foo = mem::transmute_copy(&foo_slice);
+/// assert_eq!(foo_struct.bar, 10);
+///
+/// // Modify the copied data
+/// foo_struct.bar = 20;
+/// assert_eq!(foo_struct.bar, 20);
+/// }
///
-/// assert_eq!(1, one);
+/// // The contents of 'foo_slice' should not have changed
+/// assert_eq!(foo_slice, [10]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#![stable(feature = "rust1", since = "1.0.0")]
-uint_module! { u16, i16, 16 }
+uint_module! { u16, 16 }
#![stable(feature = "rust1", since = "1.0.0")]
-uint_module! { u32, i32, 32 }
+uint_module! { u32, 32 }
#![stable(feature = "rust1", since = "1.0.0")]
-uint_module! { u64, i64, 64 }
+uint_module! { u64, 64 }
#![stable(feature = "rust1", since = "1.0.0")]
-uint_module! { u8, i8, 8 }
+uint_module! { u8, 8 }
#![doc(hidden)]
-macro_rules! uint_module { ($T:ty, $T_SIGNED:ty, $bits:expr) => (
+macro_rules! uint_module { ($T:ty, $bits:expr) => (
#[unstable(feature = "num_bits_bytes",
reason = "may want to be an associated function",
#![stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_pointer_width = "32")]
-uint_module! { usize, isize, 32 }
+uint_module! { usize, 32 }
#[cfg(target_pointer_width = "64")]
-uint_module! { usize, isize, 64 }
+uint_module! { usize, 64 }
will never be reached as for all possible values of the expression being
matched, one of the preceding patterns will match.
-This means that perhaps some of the preceding patterns are too general, this one
-is too specific or the ordering is incorrect.
+This means that perhaps some of the preceding patterns are too general, this
+one is too specific or the ordering is incorrect.
For example, the following `match` block has too many arms:
This error indicates that the compiler cannot guarantee a matching pattern for
one or more possible inputs to a match expression. Guaranteed matches are
required in order to assign values to match expressions, or alternatively,
-determine the flow of execution.
+determine the flow of execution. Erroneous code example:
+
+```compile_fail
+enum Terminator {
+ HastaLaVistaBaby,
+ TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered
+ Terminator::TalkToMyHand => {}
+}
+```
If you encounter this error you must alter your patterns so that every possible
value of the input type is matched. For types with a small number of variants
(like enums) you should probably cover all cases explicitly. Alternatively, the
underscore `_` wildcard pattern can be added after all other patterns to match
-"anything else".
+"anything else". Example:
+
+```
+enum Terminator {
+ HastaLaVistaBaby,
+ TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x {
+ Terminator::TalkToMyHand => {}
+ Terminator::HastaLaVistaBaby => {}
+}
+
+// or:
+
+match x {
+ Terminator::TalkToMyHand => {}
+ _ => {}
+}
+```
"##,
E0005: r##"
Patterns used to bind names must be irrefutable, that is, they must guarantee
-that a name will be extracted in all cases. If you encounter this error you
-probably need to use a `match` or `if let` to deal with the possibility of
-failure.
+that a name will be extracted in all cases. Erroneous code example:
+
+```compile_fail
+let x = Some(1);
+let Some(y) = x;
+// error: refutable pattern in local binding: `None` not covered
+```
+
+If you encounter this error you probably need to use a `match` or `if let` to
+deal with the possibility of failure. Example:
+
+```compile_fail
+let x = Some(1);
+
+match x {
+ Some(y) => {
+ // do something
+ },
+ None => {}
+}
+
+// or:
+
+if let Some(y) = x {
+ // do something
+}
+```
"##,
E0007: r##"
This error indicates that the bindings in a match arm would require a value to
-be moved into more than one location, thus violating unique ownership. Code like
-the following is invalid as it requires the entire `Option<String>` to be moved
-into a variable called `op_string` while simultaneously requiring the inner
-String to be moved into a variable called `s`.
+be moved into more than one location, thus violating unique ownership. Code
+like the following is invalid as it requires the entire `Option<String>` to be
+moved into a variable called `op_string` while simultaneously requiring the
+inner `String` to be moved into a variable called `s`.
```compile_fail
let x = Some("s".to_string());
This limitation may be removed in a future version of Rust.
-Wrong example:
+Erroneous code example:
```compile_fail
struct X { x: (), }
}
```
-we cannot create an object of type `Box<Foo>` or `&Foo` since in this case
+We cannot create an object of type `Box<Foo>` or `&Foo` since in this case
`Self` would not be `Sized`.
Generally, `Self : Sized` is used to indicate that the trait should not be used
```
(Note that `&self` and `&mut self` are okay, it's additional `Self` types which
-cause this problem)
+cause this problem.)
In such a case, the compiler cannot predict the return type of `foo()` in a
situation like the following:
"##,
E0133: r##"
-Using unsafe functionality, is potentially dangerous and disallowed
-by safety checks. Examples:
+Using unsafe functionality is potentially dangerous and disallowed by safety
+checks. Examples:
-- Dereferencing raw pointers
-- Calling functions via FFI
-- Calling functions marked unsafe
+* Dereferencing raw pointers
+* Calling functions via FFI
+* Calling functions marked unsafe
-These safety checks can be relaxed for a section of the code
-by wrapping the unsafe instructions with an `unsafe` block. For instance:
+These safety checks can be relaxed for a section of the code by wrapping the
+unsafe instructions with an `unsafe` block. For instance:
```
unsafe fn f() { return; }
println!("{}", y);
```
-In the previous example, the print statement was never reached when the wildcard
-match arm was hit, so we were okay with `foo()` not returning an integer that we
-could set to `y`. But in this example, `foo()` actually does return control, so
-the print statement will be executed with an uninitialized value.
+In the previous example, the print statement was never reached when the
+wildcard match arm was hit, so we were okay with `foo()` not returning an
+integer that we could set to `y`. But in this example, `foo()` actually does
+return control, so the print statement will be executed with an uninitialized
+value.
Obviously we cannot have functions which are allowed to be used in such
positions and yet can return control. So, if you are defining a function that
-returns `!`, make sure that there is no way for it to actually finish executing.
+returns `!`, make sure that there is no way for it to actually finish
+executing.
"##,
E0271: r##"
foo(true); // `bool` does not implement `Index<u8>`
```
-there will be an error about `bool` not implementing `Index<u8>`, followed by a
+There will be an error about `bool` not implementing `Index<u8>`, followed by a
note saying "the type `bool` cannot be indexed by `u8`".
-As you can see, you can specify type parameters in curly braces for substitution
-with the actual types (using the regular format string syntax) in a given
-situation. Furthermore, `{Self}` will substitute to the type (in this case,
-`bool`) that we tried to use.
+As you can see, you can specify type parameters in curly braces for
+substitution with the actual types (using the regular format string syntax) in
+a given situation. Furthermore, `{Self}` will substitute to the type (in this
+case, `bool`) that we tried to use.
This error appears when the curly braces contain an identifier which doesn't
-match with any of the type parameters or the string `Self`. This might happen if
-you misspelled a type parameter, or if you intended to use literal curly braces.
-If it is the latter, escape the curly braces with a second curly brace of the
-same type; e.g. a literal `{` is `{{`
+match with any of the type parameters or the string `Self`. This might happen
+if you misspelled a type parameter, or if you intended to use literal curly
+braces. If it is the latter, escape the curly braces with a second curly brace
+of the same type; e.g. a literal `{` is `{{`.
"##,
E0273: r##"
there will be an error about `bool` not implementing `Index<u8>`, followed by a
note saying "the type `bool` cannot be indexed by `u8`".
-As you can see, you can specify type parameters in curly braces for substitution
-with the actual types (using the regular format string syntax) in a given
-situation. Furthermore, `{Self}` will substitute to the type (in this case,
-`bool`) that we tried to use.
+As you can see, you can specify type parameters in curly braces for
+substitution with the actual types (using the regular format string syntax) in
+a given situation. Furthermore, `{Self}` will substitute to the type (in this
+case, `bool`) that we tried to use.
This error appears when the curly braces do not contain an identifier. Please
add one of the same name as a type parameter. If you intended to use literal
E0275: r##"
This error occurs when there was a recursive trait requirement that overflowed
-before it could be evaluated. Often this means that there is unbounded recursion
-in resolving some type bounds.
+before it could be evaluated. Often this means that there is unbounded
+recursion in resolving some type bounds.
For example, in the following code:
```
To determine if a `T` is `Foo`, we need to check if `Bar<T>` is `Foo`. However,
-to do this check, we need to determine that `Bar<Bar<T>>` is `Foo`. To determine
-this, we check if `Bar<Bar<Bar<T>>>` is `Foo`, and so on. This is clearly a
-recursive requirement that can't be resolved directly.
+to do this check, we need to determine that `Bar<Bar<T>>` is `Foo`. To
+determine this, we check if `Bar<Bar<Bar<T>>>` is `Foo`, and so on. This is
+clearly a recursive requirement that can't be resolved directly.
Consider changing your trait bounds so that they're less self-referential.
"##,
// we now call the method with the i32 type, which doesn't implement
// the Foo trait
some_func(5i32); // error: the trait `Foo` is not implemented for the
- // type `i32`
+ // type `i32`
}
```
```compile_fail
match Some(()) {
None => { },
- option if option.take().is_none() => { /* impossible, option is `Some` */ },
+ option if option.take().is_none() => {
+ /* impossible, option is `Some` */
+ },
Some(_) => { } // When the previous match failed, the option became `None`.
}
```
E0306: r##"
In an array literal `[x; N]`, `N` is the number of elements in the array. This
-number cannot be negative.
+must be an unsigned integer. Erroneous code example:
+
+```compile_fail
+let x = [0i32; true]; // error: expected positive integer for repeat count,
+ // found boolean
+```
+
+Working example:
+
+```
+let x = [0i32; 2];
+```
"##,
E0307: r##"
-The length of an array is part of its type. For this reason, this length must be
-a compile-time constant.
+The length of an array is part of its type. For this reason, this length must
+be a compile-time constant. Erroneous code example:
+
+```compile_fail
+ let len = 10;
+ let x = [0i32; len]; // error: expected constant integer for repeat count,
+ // found variable
+```
"##,
E0308: r##"
"##,
E0398: r##"
-In Rust 1.3, the default object lifetime bounds are expected to
-change, as described in RFC #1156 [1]. You are getting a warning
-because the compiler thinks it is possible that this change will cause
-a compilation error in your code. It is possible, though unlikely,
-that this is a false alarm.
-
-The heart of the change is that where `&'a Box<SomeTrait>` used to
-default to `&'a Box<SomeTrait+'a>`, it now defaults to `&'a
-Box<SomeTrait+'static>` (here, `SomeTrait` is the name of some trait
-type). Note that the only types which are affected are references to
-boxes, like `&Box<SomeTrait>` or `&[Box<SomeTrait>]`. More common
-types like `&SomeTrait` or `Box<SomeTrait>` are unaffected.
-
-To silence this warning, edit your code to use an explicit bound.
-Most of the time, this means that you will want to change the
-signature of a function that you are calling. For example, if
-the error is reported on a call like `foo(x)`, and `foo` is
-defined as follows:
+In Rust 1.3, the default object lifetime bounds are expected to change, as
+described in RFC #1156 [1]. You are getting a warning because the compiler
+thinks it is possible that this change will cause a compilation error in your
+code. It is possible, though unlikely, that this is a false alarm.
+
+The heart of the change is that where `&'a Box<SomeTrait>` used to default to
+`&'a Box<SomeTrait+'a>`, it now defaults to `&'a Box<SomeTrait+'static>` (here,
+`SomeTrait` is the name of some trait type). Note that the only types which are
+affected are references to boxes, like `&Box<SomeTrait>` or
+`&[Box<SomeTrait>]`. More common types like `&SomeTrait` or `Box<SomeTrait>`
+are unaffected.
+
+To silence this warning, edit your code to use an explicit bound. Most of the
+time, this means that you will want to change the signature of a function that
+you are calling. For example, if the error is reported on a call like `foo(x)`,
+and `foo` is defined as follows:
```ignore
fn foo(arg: &Box<SomeTrait>) { ... }
fn foo<'a>(arg: &Box<SomeTrait+'a>) { ... }
```
-This explicitly states that you expect the trait object `SomeTrait` to
-contain references (with a maximum lifetime of `'a`).
+This explicitly states that you expect the trait object `SomeTrait` to contain
+references (with a maximum lifetime of `'a`).
[1]: https://github.com/rust-lang/rfcs/pull/1156
"##,
"##,
E0517: r##"
-This error indicates that a `#[repr(..)]` attribute was placed on an unsupported
-item.
+This error indicates that a `#[repr(..)]` attribute was placed on an
+unsupported item.
Examples of erroneous code:
#[repr(C)]
impl Foo {
- ...
+ // ...
}
```
- - The `#[repr(C)]` attribute can only be placed on structs and enums
- - The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs
- - The `#[repr(u8)]`, `#[repr(i16)]`, etc attributes only work on enums
+* The `#[repr(C)]` attribute can only be placed on structs and enums.
+* The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs.
+* The `#[repr(u8)]`, `#[repr(i16)]`, etc attributes only work on enums.
These attributes do not work on typedefs, since typedefs are just aliases.
Representations like `#[repr(u8)]`, `#[repr(i64)]` are for selecting the
-discriminant size for C-like enums (when there is no associated data, e.g. `enum
-Color {Red, Blue, Green}`), effectively setting the size of the enum to the size
-of the provided type. Such an enum can be cast to a value of the same type as
-well. In short, `#[repr(u8)]` makes the enum behave like an integer with a
-constrained set of allowed values.
+discriminant size for C-like enums (when there is no associated data, e.g.
+`enum Color {Red, Blue, Green}`), effectively setting the size of the enum to
+the size of the provided type. Such an enum can be cast to a value of the same
+type as well. In short, `#[repr(u8)]` makes the enum behave like an integer
+with a constrained set of allowed values.
Only C-like enums can be cast to numerical primitives, so this attribute will
not apply to structs.
`#[repr(packed)]` reduces padding to make the struct size smaller. The
-representation of enums isn't strictly defined in Rust, and this attribute won't
-work on enums.
+representation of enums isn't strictly defined in Rust, and this attribute
+won't work on enums.
`#[repr(simd)]` will give a struct consisting of a homogenous series of machine
types (i.e. `u8`, `i32`, etc) a representation that permits vectorization via
"##,
E0518: r##"
-This error indicates that an `#[inline(..)]` attribute was incorrectly placed on
-something other than a function or method.
+This error indicates that an `#[inline(..)]` attribute was incorrectly placed
+on something other than a function or method.
Examples of erroneous code:
#[inline(never)]
impl Foo {
- ...
+ // ...
}
```
fn visit_pat(&mut self, pat: &'ast Pat) {
let maybe_binding = match pat.node {
- PatIdent(_, id, _) => Some(id.node),
+ PatKind::Ident(_, id, _) => Some(id.node),
_ => None
};
return DepNode::Krate,
NotPresent =>
- panic!("Walking parents from `{}` led to `NotPresent` at `{}`", id0, id),
+ // Some nodes, notably struct fields, are not
+ // present in the map for whatever reason, but
+ // they *do* have def-ids. So if we encounter an
+ // empty hole, check for that case.
+ return self.opt_local_def_id(id)
+ .map(|def_id| DepNode::Hir(def_id))
+ .unwrap_or_else(|| {
+ panic!("Walking parents from `{}` \
+ led to `NotPresent` at `{}`",
+ id0, id)
+ }),
}
}
}
NodeVariant(v) => PathName(v.node.name),
NodeLifetime(lt) => PathName(lt.name),
NodeTyParam(tp) => PathName(tp.name),
- NodeLocal(&Pat { node: PatIdent(_,l,_), .. }) => {
+ NodeLocal(&Pat { node: PatKind::Ident(_,l,_), .. }) => {
PathName(l.node.name)
},
_ => panic!("no path elem for {:?}", node)
use syntax::ast;
use syntax::ptr::P;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
struct CFGBuilder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex {
match pat.node {
- hir::PatIdent(_, _, None) |
- hir::PatEnum(_, None) |
- hir::PatQPath(..) |
- hir::PatLit(..) |
- hir::PatRange(..) |
- hir::PatWild => {
+ PatKind::Ident(_, _, None) |
+ PatKind::TupleStruct(_, None) |
+ PatKind::Path(..) |
+ PatKind::QPath(..) |
+ PatKind::Lit(..) |
+ PatKind::Range(..) |
+ PatKind::Wild => {
self.add_ast_node(pat.id, &[pred])
}
- hir::PatBox(ref subpat) |
- hir::PatRegion(ref subpat, _) |
- hir::PatIdent(_, _, Some(ref subpat)) => {
+ PatKind::Box(ref subpat) |
+ PatKind::Ref(ref subpat, _) |
+ PatKind::Ident(_, _, Some(ref subpat)) => {
let subpat_exit = self.pat(&subpat, pred);
self.add_ast_node(pat.id, &[subpat_exit])
}
- hir::PatEnum(_, Some(ref subpats)) |
- hir::PatTup(ref subpats) => {
+ PatKind::TupleStruct(_, Some(ref subpats)) |
+ PatKind::Tup(ref subpats) => {
let pats_exit = self.pats_all(subpats.iter(), pred);
self.add_ast_node(pat.id, &[pats_exit])
}
- hir::PatStruct(_, ref subpats, _) => {
+ PatKind::Struct(_, ref subpats, _) => {
let pats_exit =
self.pats_all(subpats.iter().map(|f| &f.node.pat), pred);
self.add_ast_node(pat.id, &[pats_exit])
}
- hir::PatVec(ref pre, ref vec, ref post) => {
+ PatKind::Vec(ref pre, ref vec, ref post) => {
let pre_exit = self.pats_all(pre.iter(), pred);
let vec_exit = self.pats_all(vec.iter(), pre_exit);
let post_exit = self.pats_all(post.iter(), vec_exit);
use std::iter::{FromIterator, IntoIterator, repeat};
use rustc_front::hir;
-use rustc_front::hir::Pat;
+use rustc_front::hir::{Pat, PatKind};
use rustc_front::intravisit::{self, Visitor, FnKind};
use rustc_front::util as front_util;
use rustc_back::slice;
pub const DUMMY_WILD_PAT: &'static Pat = &Pat {
id: DUMMY_NODE_ID,
- node: hir::PatWild,
+ node: PatKind::Wild,
span: DUMMY_SP
};
fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) {
front_util::walk_pat(pat, |p| {
match p.node {
- hir::PatIdent(hir::BindByValue(hir::MutImmutable), ident, None) => {
+ PatKind::Ident(hir::BindByValue(hir::MutImmutable), ident, None) => {
let pat_ty = cx.tcx.pat_ty(p);
if let ty::TyEnum(edef, _) = pat_ty.sty {
let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
// Check that we do not match against a static NaN (#6804)
fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) {
front_util::walk_pat(pat, |p| {
- if let hir::PatLit(ref expr) = p.node {
+ if let PatKind::Lit(ref expr) = p.node {
match eval_const_expr_partial(cx.tcx, &expr, ExprTypeChecked, None) {
Ok(ConstVal::Float(f)) if f.is_nan() => {
span_warn!(cx.tcx.sess, p.span, E0003,
fn raw_pat<'a>(p: &'a Pat) -> &'a Pat {
match p.node {
- hir::PatIdent(_, _, Some(ref s)) => raw_pat(&s),
+ PatKind::Ident(_, _, Some(ref s)) => raw_pat(&s),
_ => p
}
}
hir::MatchSource::ForLoopDesugar => {
// `witnesses[0]` has the form `Some(<head>)`, peel off the `Some`
let witness = match witnesses[0].node {
- hir::PatEnum(_, Some(ref pats)) => match &pats[..] {
+ PatKind::TupleStruct(_, Some(ref pats)) => match &pats[..] {
[ref pat] => &**pat,
_ => unreachable!(),
},
impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
fn fold_pat(&mut self, pat: P<Pat>) -> P<Pat> {
return match pat.node {
- hir::PatIdent(..) | hir::PatEnum(..) | hir::PatQPath(..) => {
+ PatKind::Ident(..) | PatKind::Path(..) | PatKind::QPath(..) => {
let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def());
match def {
Some(Def::AssociatedConst(did)) |
let pats_len = pats.len();
let mut pats = pats.into_iter().map(|p| P((*p).clone()));
let pat = match left_ty.sty {
- ty::TyTuple(_) => hir::PatTup(pats.collect()),
+ ty::TyTuple(_) => PatKind::Tup(pats.collect()),
ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => {
let v = adt.variant_of_ctor(ctor);
- if let VariantKind::Struct = v.kind() {
- let field_pats: hir::HirVec<_> = v.fields.iter()
- .zip(pats)
- .filter(|&(_, ref pat)| pat.node != hir::PatWild)
- .map(|(field, pat)| Spanned {
- span: DUMMY_SP,
- node: hir::FieldPat {
- name: field.name,
- pat: pat,
- is_shorthand: false,
- }
- }).collect();
- let has_more_fields = field_pats.len() < pats_len;
- hir::PatStruct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields)
- } else {
- hir::PatEnum(def_to_path(cx.tcx, v.did), Some(pats.collect()))
+ match v.kind() {
+ VariantKind::Struct => {
+ let field_pats: hir::HirVec<_> = v.fields.iter()
+ .zip(pats)
+ .filter(|&(_, ref pat)| pat.node != PatKind::Wild)
+ .map(|(field, pat)| Spanned {
+ span: DUMMY_SP,
+ node: hir::FieldPat {
+ name: field.name,
+ pat: pat,
+ is_shorthand: false,
+ }
+ }).collect();
+ let has_more_fields = field_pats.len() < pats_len;
+ PatKind::Struct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields)
+ }
+ VariantKind::Tuple => {
+ PatKind::TupleStruct(def_to_path(cx.tcx, v.did), Some(pats.collect()))
+ }
+ VariantKind::Unit => {
+ PatKind::Path(def_to_path(cx.tcx, v.did))
+ }
}
}
ty::TyArray(_, n) => match ctor {
&Single => {
assert_eq!(pats_len, n);
- hir::PatVec(pats.collect(), None, hir::HirVec::new())
+ PatKind::Vec(pats.collect(), None, hir::HirVec::new())
},
_ => unreachable!()
},
ty::TySlice(_) => match ctor {
&Slice(n) => {
assert_eq!(pats_len, n);
- hir::PatVec(pats.collect(), None, hir::HirVec::new())
+ PatKind::Vec(pats.collect(), None, hir::HirVec::new())
},
_ => unreachable!()
},
- ty::TyStr => hir::PatWild,
+ ty::TyStr => PatKind::Wild,
_ => {
assert_eq!(pats_len, 1);
- hir::PatRegion(pats.nth(0).unwrap(), mutbl)
+ PatKind::Ref(pats.nth(0).unwrap(), mutbl)
}
}
}
ty::TyArray(_, len) => {
assert_eq!(pats_len, len);
- hir::PatVec(pats.collect(), None, hir::HirVec::new())
+ PatKind::Vec(pats.collect(), None, hir::HirVec::new())
}
_ => {
match *ctor {
- ConstantValue(ref v) => hir::PatLit(const_val_to_expr(v)),
- _ => hir::PatWild,
+ ConstantValue(ref v) => PatKind::Lit(const_val_to_expr(v)),
+ _ => PatKind::Wild,
}
}
};
let left_ty = cx.tcx.pat_ty(&real_pat);
match real_pat.node {
- hir::PatIdent(hir::BindByRef(..), _, _) => {
+ PatKind::Ident(hir::BindByRef(..), _, _) => {
left_ty.builtin_deref(false, NoPreference).unwrap().ty
}
_ => left_ty,
};
let max_slice_length = rows.iter().filter_map(|row| match row[0].node {
- hir::PatVec(ref before, _, ref after) => Some(before.len() + after.len()),
+ PatKind::Vec(ref before, _, ref after) => Some(before.len() + after.len()),
_ => None
}).max().map_or(0, |v| v + 1);
left_ty: Ty, max_slice_length: usize) -> Vec<Constructor> {
let pat = raw_pat(p);
match pat.node {
- hir::PatIdent(..) =>
- match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) =>
- cx.tcx.sess.span_bug(pat.span, "const pattern should've \
- been rewritten"),
- Some(Def::Struct(..)) => vec!(Single),
- Some(Def::Variant(_, id)) => vec!(Variant(id)),
- _ => vec!()
- },
- hir::PatEnum(..) =>
- match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) =>
+ PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::Ident(..) =>
+ match cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
+ Def::Const(..) | Def::AssociatedConst(..) =>
cx.tcx.sess.span_bug(pat.span, "const pattern should've \
been rewritten"),
- Some(Def::Variant(_, id)) => vec!(Variant(id)),
- _ => vec!(Single)
+ Def::Struct(..) | Def::TyAlias(..) => vec![Single],
+ Def::Variant(_, id) => vec![Variant(id)],
+ Def::Local(..) => vec![],
+ def => cx.tcx.sess.span_bug(pat.span, &format!("pat_constructors: unexpected \
+ definition {:?}", def)),
},
- hir::PatQPath(..) =>
+ PatKind::QPath(..) =>
cx.tcx.sess.span_bug(pat.span, "const pattern should've \
been rewritten"),
- hir::PatStruct(..) =>
- match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) =>
- cx.tcx.sess.span_bug(pat.span, "const pattern should've \
- been rewritten"),
- Some(Def::Variant(_, id)) => vec!(Variant(id)),
- _ => vec!(Single)
- },
- hir::PatLit(ref expr) =>
+ PatKind::Lit(ref expr) =>
vec!(ConstantValue(eval_const_expr(cx.tcx, &expr))),
- hir::PatRange(ref lo, ref hi) =>
+ PatKind::Range(ref lo, ref hi) =>
vec!(ConstantRange(eval_const_expr(cx.tcx, &lo), eval_const_expr(cx.tcx, &hi))),
- hir::PatVec(ref before, ref slice, ref after) =>
+ PatKind::Vec(ref before, ref slice, ref after) =>
match left_ty.sty {
ty::TyArray(_, _) => vec!(Single),
_ => if slice.is_some() {
vec!(Slice(before.len() + after.len()))
}
},
- hir::PatBox(_) | hir::PatTup(_) | hir::PatRegion(..) =>
+ PatKind::Box(_) | PatKind::Tup(_) | PatKind::Ref(..) =>
vec!(Single),
- hir::PatWild =>
+ PatKind::Wild =>
vec!(),
}
}
id: pat_id, ref node, span: pat_span
} = raw_pat(r[col]);
let head: Option<Vec<&Pat>> = match *node {
- hir::PatWild =>
+ PatKind::Wild =>
Some(vec![DUMMY_WILD_PAT; arity]),
- hir::PatIdent(_, _, _) => {
- let opt_def = cx.tcx.def_map.borrow().get(&pat_id).map(|d| d.full_def());
- match opt_def {
- Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) =>
+ PatKind::Path(..) | PatKind::Ident(..) => {
+ let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
+ match def {
+ Def::Const(..) | Def::AssociatedConst(..) =>
cx.tcx.sess.span_bug(pat_span, "const pattern should've \
been rewritten"),
- Some(Def::Variant(_, id)) => if *constructor == Variant(id) {
- Some(vec!())
- } else {
- None
- },
- _ => Some(vec![DUMMY_WILD_PAT; arity])
+ Def::Variant(_, id) if *constructor != Variant(id) => None,
+ Def::Variant(..) | Def::Struct(..) => Some(Vec::new()),
+ Def::Local(..) => Some(vec![DUMMY_WILD_PAT; arity]),
+ _ => cx.tcx.sess.span_bug(pat_span, &format!("specialize: unexpected \
+ definition {:?}", def)),
}
}
- hir::PatEnum(_, ref args) => {
+ PatKind::TupleStruct(_, ref args) => {
let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
match def {
Def::Const(..) | Def::AssociatedConst(..) =>
}
}
- hir::PatQPath(_, _) => {
+ PatKind::QPath(_, _) => {
cx.tcx.sess.span_bug(pat_span, "const pattern should've \
been rewritten")
}
- hir::PatStruct(_, ref pattern_fields, _) => {
+ PatKind::Struct(_, ref pattern_fields, _) => {
let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def();
let adt = cx.tcx.node_id_to_type(pat_id).ty_adt_def().unwrap();
let variant = adt.variant_of_ctor(constructor);
}
}
- hir::PatTup(ref args) =>
+ PatKind::Tup(ref args) =>
Some(args.iter().map(|p| &**p).collect()),
- hir::PatBox(ref inner) | hir::PatRegion(ref inner, _) =>
+ PatKind::Box(ref inner) | PatKind::Ref(ref inner, _) =>
Some(vec![&**inner]),
- hir::PatLit(ref expr) => {
+ PatKind::Lit(ref expr) => {
let expr_value = eval_const_expr(cx.tcx, &expr);
match range_covered_by_constructor(constructor, &expr_value, &expr_value) {
Some(true) => Some(vec![]),
}
}
- hir::PatRange(ref from, ref to) => {
+ PatKind::Range(ref from, ref to) => {
let from_value = eval_const_expr(cx.tcx, &from);
let to_value = eval_const_expr(cx.tcx, &to);
match range_covered_by_constructor(constructor, &from_value, &to_value) {
}
}
- hir::PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
match *constructor {
// Fixed-length vectors.
Single => {
front_util::walk_pat(&pat, |p| {
if pat_is_binding(&def_map.borrow(), &p) {
match p.node {
- hir::PatIdent(hir::BindByValue(_), _, ref sub) => {
+ PatKind::Ident(hir::BindByValue(_), _, ref sub) => {
let pat_ty = tcx.node_id_to_type(p.id);
//FIXME: (@jroesch) this code should be floated up as well
let infcx = infer::new_infer_ctxt(cx.tcx,
check_move(p, sub.as_ref().map(|p| &**p));
}
}
- hir::PatIdent(hir::BindByRef(_), _, _) => {
+ PatKind::Ident(hir::BindByRef(_), _, _) => {
}
_ => {
cx.tcx.sess.span_bug(
}
match pat.node {
- hir::PatIdent(_, _, Some(_)) => {
+ PatKind::Ident(_, _, Some(_)) => {
let bindings_were_allowed = self.bindings_allowed;
self.bindings_allowed = false;
intravisit::walk_pat(self, pat);
use graphviz::IntoCow;
use syntax::ast;
-use rustc_front::hir::Expr;
+use rustc_front::hir::{Expr, PatKind};
use rustc_front::hir;
use rustc_front::intravisit::FnKind;
use syntax::codemap::Span;
pub fn const_expr_to_pat(tcx: &ty::ctxt, expr: &Expr, span: Span) -> P<hir::Pat> {
let pat = match expr.node {
hir::ExprTup(ref exprs) =>
- hir::PatTup(exprs.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect()),
+ PatKind::Tup(exprs.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect()),
hir::ExprCall(ref callee, ref args) => {
let def = *tcx.def_map.borrow().get(&callee.id).unwrap();
Def::Variant(_, variant_did) => def_to_path(tcx, variant_did),
Def::Fn(..) => return P(hir::Pat {
id: expr.id,
- node: hir::PatLit(P(expr.clone())),
+ node: PatKind::Lit(P(expr.clone())),
span: span,
}),
_ => unreachable!()
};
let pats = args.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect();
- hir::PatEnum(path, Some(pats))
+ PatKind::TupleStruct(path, Some(pats))
}
hir::ExprStruct(ref path, ref fields, None) => {
is_shorthand: false,
},
}).collect();
- hir::PatStruct(path.clone(), field_pats, false)
+ PatKind::Struct(path.clone(), field_pats, false)
}
hir::ExprVec(ref exprs) => {
let pats = exprs.iter().map(|expr| const_expr_to_pat(tcx, &expr, span)).collect();
- hir::PatVec(pats, None, hir::HirVec::new())
+ PatKind::Vec(pats, None, hir::HirVec::new())
}
hir::ExprPath(_, ref path) => {
let opt_def = tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def());
match opt_def {
- Some(Def::Struct(..)) =>
- hir::PatStruct(path.clone(), hir::HirVec::new(), false),
- Some(Def::Variant(..)) =>
- hir::PatEnum(path.clone(), None),
+ Some(Def::Struct(..)) | Some(Def::Variant(..)) =>
+ PatKind::Path(path.clone()),
Some(Def::Const(def_id)) |
Some(Def::AssociatedConst(def_id)) => {
let expr = lookup_const_by_id(tcx, def_id, Some(expr.id), None).unwrap();
}
}
- _ => hir::PatLit(P(expr.clone()))
+ _ => PatKind::Lit(P(expr.clone()))
};
P(hir::Pat { id: expr.id, node: pat, span: span })
}
use dep_graph::DepNode;
use front::map as ast_map;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::{self, Visitor};
use middle::{pat_util, privacy, ty};
_ => self.tcx.sess.span_bug(lhs.span, "non-ADT in struct pattern")
};
for pat in pats {
- if let hir::PatWild = pat.node.pat.node {
+ if let PatKind::Wild = pat.node.pat.node {
continue;
}
self.insert_def_id(variant.field_named(pat.node.name).did);
fn visit_pat(&mut self, pat: &hir::Pat) {
let def_map = &self.tcx.def_map;
match pat.node {
- hir::PatStruct(_, ref fields, _) => {
+ PatKind::Struct(_, ref fields, _) => {
self.handle_field_pattern_match(pat, fields);
}
_ if pat_util::pat_is_const(&def_map.borrow(), pat) => {
use middle::ty;
use middle::ty::adjustment;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use syntax::ast;
use syntax::ptr::P;
let def_map = &self.tcx().def_map;
if pat_util::pat_is_binding(&def_map.borrow(), pat) {
match pat.node {
- hir::PatIdent(hir::BindByRef(_), _, _) =>
+ PatKind::Ident(hir::BindByRef(_), _, _) =>
mode.lub(BorrowingMatch),
- hir::PatIdent(hir::BindByValue(_), _, _) => {
+ PatKind::Ident(hir::BindByValue(_), _, _) => {
match copy_or_move(self.typer, &cmt_pat, PatBindingMove) {
Copy => mode.lub(CopyingMatch),
Move(_) => mode.lub(MovingMatch),
// It is also a borrow or copy/move of the value being matched.
match pat.node {
- hir::PatIdent(hir::BindByRef(m), _, _) => {
+ PatKind::Ident(hir::BindByRef(m), _, _) => {
if let ty::TyRef(&r, _) = pat_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
delegate.borrow(pat.id, pat.span, cmt_pat,
r, bk, RefBinding);
}
}
- hir::PatIdent(hir::BindByValue(_), _, _) => {
+ PatKind::Ident(hir::BindByValue(_), _, _) => {
let mode = copy_or_move(typer, &cmt_pat, PatBindingMove);
debug!("walk_pat binding consuming pat");
delegate.consume_pat(pat, cmt_pat, mode);
}
} else {
match pat.node {
- hir::PatVec(_, Some(ref slice_pat), _) => {
+ PatKind::Vec(_, Some(ref slice_pat), _) => {
// The `slice_pat` here creates a slice into
// the original vector. This is effectively a
// borrow of the elements of the vector being
let tcx = typer.tcx;
match pat.node {
- hir::PatEnum(_, _) | hir::PatQPath(..) |
- hir::PatIdent(_, _, None) | hir::PatStruct(..) => {
+ PatKind::TupleStruct(..) | PatKind::Path(..) | PatKind::QPath(..) |
+ PatKind::Ident(_, _, None) | PatKind::Struct(..) => {
match def_map.get(&pat.id).map(|d| d.full_def()) {
None => {
// no definition found: pat is not a
}
}
- hir::PatIdent(_, _, Some(_)) => {
+ PatKind::Ident(_, _, Some(_)) => {
// Do nothing; this is a binding (not an enum
// variant or struct), and the cat_pattern call
// will visit the substructure recursively.
}
- hir::PatWild | hir::PatTup(..) | hir::PatBox(..) |
- hir::PatRegion(..) | hir::PatLit(..) | hir::PatRange(..) |
- hir::PatVec(..) => {
+ PatKind::Wild | PatKind::Tup(..) | PatKind::Box(..) |
+ PatKind::Ref(..) | PatKind::Lit(..) | PatKind::Range(..) |
+ PatKind::Vec(..) => {
// Similarly, each of these cases does not
// correspond to an enum variant or struct, so we
// do not do any `matched_pat` calls for these
use middle::ty::adjustment;
use middle::ty::{self, Ty};
-use rustc_front::hir::{MutImmutable, MutMutable};
+use rustc_front::hir::{MutImmutable, MutMutable, PatKind};
use rustc_front::hir;
use syntax::ast;
use syntax::codemap::Span;
fn from_local(tcx: &ty::ctxt, id: ast::NodeId) -> MutabilityCategory {
let ret = match tcx.map.get(id) {
ast_map::NodeLocal(p) => match p.node {
- hir::PatIdent(bind_mode, _, _) => {
+ PatKind::Ident(bind_mode, _, _) => {
if bind_mode == hir::BindByValue(hir::MutMutable) {
McDeclared
} else {
// *being borrowed* is. But ideally we would put in a more
// fundamental fix to this conflated use of the node id.
let ret_ty = match pat.node {
- hir::PatIdent(hir::BindByRef(_), _, _) => {
+ PatKind::Ident(hir::BindByRef(_), _, _) => {
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
None
};
- // Note: This goes up here (rather than within the PatEnum arm
+ // Note: This goes up here (rather than within the PatKind::TupleStruct arm
// alone) because struct patterns can refer to struct types or
// to struct variants within enums.
let cmt = match opt_def {
};
match pat.node {
- hir::PatWild => {
+ PatKind::Wild => {
// _
}
- hir::PatEnum(_, None) => {
+ PatKind::TupleStruct(_, None) => {
// variant(..)
}
- hir::PatEnum(_, Some(ref subpats)) => {
+ PatKind::TupleStruct(_, Some(ref subpats)) => {
match opt_def {
Some(Def::Variant(..)) => {
// variant(x, y, z)
}
}
- hir::PatQPath(..) => {
- // Lone constant: ignore
+ PatKind::Path(..) | PatKind::QPath(..) | PatKind::Ident(_, _, None) => {
+ // Lone constant, or unit variant or identifier: ignore
}
- hir::PatIdent(_, _, Some(ref subpat)) => {
+ PatKind::Ident(_, _, Some(ref subpat)) => {
try!(self.cat_pattern_(cmt, &subpat, op));
}
- hir::PatIdent(_, _, None) => {
- // nullary variant or identifier: ignore
- }
-
- hir::PatStruct(_, ref field_pats, _) => {
+ PatKind::Struct(_, ref field_pats, _) => {
// {f1: p1, ..., fN: pN}
for fp in field_pats {
let field_ty = try!(self.pat_ty(&fp.node.pat)); // see (*2)
}
}
- hir::PatTup(ref subpats) => {
+ PatKind::Tup(ref subpats) => {
// (p1, ..., pN)
for (i, subpat) in subpats.iter().enumerate() {
let subpat_ty = try!(self.pat_ty(&subpat)); // see (*2)
}
}
- hir::PatBox(ref subpat) | hir::PatRegion(ref subpat, _) => {
+ PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) => {
// box p1, &p1, &mut p1. we can ignore the mutability of
- // PatRegion since that information is already contained
+ // PatKind::Ref since that information is already contained
// in the type.
let subcmt = try!(self.cat_deref(pat, cmt, 0, None));
try!(self.cat_pattern_(subcmt, &subpat, op));
}
- hir::PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
let context = InteriorOffsetKind::Pattern;
let vec_cmt = try!(self.deref_vec(pat, cmt, context));
let elt_cmt = try!(self.cat_index(pat, vec_cmt, context));
}
}
- hir::PatLit(_) | hir::PatRange(_, _) => {
+ PatKind::Lit(_) | PatKind::Range(_, _) => {
/*always ok*/
}
}
use util::nodemap::FnvHashMap;
use syntax::ast;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::util::walk_pat;
use syntax::codemap::{respan, Span, Spanned, DUMMY_SP};
pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatLit(_) | hir::PatRange(_, _) | hir::PatQPath(..) => true,
- hir::PatEnum(_, _) |
- hir::PatIdent(_, _, None) |
- hir::PatStruct(..) => {
+ PatKind::Lit(_) | PatKind::Range(_, _) | PatKind::QPath(..) => true,
+ PatKind::TupleStruct(..) |
+ PatKind::Path(..) |
+ PatKind::Ident(_, _, None) |
+ PatKind::Struct(..) => {
match dm.get(&pat.id).map(|d| d.full_def()) {
Some(Def::Variant(..)) => true,
_ => false
}
}
- hir::PatVec(_, _, _) => true,
+ PatKind::Vec(_, _, _) => true,
_ => false
}
}
pub fn pat_is_variant_or_struct(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatEnum(_, _) |
- hir::PatIdent(_, _, None) |
- hir::PatStruct(..) => {
+ PatKind::TupleStruct(..) |
+ PatKind::Path(..) |
+ PatKind::Ident(_, _, None) |
+ PatKind::Struct(..) => {
match dm.get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Variant(..)) | Some(Def::Struct(..)) => true,
+ Some(Def::Variant(..)) | Some(Def::Struct(..)) | Some(Def::TyAlias(..)) => true,
_ => false
}
}
pub fn pat_is_const(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatIdent(_, _, None) | hir::PatEnum(..) | hir::PatQPath(..) => {
+ PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
match dm.get(&pat.id).map(|d| d.full_def()) {
Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => true,
_ => false
// returned instead of a panic.
pub fn pat_is_resolved_const(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatIdent(_, _, None) | hir::PatEnum(..) | hir::PatQPath(..) => {
+ PatKind::Ident(_, _, None) | PatKind::Path(..) | PatKind::QPath(..) => {
match dm.get(&pat.id)
.and_then(|d| if d.depth == 0 { Some(d.base_def) }
else { None } ) {
pub fn pat_is_binding(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatIdent(..) => {
+ PatKind::Ident(..) => {
!pat_is_variant_or_struct(dm, pat) &&
!pat_is_const(dm, pat)
}
pub fn pat_is_binding_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatIdent(..) => pat_is_binding(dm, pat),
- hir::PatWild => true,
+ PatKind::Ident(..) => pat_is_binding(dm, pat),
+ PatKind::Wild => true,
_ => false
}
}
{
walk_pat(pat, |p| {
match p.node {
- hir::PatIdent(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
+ PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
it(binding_mode, p.id, p.span, &respan(pth.span, pth.node.name));
}
_ => {}
{
walk_pat(pat, |p| {
match p.node {
- hir::PatIdent(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
+ PatKind::Ident(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => {
it(binding_mode, p.id, p.span, &respan(pth.span, pth.node));
}
_ => {}
pub fn simple_name<'a>(pat: &'a hir::Pat) -> Option<ast::Name> {
match pat.node {
- hir::PatIdent(hir::BindByValue(_), ref path1, None) => {
+ PatKind::Ident(hir::BindByValue(_), ref path1, None) => {
Some(path1.node.name)
}
_ => {
let mut variants = vec![];
walk_pat(pat, |p| {
match p.node {
- hir::PatEnum(_, _) |
- hir::PatIdent(_, _, None) |
- hir::PatStruct(..) => {
+ PatKind::TupleStruct(..) |
+ PatKind::Path(..) |
+ PatKind::Ident(_, _, None) |
+ PatKind::Struct(..) => {
match dm.get(&p.id) {
Some(&PathResolution { base_def: Def::Variant(_, id), .. }) => {
variants.push(id);
use rustc_front::hir;
use rustc_front::intravisit::{self, Visitor, FnKind};
-use rustc_front::hir::{Block, Item, FnDecl, Arm, Pat, Stmt, Expr, Local};
+use rustc_front::hir::{Block, Item, FnDecl, Arm, Pat, PatKind, Stmt, Expr, Local};
use rustc_front::util::stmt_id;
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable,
// If this is a binding (or maybe a binding, I'm too lazy to check
// the def map) then record the lifetime of that binding.
match pat.node {
- hir::PatIdent(..) => {
+ PatKind::Ident(..) => {
record_var_lifetime(visitor, pat.id, pat.span);
}
_ => { }
/// | box P&
fn is_binding_pat(pat: &hir::Pat) -> bool {
match pat.node {
- hir::PatIdent(hir::BindByRef(_), _, _) => true,
+ PatKind::Ident(hir::BindByRef(_), _, _) => true,
- hir::PatStruct(_, ref field_pats, _) => {
+ PatKind::Struct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat))
}
- hir::PatVec(ref pats1, ref pats2, ref pats3) => {
+ PatKind::Vec(ref pats1, ref pats2, ref pats3) => {
pats1.iter().any(|p| is_binding_pat(&p)) ||
pats2.iter().any(|p| is_binding_pat(&p)) ||
pats3.iter().any(|p| is_binding_pat(&p))
}
- hir::PatEnum(_, Some(ref subpats)) |
- hir::PatTup(ref subpats) => {
+ PatKind::TupleStruct(_, Some(ref subpats)) |
+ PatKind::Tup(ref subpats) => {
subpats.iter().any(|p| is_binding_pat(&p))
}
- hir::PatBox(ref subpat) => {
+ PatKind::Box(ref subpat) => {
is_binding_pat(&subpat)
}
use util::nodemap::{DefIdMap, FnvHashSet, FnvHashMap};
use rustc_front::hir;
-use rustc_front::hir::{Item, Generics, StructField, Variant};
+use rustc_front::hir::{Item, Generics, StructField, Variant, PatKind};
use rustc_front::intravisit::{self, Visitor};
use std::mem::replace;
};
match pat.node {
// Foo(a, b, c)
- // A Variant(..) pattern `hir::PatEnum(_, None)` doesn't have to be recursed into.
- hir::PatEnum(_, Some(ref pat_fields)) => {
+ // A Variant(..) pattern `PatKind::TupleStruct(_, None)` doesn't have to be recursed into.
+ PatKind::TupleStruct(_, Some(ref pat_fields)) => {
for (field, struct_field) in pat_fields.iter().zip(&v.fields) {
maybe_do_stability_check(tcx, struct_field.did, field.span, cb)
}
}
// Foo { a, b, c }
- hir::PatStruct(_, ref pat_fields, _) => {
+ PatKind::Struct(_, ref pat_fields, _) => {
for field in pat_fields {
let did = v.field_named(field.node.name).did;
maybe_do_stability_check(tcx, did, field.span, cb);
use dep_graph::DepGraph;
use middle::infer::InferCtxt;
-use middle::ty::{self, Ty, TypeFoldable};
+use middle::ty::{self, Ty, TypeFoldable, ToPolyTraitRef};
use rustc_data_structures::obligation_forest::{Backtrace, ObligationForest, Error};
use std::iter;
use syntax::ast;
}
}
+
+/// Returns the set of type variables contained in a trait ref.
+fn trait_ref_type_vars<'a, 'tcx>(selcx: &mut SelectionContext<'a, 'tcx>,
+ t: ty::PolyTraitRef<'tcx>) -> Vec<Ty<'tcx>>
+{
+ t.skip_binder() // ok b/c this check doesn't care about regions
+ .input_types()
+ .iter()
+ .map(|t| selcx.infcx().resolve_type_vars_if_possible(t))
+ .filter(|t| t.has_infer_types())
+ .flat_map(|t| t.walk())
+ .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
+ .collect()
+}
+
/// Processes a predicate obligation and returns either:
/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true
/// - `Ok(None)` if we don't have enough info to be sure
// doing more work yet
if !pending_obligation.stalled_on.is_empty() {
if pending_obligation.stalled_on.iter().all(|&ty| {
- let resolved_ty = selcx.infcx().resolve_type_vars_if_possible(&ty);
+ let resolved_ty = selcx.infcx().shallow_resolve(&ty);
resolved_ty == ty // nothing changed here
}) {
debug!("process_predicate: pending obligation {:?} still stalled on {:?}",
// of its type, and those types are resolved at
// the same time.
pending_obligation.stalled_on =
- data.skip_binder() // ok b/c this check doesn't care about regions
- .input_types()
- .iter()
- .map(|t| selcx.infcx().resolve_type_vars_if_possible(t))
- .filter(|t| t.has_infer_types())
- .flat_map(|t| t.walk())
- .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false })
- .collect();
+ trait_ref_type_vars(selcx, data.to_poly_trait_ref());
debug!("process_predicate: pending obligation {:?} now stalled on {:?}",
selcx.infcx().resolve_type_vars_if_possible(obligation),
ty::Predicate::Projection(ref data) => {
let project_obligation = obligation.with(data.clone());
match project::poly_project_and_unify_type(selcx, &project_obligation) {
+ Ok(None) => {
+ pending_obligation.stalled_on =
+ trait_ref_type_vars(selcx, data.to_poly_trait_ref());
+ Ok(None)
+ }
Ok(v) => Ok(v),
Err(e) => Err(CodeProjectionError(e))
}
}
ty::Predicate::WellFormed(ty) => {
- Ok(ty::wf::obligations(selcx.infcx(), obligation.cause.body_id,
- ty, obligation.cause.span))
+ match ty::wf::obligations(selcx.infcx(), obligation.cause.body_id,
+ ty, obligation.cause.span) {
+ None => {
+ pending_obligation.stalled_on = vec![ty];
+ Ok(None)
+ }
+ s => Ok(s)
+ }
}
}
}
use syntax::parse::token::InternedString;
use rustc_front::hir;
-use rustc_front::hir::{ItemImpl, ItemTrait};
+use rustc_front::hir::{ItemImpl, ItemTrait, PatKind};
use rustc_front::intravisit::Visitor;
pub use self::sty::{Binder, DebruijnIndex};
impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
- self.map_bound_ref(|trait_pred| trait_pred.trait_ref.clone())
+ self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
}
}
// This is because here `self` has a `Binder` and so does our
// return value, so we are preserving the number of binding
// levels.
- ty::Binder(self.0.projection_ty.trait_ref.clone())
+ ty::Binder(self.0.projection_ty.trait_ref)
}
}
match self.map.find(id) {
Some(ast_map::NodeLocal(pat)) => {
match pat.node {
- hir::PatIdent(_, ref path1, _) => path1.node.name.as_str(),
+ PatKind::Ident(_, ref path1, _) => path1.node.name.as_str(),
_ => {
self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, pat));
},
use syntax::attr::AttrMetaMethods;
use syntax::errors::{ColorConfig, Handler};
use syntax::parse;
+use syntax::parse::lexer::Reader;
use syntax::parse::token::InternedString;
use syntax::feature_gate::UnstableFeatures;
link_args: Option<Vec<String>> = (None, parse_opt_list,
"extra arguments to pass to the linker (space separated)"),
link_dead_code: bool = (false, parse_bool,
- "let the linker strip dead coded (turning it on can be used for code coverage)"),
+ "don't let linker strip dead code (turning it on can be used for code coverage)"),
lto: bool = (false, parse_bool,
"perform LLVM link-time optimizations"),
target_cpu: Option<String> = (None, parse_opt_string,
// Convert strings provided as --cfg [cfgspec] into a crate_cfg
pub fn parse_cfgspecs(cfgspecs: Vec<String> ) -> ast::CrateConfig {
cfgspecs.into_iter().map(|s| {
- parse::parse_meta_from_source_str("cfgspec".to_string(),
- s.to_string(),
- Vec::new(),
- &parse::ParseSess::new())
+ let sess = parse::ParseSess::new();
+ let mut parser = parse::new_parser_from_source_str(&sess,
+ Vec::new(),
+ "cfgspec".to_string(),
+ s.to_string());
+ let meta_item = panictry!(parser.parse_meta_item());
+
+ if !parser.reader.is_eof() {
+ early_error(ErrorOutputType::default(), &format!("invalid --cfg argument: {}",
+ s))
+ }
+
+ meta_item
}).collect::<ast::CrateConfig>()
}
macro_rules! supported_targets {
( $(($triple:expr, $module:ident)),+ ) => (
+ $(mod $module;)*
+
/// List of supported targets
pub const TARGETS: &'static [&'static str] = &[$($triple),*];
// this would use a match if stringify! were allowed in pattern position
fn load_specific(target: &str) -> Option<Target> {
- $(mod $module;)*
let target = target.replace("-", "_");
if false { }
$(
pub fn target() -> Target {
let mut base = super::openbsd_base::opts();
+ base.cpu = "x86-64".to_string();
base.pre_link_args.push("-m64".to_string());
Target {
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
struct GatherMoveInfo<'tcx> {
id: ast::NodeId,
move_pat: &hir::Pat,
cmt: mc::cmt<'tcx>) {
let pat_span_path_opt = match move_pat.node {
- hir::PatIdent(_, ref path1, _) => {
+ PatKind::Ident(_, ref path1, _) => {
Some(MoveSpanAndPath{span: move_pat.span,
name: path1.node.name})
},
Notice that `x` is stack-allocated by `foo()`. By default, Rust captures
closed-over data by reference. This means that once `foo()` returns, `x` no
-longer exists. An attempt to access `x` within the closure would thus be unsafe.
+longer exists. An attempt to access `x` within the closure would thus be
+unsafe.
Another situation where this might be encountered is when spawning threads:
```
To fix this, ensure that any declared variables are initialized before being
-used.
+used. Example:
+
+```
+fn main() {
+ let x: i32 = 0;
+ let y = x; // ok!
+}
+```
"##,
E0382: r##"
**y = 2;
```
-It can also be fixed by using a type with interior mutability, such as `Cell` or
-`RefCell`:
+It can also be fixed by using a type with interior mutability, such as `Cell`
+or `RefCell`:
```
use std::cell::Cell;
```
Alternatively, we can consider using the `Cell` and `RefCell` types to achieve
-interior mutability through a shared reference. Our example's `mutable` function
-could be redefined as below:
+interior mutability through a shared reference. Our example's `mutable`
+function could be redefined as below:
```
use std::cell::Cell;
Pat {
id: folder.new_id(id),
node: match node {
- PatWild => PatWild,
- PatIdent(binding_mode, pth1, sub) => {
- PatIdent(binding_mode,
+ PatKind::Wild => PatKind::Wild,
+ PatKind::Ident(binding_mode, pth1, sub) => {
+ PatKind::Ident(binding_mode,
Spanned {
span: folder.new_span(pth1.span),
node: folder.fold_ident(pth1.node),
},
sub.map(|x| folder.fold_pat(x)))
}
- PatLit(e) => PatLit(folder.fold_expr(e)),
- PatEnum(pth, pats) => {
- PatEnum(folder.fold_path(pth),
+ PatKind::Lit(e) => PatKind::Lit(folder.fold_expr(e)),
+ PatKind::TupleStruct(pth, pats) => {
+ PatKind::TupleStruct(folder.fold_path(pth),
pats.map(|pats| pats.move_map(|x| folder.fold_pat(x))))
}
- PatQPath(qself, pth) => {
+ PatKind::Path(pth) => {
+ PatKind::Path(folder.fold_path(pth))
+ }
+ PatKind::QPath(qself, pth) => {
let qself = QSelf { ty: folder.fold_ty(qself.ty), ..qself };
- PatQPath(qself, folder.fold_path(pth))
+ PatKind::QPath(qself, folder.fold_path(pth))
}
- PatStruct(pth, fields, etc) => {
+ PatKind::Struct(pth, fields, etc) => {
let pth = folder.fold_path(pth);
let fs = fields.move_map(|f| {
Spanned {
},
}
});
- PatStruct(pth, fs, etc)
+ PatKind::Struct(pth, fs, etc)
}
- PatTup(elts) => PatTup(elts.move_map(|x| folder.fold_pat(x))),
- PatBox(inner) => PatBox(folder.fold_pat(inner)),
- PatRegion(inner, mutbl) => PatRegion(folder.fold_pat(inner), mutbl),
- PatRange(e1, e2) => {
- PatRange(folder.fold_expr(e1), folder.fold_expr(e2))
+ PatKind::Tup(elts) => PatKind::Tup(elts.move_map(|x| folder.fold_pat(x))),
+ PatKind::Box(inner) => PatKind::Box(folder.fold_pat(inner)),
+ PatKind::Ref(inner, mutbl) => PatKind::Ref(folder.fold_pat(inner), mutbl),
+ PatKind::Range(e1, e2) => {
+ PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2))
}
- PatVec(before, slice, after) => {
- PatVec(before.move_map(|x| folder.fold_pat(x)),
+ PatKind::Vec(before, slice, after) => {
+ PatKind::Vec(before.move_map(|x| folder.fold_pat(x)),
slice.map(|x| folder.fold_pat(x)),
after.move_map(|x| folder.fold_pat(x)))
}
pub use self::ForeignItem_::*;
pub use self::Item_::*;
pub use self::Mutability::*;
-pub use self::Pat_::*;
pub use self::PathListItem_::*;
pub use self::PrimTy::*;
pub use self::Stmt_::*;
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)]
pub struct Pat {
pub id: NodeId,
- pub node: Pat_,
+ pub node: PatKind,
pub span: Span,
}
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
-pub enum Pat_ {
+pub enum PatKind {
/// Represents a wildcard pattern (`_`)
- PatWild,
+ Wild,
- /// A PatIdent may either be a new bound variable,
- /// or a nullary enum (in which case the third field
- /// is None).
+ /// A `PatKind::Ident` may either be a new bound variable,
+ /// or a unit struct/variant pattern, or a const pattern (in the last two cases
+ /// the third field must be `None`).
///
- /// In the nullary enum case, the parser can't determine
+ /// In the unit or const pattern case, the parser can't determine
/// which it is. The resolver determines this, and
- /// records this pattern's NodeId in an auxiliary
- /// set (of "PatIdents that refer to nullary enums")
- PatIdent(BindingMode, Spanned<Ident>, Option<P<Pat>>),
+ /// records this pattern's `NodeId` in an auxiliary
+ /// set (of "`PatKind::Ident`s that refer to unit patterns or constants").
+ Ident(BindingMode, Spanned<Ident>, Option<P<Pat>>),
+ /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
+ /// The `bool` is `true` in the presence of a `..`.
+ Struct(Path, HirVec<Spanned<FieldPat>>, bool),
+
+ /// A tuple struct/variant pattern `Variant(x, y, z)`.
/// "None" means a `Variant(..)` pattern where we don't bind the fields to names.
- PatEnum(Path, Option<HirVec<P<Pat>>>),
+ TupleStruct(Path, Option<HirVec<P<Pat>>>),
+
+ /// A path pattern.
+ /// Such pattern can be resolved to a unit struct/variant or a constant.
+ Path(Path),
/// An associated const named using the qualified path `<T>::CONST` or
/// `<T as Trait>::CONST`. Associated consts from inherent impls can be
/// referred to as simply `T::CONST`, in which case they will end up as
- /// PatEnum, and the resolver will have to sort that out.
- PatQPath(QSelf, Path),
+ /// `PatKind::Path`, and the resolver will have to sort that out.
+ QPath(QSelf, Path),
- /// Destructuring of a struct, e.g. `Foo {x, y, ..}`
- /// The `bool` is `true` in the presence of a `..`
- PatStruct(Path, HirVec<Spanned<FieldPat>>, bool),
/// A tuple pattern `(a, b)`
- PatTup(HirVec<P<Pat>>),
+ Tup(HirVec<P<Pat>>),
/// A `box` pattern
- PatBox(P<Pat>),
+ Box(P<Pat>),
/// A reference pattern, e.g. `&mut (a, b)`
- PatRegion(P<Pat>, Mutability),
+ Ref(P<Pat>, Mutability),
/// A literal
- PatLit(P<Expr>),
+ Lit(P<Expr>),
/// A range pattern, e.g. `1...2`
- PatRange(P<Expr>, P<Expr>),
+ Range(P<Expr>, P<Expr>),
/// `[a, b, ..i, y, z]` is represented as:
- /// `PatVec(box [a, b], Some(i), box [y, z])`
- PatVec(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
+ /// `PatKind::Vec(box [a, b], Some(i), box [y, z])`
+ Vec(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
}),
pat: P(Pat {
id: DUMMY_NODE_ID,
- node: PatIdent(BindByValue(mutability), path, None),
+ node: PatKind::Ident(BindByValue(mutability), path, None),
span: span,
}),
id: DUMMY_NODE_ID,
pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) {
match pattern.node {
- PatEnum(ref path, ref opt_children) => {
+ PatKind::TupleStruct(ref path, ref opt_children) => {
visitor.visit_path(path, pattern.id);
if let Some(ref children) = *opt_children {
walk_list!(visitor, visit_pat, children);
}
}
- PatQPath(ref qself, ref path) => {
+ PatKind::Path(ref path) => {
+ visitor.visit_path(path, pattern.id);
+ }
+ PatKind::QPath(ref qself, ref path) => {
visitor.visit_ty(&qself.ty);
visitor.visit_path(path, pattern.id)
}
- PatStruct(ref path, ref fields, _) => {
+ PatKind::Struct(ref path, ref fields, _) => {
visitor.visit_path(path, pattern.id);
for field in fields {
visitor.visit_name(field.span, field.node.name);
visitor.visit_pat(&field.node.pat)
}
}
- PatTup(ref tuple_elements) => {
+ PatKind::Tup(ref tuple_elements) => {
walk_list!(visitor, visit_pat, tuple_elements);
}
- PatBox(ref subpattern) |
- PatRegion(ref subpattern, _) => {
+ PatKind::Box(ref subpattern) |
+ PatKind::Ref(ref subpattern, _) => {
visitor.visit_pat(subpattern)
}
- PatIdent(_, ref pth1, ref optional_subpattern) => {
+ PatKind::Ident(_, ref pth1, ref optional_subpattern) => {
visitor.visit_ident(pth1.span, pth1.node);
walk_list!(visitor, visit_pat, optional_subpattern);
}
- PatLit(ref expression) => visitor.visit_expr(expression),
- PatRange(ref lower_bound, ref upper_bound) => {
+ PatKind::Lit(ref expression) => visitor.visit_expr(expression),
+ PatKind::Range(ref lower_bound, ref upper_bound) => {
visitor.visit_expr(lower_bound);
visitor.visit_expr(upper_bound)
}
- PatWild => (),
- PatVec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
+ PatKind::Wild => (),
+ PatKind::Vec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
walk_list!(visitor, visit_pat, prepatterns);
walk_list!(visitor, visit_pat, slice_pattern);
walk_list!(visitor, visit_pat, postpatterns);
P(hir::Pat {
id: p.id,
node: match p.node {
- PatKind::Wild => hir::PatWild,
+ PatKind::Wild => hir::PatKind::Wild,
PatKind::Ident(ref binding_mode, pth1, ref sub) => {
- hir::PatIdent(lower_binding_mode(lctx, binding_mode),
+ hir::PatKind::Ident(lower_binding_mode(lctx, binding_mode),
respan(pth1.span, lower_ident(lctx, pth1.node)),
sub.as_ref().map(|x| lower_pat(lctx, x)))
}
- PatKind::Lit(ref e) => hir::PatLit(lower_expr(lctx, e)),
+ PatKind::Lit(ref e) => hir::PatKind::Lit(lower_expr(lctx, e)),
PatKind::TupleStruct(ref pth, ref pats) => {
- hir::PatEnum(lower_path(lctx, pth),
+ hir::PatKind::TupleStruct(lower_path(lctx, pth),
pats.as_ref()
.map(|pats| pats.iter().map(|x| lower_pat(lctx, x)).collect()))
}
PatKind::Path(ref pth) => {
- hir::PatEnum(lower_path(lctx, pth), Some(hir::HirVec::new()))
+ hir::PatKind::Path(lower_path(lctx, pth))
}
PatKind::QPath(ref qself, ref pth) => {
let qself = hir::QSelf {
ty: lower_ty(lctx, &qself.ty),
position: qself.position,
};
- hir::PatQPath(qself, lower_path(lctx, pth))
+ hir::PatKind::QPath(qself, lower_path(lctx, pth))
}
PatKind::Struct(ref pth, ref fields, etc) => {
let pth = lower_path(lctx, pth);
}
})
.collect();
- hir::PatStruct(pth, fs, etc)
+ hir::PatKind::Struct(pth, fs, etc)
}
PatKind::Tup(ref elts) => {
- hir::PatTup(elts.iter().map(|x| lower_pat(lctx, x)).collect())
+ hir::PatKind::Tup(elts.iter().map(|x| lower_pat(lctx, x)).collect())
}
- PatKind::Box(ref inner) => hir::PatBox(lower_pat(lctx, inner)),
+ PatKind::Box(ref inner) => hir::PatKind::Box(lower_pat(lctx, inner)),
PatKind::Ref(ref inner, mutbl) => {
- hir::PatRegion(lower_pat(lctx, inner), lower_mutability(lctx, mutbl))
+ hir::PatKind::Ref(lower_pat(lctx, inner), lower_mutability(lctx, mutbl))
}
PatKind::Range(ref e1, ref e2) => {
- hir::PatRange(lower_expr(lctx, e1), lower_expr(lctx, e2))
+ hir::PatKind::Range(lower_expr(lctx, e1), lower_expr(lctx, e2))
}
PatKind::Vec(ref before, ref slice, ref after) => {
- hir::PatVec(before.iter().map(|x| lower_pat(lctx, x)).collect(),
+ hir::PatKind::Vec(before.iter().map(|x| lower_pat(lctx, x)).collect(),
slice.as_ref().map(|x| lower_pat(lctx, x)),
after.iter().map(|x| lower_pat(lctx, x)).collect())
}
path: hir::Path,
subpats: hir::HirVec<P<hir::Pat>>)
-> P<hir::Pat> {
- let pt = hir::PatEnum(path, Some(subpats));
+ let pt = if subpats.is_empty() {
+ hir::PatKind::Path(path)
+ } else {
+ hir::PatKind::TupleStruct(path, Some(subpats))
+ };
pat(lctx, span, pt)
}
ident: hir::Ident,
bm: hir::BindingMode)
-> P<hir::Pat> {
- let pat_ident = hir::PatIdent(bm,
+ let pat_ident = hir::PatKind::Ident(bm,
Spanned {
span: span,
node: ident,
}
fn pat_wild(lctx: &LoweringContext, span: Span) -> P<hir::Pat> {
- pat(lctx, span, hir::PatWild)
+ pat(lctx, span, hir::PatKind::Wild)
}
-fn pat(lctx: &LoweringContext, span: Span, pat: hir::Pat_) -> P<hir::Pat> {
+fn pat(lctx: &LoweringContext, span: Span, pat: hir::PatKind) -> P<hir::Pat> {
P(hir::Pat {
id: lctx.next_id(),
node: pat,
use syntax::ptr::P;
use hir;
-use hir::{Crate, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
+use hir::{Crate, PatKind, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
use std::io::{self, Write, Read};
// Pat isn't normalized, but the beauty of it
// is that it doesn't matter
match pat.node {
- hir::PatWild => try!(word(&mut self.s, "_")),
- hir::PatIdent(binding_mode, ref path1, ref sub) => {
+ PatKind::Wild => try!(word(&mut self.s, "_")),
+ PatKind::Ident(binding_mode, ref path1, ref sub) => {
match binding_mode {
hir::BindByRef(mutbl) => {
try!(self.word_nbsp("ref"));
None => (),
}
}
- hir::PatEnum(ref path, ref args_) => {
+ PatKind::TupleStruct(ref path, ref args_) => {
try!(self.print_path(path, true, 0));
match *args_ {
None => try!(word(&mut self.s, "(..)")),
Some(ref args) => {
- if !args.is_empty() {
- try!(self.popen());
- try!(self.commasep(Inconsistent, &args[..], |s, p| s.print_pat(&p)));
- try!(self.pclose());
- }
+ try!(self.popen());
+ try!(self.commasep(Inconsistent, &args[..], |s, p| s.print_pat(&p)));
+ try!(self.pclose());
}
}
}
- hir::PatQPath(ref qself, ref path) => {
+ PatKind::Path(ref path) => {
+ try!(self.print_path(path, true, 0));
+ }
+ PatKind::QPath(ref qself, ref path) => {
try!(self.print_qpath(path, qself, false));
}
- hir::PatStruct(ref path, ref fields, etc) => {
+ PatKind::Struct(ref path, ref fields, etc) => {
try!(self.print_path(path, true, 0));
try!(self.nbsp());
try!(self.word_space("{"));
try!(space(&mut self.s));
try!(word(&mut self.s, "}"));
}
- hir::PatTup(ref elts) => {
+ PatKind::Tup(ref elts) => {
try!(self.popen());
try!(self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p)));
if elts.len() == 1 {
}
try!(self.pclose());
}
- hir::PatBox(ref inner) => {
+ PatKind::Box(ref inner) => {
try!(word(&mut self.s, "box "));
try!(self.print_pat(&inner));
}
- hir::PatRegion(ref inner, mutbl) => {
+ PatKind::Ref(ref inner, mutbl) => {
try!(word(&mut self.s, "&"));
if mutbl == hir::MutMutable {
try!(word(&mut self.s, "mut "));
}
try!(self.print_pat(&inner));
}
- hir::PatLit(ref e) => try!(self.print_expr(&e)),
- hir::PatRange(ref begin, ref end) => {
+ PatKind::Lit(ref e) => try!(self.print_expr(&e)),
+ PatKind::Range(ref begin, ref end) => {
try!(self.print_expr(&begin));
try!(space(&mut self.s));
try!(word(&mut self.s, "..."));
try!(self.print_expr(&end));
}
- hir::PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
try!(word(&mut self.s, "["));
try!(self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p)));
if let Some(ref p) = *slice {
if !before.is_empty() {
try!(self.word_space(","));
}
- if p.node != hir::PatWild {
+ if p.node != PatKind::Wild {
try!(self.print_pat(&p));
}
try!(word(&mut self.s, ".."));
let m = match explicit_self {
&hir::SelfStatic => hir::MutImmutable,
_ => match decl.inputs[0].pat.node {
- hir::PatIdent(hir::BindByValue(m), _, _) => m,
+ PatKind::Ident(hir::BindByValue(m), _, _) => m,
_ => hir::MutImmutable,
},
};
hir::TyInfer if is_closure => try!(self.print_pat(&input.pat)),
_ => {
match input.pat.node {
- hir::PatIdent(_, ref path1, _) if
+ PatKind::Ident(_, ref path1, _) if
path1.node.name ==
parse::token::special_idents::invalid.name => {
// Do nothing.
}
match pat.node {
- PatIdent(_, _, Some(ref p)) => walk_pat_(&p, it),
- PatStruct(_, ref fields, _) => {
+ PatKind::Ident(_, _, Some(ref p)) => walk_pat_(&p, it),
+ PatKind::Struct(_, ref fields, _) => {
fields.iter().all(|field| walk_pat_(&field.node.pat, it))
}
- PatEnum(_, Some(ref s)) | PatTup(ref s) => {
+ PatKind::TupleStruct(_, Some(ref s)) | PatKind::Tup(ref s) => {
s.iter().all(|p| walk_pat_(&p, it))
}
- PatBox(ref s) | PatRegion(ref s, _) => {
+ PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
walk_pat_(&s, it)
}
- PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
before.iter().all(|p| walk_pat_(&p, it)) &&
slice.iter().all(|p| walk_pat_(&p, it)) &&
after.iter().all(|p| walk_pat_(&p, it))
}
- PatWild |
- PatLit(_) |
- PatRange(_, _) |
- PatIdent(_, _, _) |
- PatEnum(_, _) |
- PatQPath(_, _) => {
+ PatKind::Wild |
+ PatKind::Lit(_) |
+ PatKind::Range(_, _) |
+ PatKind::Ident(_, _, _) |
+ PatKind::TupleStruct(..) |
+ PatKind::Path(..) |
+ PatKind::QPath(_, _) => {
true
}
}
use syntax::attr::{self, AttrMetaMethods};
use syntax::codemap::Span;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::FnKind;
#[derive(PartialEq)]
}
fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) {
- if let &hir::PatIdent(_, ref path1, _) = &p.node {
+ if let &PatKind::Ident(_, ref path1, _) = &p.node {
let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def());
if let Some(Def::Local(..)) = def {
self.check_snake_case(cx, "variable", &path1.node.name.as_str(), Some(p.span));
fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) {
// Lint for constants that look like binding identifiers (#7526)
match (&p.node, cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def())) {
- (&hir::PatIdent(_, ref path1, _), Some(Def::Const(..))) => {
+ (&PatKind::Ident(_, ref path1, _), Some(Def::Const(..))) => {
NonUpperCaseGlobals::check_upper_case(cx, "constant in pattern",
path1.node.name, p.span);
}
use syntax::attr::{self, AttrMetaMethods};
use syntax::codemap::{self, Span};
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::FnKind;
use bad_style::{MethodLateContext, method_context};
impl LateLintPass for NonShorthandFieldPatterns {
fn check_pat(&mut self, cx: &LateContext, pat: &hir::Pat) {
let def_map = cx.tcx.def_map.borrow();
- if let hir::PatStruct(_, ref v, _) = pat.node {
+ if let PatKind::Struct(_, ref v, _) = pat.node {
let field_pats = v.iter().filter(|fieldpat| {
if fieldpat.node.is_shorthand {
return false;
}
});
for fieldpat in field_pats {
- if let hir::PatIdent(_, ident, None) = fieldpat.node.pat.node {
+ if let PatKind::Ident(_, ident, None) = fieldpat.node.pat.node {
if ident.node.unhygienic_name == fieldpat.node.name {
cx.span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span,
&format!("the `{}:` in this pattern is redundant and can \
use syntax;
use rbml::writer::Encoder;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::Visitor;
use rustc_front::intravisit;
rbml_w.start_tag(tag_method_argument_names);
for arg in &decl.inputs {
let tag = tag_method_argument_name;
- if let hir::PatIdent(_, ref path1, _) = arg.pat.node {
+ if let PatKind::Ident(_, ref path1, _) = arg.pat.node {
let name = path1.node.name.as_str();
rbml_w.wr_tagged_bytes(tag, name.as_bytes());
} else {
use rustc::middle::pat_util::{pat_is_resolved_const, pat_is_binding};
use rustc::middle::ty::{self, Ty};
use rustc::mir::repr::*;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use syntax::ast;
use syntax::codemap::Span;
use syntax::ptr::P;
fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> {
let kind = match pat.node {
- hir::PatWild => PatternKind::Wild,
+ PatKind::Wild => PatternKind::Wild,
- hir::PatLit(ref value) => {
+ PatKind::Lit(ref value) => {
let value = const_eval::eval_const_expr(self.cx.tcx, value);
PatternKind::Constant { value: value }
}
- hir::PatRange(ref lo, ref hi) => {
+ PatKind::Range(ref lo, ref hi) => {
let lo = const_eval::eval_const_expr(self.cx.tcx, lo);
let lo = Literal::Value { value: lo };
let hi = const_eval::eval_const_expr(self.cx.tcx, hi);
PatternKind::Range { lo: lo, hi: hi }
},
- hir::PatEnum(..) | hir::PatIdent(..) | hir::PatQPath(..)
+ PatKind::Path(..) | PatKind::Ident(..) | PatKind::QPath(..)
if pat_is_resolved_const(&self.cx.tcx.def_map.borrow(), pat) =>
{
let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
}
}
- hir::PatRegion(ref subpattern, _) |
- hir::PatBox(ref subpattern) => {
+ PatKind::Ref(ref subpattern, _) |
+ PatKind::Box(ref subpattern) => {
PatternKind::Deref { subpattern: self.to_pattern(subpattern) }
}
- hir::PatVec(ref prefix, ref slice, ref suffix) => {
+ PatKind::Vec(ref prefix, ref slice, ref suffix) => {
let ty = self.cx.tcx.node_id_to_type(pat.id);
match ty.sty {
ty::TyRef(_, mt) =>
}
}
- hir::PatTup(ref subpatterns) => {
+ PatKind::Tup(ref subpatterns) => {
let subpatterns =
subpatterns.iter()
.enumerate()
PatternKind::Leaf { subpatterns: subpatterns }
}
- hir::PatIdent(bm, ref ident, ref sub)
+ PatKind::Ident(bm, ref ident, ref sub)
if pat_is_binding(&self.cx.tcx.def_map.borrow(), pat) =>
{
let id = match self.binding_map {
}
}
- hir::PatIdent(..) => {
+ PatKind::Ident(..) | PatKind::Path(..) => {
self.variant_or_leaf(pat, vec![])
}
- hir::PatEnum(_, ref opt_subpatterns) => {
+ PatKind::TupleStruct(_, ref opt_subpatterns) => {
let subpatterns =
opt_subpatterns.iter()
.flat_map(|v| v.iter())
self.variant_or_leaf(pat, subpatterns)
}
- hir::PatStruct(_, ref fields, _) => {
+ PatKind::Struct(_, ref fields, _) => {
let pat_ty = self.cx.tcx.node_id_to_type(pat.id);
let adt_def = match pat_ty.sty {
ty::TyStruct(adt_def, _) | ty::TyEnum(adt_def, _) => adt_def,
self.variant_or_leaf(pat, subpatterns)
}
- hir::PatQPath(..) => {
+ PatKind::QPath(..) => {
self.cx.tcx.sess.span_bug(pat.span, "unexpanded macro or bad constant etc");
}
};
use build;
use graphviz;
use pretty;
-use transform::simplify_cfg;
+use transform::{simplify_cfg, no_landing_pads};
use rustc::dep_graph::DepNode;
use rustc::mir::repr::Mir;
use hair::cx::Cx;
match build_mir(Cx::new(&infcx), implicit_arg_tys, id, span, decl, body) {
Ok(mut mir) => {
+ no_landing_pads::NoLandingPads.run_on_mir(&mut mir, self.tcx);
simplify_cfg::SimplifyCfg::new().run_on_mir(&mut mir, self.tcx);
let meta_item_list = self.attr
pub mod simplify_cfg;
pub mod erase_regions;
+pub mod no_landing_pads;
mod util;
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This pass removes the unwind branch of all the terminators when the no-landing-pads option is
+//! specified.
+
+use rustc::middle::ty;
+use rustc::mir::repr::*;
+use rustc::mir::visit::MutVisitor;
+use rustc::mir::transform::MirPass;
+
+pub struct NoLandingPads;
+
+impl<'tcx> MutVisitor<'tcx> for NoLandingPads {
+ fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>) {
+ match *terminator {
+ Terminator::Goto { .. } |
+ Terminator::Resume |
+ Terminator::Return |
+ Terminator::If { .. } |
+ Terminator::Switch { .. } |
+ Terminator::SwitchInt { .. } => {
+ /* nothing to do */
+ },
+ Terminator::Drop { ref mut unwind, .. } => {
+ unwind.take();
+ },
+ Terminator::Call { ref mut cleanup, .. } => {
+ cleanup.take();
+ },
+ }
+ self.super_terminator(bb, terminator);
+ }
+}
+
+impl MirPass for NoLandingPads {
+ fn run_on_mir<'tcx>(&mut self, mir: &mut Mir<'tcx>, tcx: &ty::ctxt<'tcx>) {
+ if tcx.sess.no_landing_pads() {
+ self.visit_mir(mir);
+ }
+ }
+}
use rustc::middle::const_qualif::ConstQualif;
use rustc::lint::builtin::CONST_ERR;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use syntax::ast;
use syntax::codemap::Span;
use syntax::feature_gate::UnstableFeatures;
fn visit_pat(&mut self, p: &hir::Pat) {
match p.node {
- hir::PatLit(ref lit) => {
+ PatKind::Lit(ref lit) => {
self.global_expr(Mode::Const, &lit);
}
- hir::PatRange(ref start, ref end) => {
+ PatKind::Range(ref start, ref end) => {
self.global_expr(Mode::Const, &start);
self.global_expr(Mode::Const, &end);
```
To solve this error, please ensure that the trait is also public. The trait
-can be made inaccessible if necessary by placing it into a private inner module,
-but it still has to be marked with `pub`. Example:
+can be made inaccessible if necessary by placing it into a private inner
+module, but it still has to be marked with `pub`. Example:
```ignore
pub trait Foo { // we set the Foo trait public
```
To solve this error, please ensure that the type is also public. The type
-can be made inaccessible if necessary by placing it into a private inner module,
-but it still has to be marked with `pub`.
+can be made inaccessible if necessary by placing it into a private inner
+module, but it still has to be marked with `pub`.
Example:
```
```
To solve this issue, please ensure that all of the fields of the tuple struct
-are public. Alternatively, provide a new() method to the tuple struct to
+are public. Alternatively, provide a `new()` method to the tuple struct to
construct it from a given inner value. Example:
```
use std::cmp;
use std::mem::replace;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::{self, Visitor};
use rustc::dep_graph::DepNode;
changed: bool,
}
+struct ReachEverythingInTheInterfaceVisitor<'b, 'a: 'b, 'tcx: 'a> {
+ ev: &'b mut EmbargoVisitor<'a, 'tcx>,
+}
+
impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
fn ty_level(&self, ty: &hir::Ty) -> Option<AccessLevel> {
if let hir::TyPath(..) = ty.node {
old_level
}
}
+
+ fn reach<'b>(&'b mut self) -> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
+ ReachEverythingInTheInterfaceVisitor { ev: self }
+ }
}
impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
}
};
- // Update id of the item itself
+ // Update level of the item itself
let item_level = self.update(item.id, inherited_item_level);
- // Update ids of nested things
+ // Update levels of nested things
match item.node {
hir::ItemEnum(ref def, _) => {
for variant in &def.variants {
}
}
}
- hir::ItemTy(ref ty, _) if item_level.is_some() => {
- if let hir::TyPath(..) = ty.node {
- match self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def() {
- Def::PrimTy(..) | Def::SelfTy(..) | Def::TyParam(..) => {},
- def => {
- if let Some(node_id) = self.tcx.map.as_local_node_id(def.def_id()) {
- self.update(node_id, Some(AccessLevel::Reachable));
- }
+ _ => {}
+ }
+
+ // Mark all items in interfaces of reachable items as reachable
+ match item.node {
+ // The interface is empty
+ hir::ItemExternCrate(..) => {}
+ // All nested items are checked by visit_item
+ hir::ItemMod(..) => {}
+ // Reexports are handled in visit_mod
+ hir::ItemUse(..) => {}
+ // Visit everything
+ hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
+ hir::ItemTrait(..) | hir::ItemTy(..) | hir::ItemImpl(_, _, _, Some(..), _, _) => {
+ if item_level.is_some() {
+ self.reach().visit_item(item);
+ }
+ }
+ // Visit everything, but enum variants have their own levels
+ hir::ItemEnum(ref def, ref generics) => {
+ if item_level.is_some() {
+ self.reach().visit_generics(generics);
+ }
+ for variant in &def.variants {
+ if self.get(variant.node.data.id()).is_some() {
+ for field in variant.node.data.fields() {
+ self.reach().visit_struct_field(field);
+ }
+ // Corner case: if the variant is reachable, but its
+ // enum is not, make the enum reachable as well.
+ self.update(item.id, Some(AccessLevel::Reachable));
+ }
+ }
+ }
+ // Visit everything, but foreign items have their own levels
+ hir::ItemForeignMod(ref foreign_mod) => {
+ for foreign_item in &foreign_mod.items {
+ if self.get(foreign_item.id).is_some() {
+ self.reach().visit_foreign_item(foreign_item);
+ }
+ }
+ }
+ // Visit everything except for private fields
+ hir::ItemStruct(ref struct_def, ref generics) => {
+ if item_level.is_some() {
+ self.reach().visit_generics(generics);
+ for field in struct_def.fields() {
+ if self.get(field.node.id).is_some() {
+ self.reach().visit_struct_field(field);
+ }
+ }
+ }
+ }
+ // The interface is empty
+ hir::ItemDefaultImpl(..) => {}
+ // Visit everything except for private impl items
+ hir::ItemImpl(_, _, ref generics, None, _, ref impl_items) => {
+ if item_level.is_some() {
+ self.reach().visit_generics(generics);
+ for impl_item in impl_items {
+ if self.get(impl_item.id).is_some() {
+ self.reach().visit_impl_item(impl_item);
}
}
}
}
- _ => {}
}
let orig_level = self.prev_level;
}
}
+impl<'b, 'a, 'tcx: 'a> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
+ // Make the type hidden under a type alias reachable
+ fn reach_aliased_type(&mut self, item: &hir::Item, path: &hir::Path) {
+ if let hir::ItemTy(ref ty, ref generics) = item.node {
+ // See `fn is_public_type_alias` for details
+ self.visit_ty(ty);
+ let provided_params = path.segments.last().unwrap().parameters.types().len();
+ for ty_param in &generics.ty_params[provided_params..] {
+ if let Some(ref default_ty) = ty_param.default {
+ self.visit_ty(default_ty);
+ }
+ }
+ }
+ }
+}
+
+impl<'b, 'a, 'tcx: 'a, 'v> Visitor<'v> for ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
+ fn visit_ty(&mut self, ty: &hir::Ty) {
+ if let hir::TyPath(_, ref path) = ty.node {
+ let def = self.ev.tcx.def_map.borrow().get(&ty.id).unwrap().full_def();
+ match def {
+ Def::Struct(def_id) | Def::Enum(def_id) | Def::TyAlias(def_id) |
+ Def::Trait(def_id) | Def::AssociatedTy(def_id, _) => {
+ if let Some(node_id) = self.ev.tcx.map.as_local_node_id(def_id) {
+ let item = self.ev.tcx.map.expect_item(node_id);
+ if let Def::TyAlias(..) = def {
+ // Type aliases are substituted. Associated type aliases are not
+ // substituted yet, but ideally they should be.
+ if self.ev.get(item.id).is_none() {
+ self.reach_aliased_type(item, path);
+ }
+ } else {
+ self.ev.update(item.id, Some(AccessLevel::Reachable));
+ }
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ intravisit::walk_ty(self, ty);
+ }
+
+ fn visit_trait_ref(&mut self, trait_ref: &hir::TraitRef) {
+ let def_id = self.ev.tcx.trait_ref_to_def_id(trait_ref);
+ if let Some(node_id) = self.ev.tcx.map.as_local_node_id(def_id) {
+ let item = self.ev.tcx.map.expect_item(node_id);
+ self.ev.update(item.id, Some(AccessLevel::Reachable));
+ }
+
+ intravisit::walk_trait_ref(self, trait_ref);
+ }
+
+ // Don't recurse into function bodies
+ fn visit_block(&mut self, _: &hir::Block) {}
+ // Don't recurse into expressions in array sizes or const initializers
+ fn visit_expr(&mut self, _: &hir::Expr) {}
+ // Don't recurse into patterns in function arguments
+ fn visit_pat(&mut self, _: &hir::Pat) {}
+}
+
////////////////////////////////////////////////////////////////////////////////
/// The privacy visitor, where privacy checks take place (violations reported)
////////////////////////////////////////////////////////////////////////////////
if self.in_foreign { return }
match pattern.node {
- hir::PatStruct(_, ref fields, _) => {
+ PatKind::Struct(_, ref fields, _) => {
let adt = self.tcx.pat_ty(pattern).ty_adt_def().unwrap();
let def = self.tcx.def_map.borrow().get(&pattern.id).unwrap().full_def();
let variant = adt.variant_of_def(def);
// Patterns which bind no fields are allowable (the path is check
// elsewhere).
- hir::PatEnum(_, Some(ref fields)) => {
+ PatKind::TupleStruct(_, Some(ref fields)) => {
match self.tcx.pat_ty(pattern).sty {
ty::TyStruct(def, _) => {
for (i, field) in fields.iter().enumerate() {
- if let hir::PatWild = field.node {
+ if let PatKind::Wild = field.node {
continue
}
self.check_field(field.span,
"##,
E0253: r##"
-Attempt was made to import an unimportable value. This can happen when
-trying to import a method from a trait. An example of this error:
+Attempt was made to import an unimportable value. This can happen when trying
+to import a method from a trait. An example of this error:
```compile_fail
mod foo {
"##,
E0259: r##"
-The name chosen for an external crate conflicts with another external crate that
-has been imported into the current module.
+The name chosen for an external crate conflicts with another external crate
+that has been imported into the current module.
-Wrong example:
+Erroneous code example:
```compile_fail
extern crate a;
"##,
E0364: r##"
-Private items cannot be publicly re-exported. This error indicates that
-you attempted to `pub use` a type or value that was not itself public.
+Private items cannot be publicly re-exported. This error indicates that you
+attempted to `pub use` a type or value that was not itself public.
Here is an example that demonstrates the error:
pub use foo::X;
```
-See the 'Use Declarations' section of the reference for more information
-on this topic:
+See the 'Use Declarations' section of the reference for more information on
+this topic:
https://doc.rust-lang.org/reference.html#use-declarations
"##,
E0365: r##"
-Private modules cannot be publicly re-exported. This error indicates
-that you attempted to `pub use` a module that was not itself public.
+Private modules cannot be publicly re-exported. This error indicates that you
+attempted to `pub use` a module that was not itself public.
Here is an example that demonstrates the error:
"##,
E0401: r##"
-Inner items do not inherit type parameters from the functions they are
-embedded in. For example, this will not compile:
+Inner items do not inherit type parameters from the functions they are embedded
+in. For example, this will not compile:
```compile_fail
fn foo<T>(x: T) {
"##,
E0411: r##"
-The `Self` keyword was used outside an impl or a trait. Erroneous
-code example:
+The `Self` keyword was used outside an impl or a trait. Erroneous code example:
```compile_fail
<Self>::foo; // error: use of `Self` outside of an impl or trait
```
-The `Self` keyword represents the current type, which explains why it
-can only be used inside an impl or a trait. It gives access to the
-associated items of a type:
+The `Self` keyword represents the current type, which explains why it can only
+be used inside an impl or a trait. It gives access to the associated items of a
+type:
```
trait Foo {
}
```
-However, be careful when two types has a common associated type:
+However, be careful when two types have a common associated type:
```compile_fail
trait Foo {
}
```
-This problem can be solved by specifying from which trait we want
-to use the `Bar` type:
+This problem can be solved by specifying from which trait we want to use the
+`Bar` type:
```
trait Foo {
```compile_fail
impl Something {} // error: use of undeclared type name `Something`
+
// or:
+
trait Foo {
fn bar(N); // error: use of undeclared type name `N`
}
+
// or:
+
fn foo(x: T) {} // error: use of undeclared type name `T`
```
-To fix this error, please verify you didn't misspell the type name,
-you did declare it or imported it into the scope. Examples:
+To fix this error, please verify you didn't misspell the type name, and that
+you declared it or imported it into the scope. Examples:
```
struct Something;
"##,
E0413: r##"
-A declaration shadows an enum variant or unit-like struct in scope.
-Example of erroneous code:
+A declaration shadows an enum variant or unit-like struct in scope. Example of
+erroneous code:
```compile_fail
struct Foo;
"##,
E0415: r##"
-More than one function parameter have the same name. Example of erroneous
-code:
+More than one function parameter has the same name. Example of erroneous code:
```compile_fail
fn foo(f: i32, f: i32) {} // error: identifier `f` is bound more than
"##,
E0416: r##"
-An identifier is bound more than once in a pattern. Example of erroneous
-code:
+An identifier is bound more than once in a pattern. Example of erroneous code:
```compile_fail
match (1, 2) {
"##,
E0419: r##"
-An unknown enum variant, struct or const was used. Example of
-erroneous code:
+An unknown enum variant, struct or const was used. Example of erroneous code:
```compile_fail
match 0 {
"##,
E0422: r##"
-You are trying to use an identifier that is either undefined or not a
-struct. For instance:
+You are trying to use an identifier that is either undefined or not a struct.
+For instance:
``` compile_fail
fn main () {
}
```
-In this case, `foo` is defined, but is not a struct, so Rust can't use
-it as one.
+In this case, `foo` is defined, but is not a struct, so Rust can't use it as
+one.
"##,
E0423: r##"
-A `struct` variant name was used like a function name. Example of
-erroneous code:
+A `struct` variant name was used like a function name. Example of erroneous
+code:
```compile_fail
struct Foo { a: bool};
// it like a function name
```
-Please verify you didn't misspell the name of what you actually wanted
-to use here. Example:
+Please verify you didn't misspell the name of what you actually wanted to use
+here. Example:
```
fn Foo() -> u32 { 0 }
// error: unresolved name `something_that_doesnt_exist::foo`
// or:
+
trait Foo {
fn bar() {
Self; // error: unresolved name `Self`
}
// or:
+
let x = unknown_variable; // error: unresolved name `unknown_variable`
```
"##,
E0431: r##"
-`self` import was made. Erroneous code example:
+An invalid `self` import was made. Erroneous code example:
```compile_fail
use {self}; // error: `self` import can only appear in an import list with a
use rustc_front::hir::{ItemFn, ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl};
use rustc_front::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse};
use rustc_front::hir::Local;
-use rustc_front::hir::{Pat, PatEnum, PatIdent, PatLit, PatQPath};
-use rustc_front::hir::{PatRange, PatStruct, Path, PrimTy};
+use rustc_front::hir::{Pat, PatKind, Path, PrimTy};
use rustc_front::hir::{TraitRef, Ty, TyBool, TyChar, TyFloat, TyInt};
use rustc_front::hir::{TyRptr, TyStr, TyUint, TyPath, TyPtr};
use rustc_front::util::walk_pat;
let pat_id = pattern.id;
walk_pat(pattern, |pattern| {
match pattern.node {
- PatIdent(binding_mode, ref path1, ref at_rhs) => {
- // The meaning of PatIdent with no type parameters
+ PatKind::Ident(binding_mode, ref path1, ref at_rhs) => {
+ // The meaning of PatKind::Ident with no type parameters
// depends on whether an enum variant or unit-like struct
// with that name is in scope. The probing lookup has to
// be careful not to emit spurious errors. Only matching
}
}
- PatEnum(ref path, _) => {
+ PatKind::TupleStruct(ref path, _) | PatKind::Path(ref path) => {
// This must be an enum variant, struct or const.
let resolution = match self.resolve_possibly_assoc_item(pat_id,
None,
ValueNS,
false) {
// The below shouldn't happen because all
- // qualified paths should be in PatQPath.
+ // qualified paths should be in PatKind::QPath.
TypecheckRequired =>
self.session.span_bug(path.span,
- "resolve_possibly_assoc_item claimed
- \
- that a path in PatEnum requires typecheck
- \
- to resolve, but qualified paths should be
- \
- PatQPath"),
+ "resolve_possibly_assoc_item claimed that a path \
+ in PatKind::Path or PatKind::TupleStruct \
+ requires typecheck to resolve, but qualified \
+ paths should be PatKind::QPath"),
ResolveAttempt(resolution) => resolution,
};
if let Some(path_res) = resolution {
intravisit::walk_path(self, path);
}
- PatQPath(ref qself, ref path) => {
+ PatKind::QPath(ref qself, ref path) => {
// Associated constants only.
let resolution = match self.resolve_possibly_assoc_item(pat_id,
Some(qself),
intravisit::walk_pat(self, pattern);
}
- PatStruct(ref path, _, _) => {
+ PatKind::Struct(ref path, _, _) => {
match self.resolve_path(pat_id, path, 0, TypeNS, false) {
Some(definition) => {
self.record_def(pattern.id, definition);
intravisit::walk_path(self, path);
}
- PatLit(_) | PatRange(..) => {
+ PatKind::Lit(_) | PatKind::Range(..) => {
intravisit::walk_pat(self, pattern);
}
use std::cmp::Ordering;
use std::fmt;
use std::rc::Rc;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
use syntax::codemap::Span;
use rustc_front::fold::Folder;
fn has_nested_bindings(m: &[Match], col: usize) -> bool {
for br in m {
match br.pats[col].node {
- hir::PatIdent(_, _, Some(_)) => return true,
+ PatKind::Ident(_, _, Some(_)) => return true,
_ => ()
}
}
let mut pat = br.pats[col];
loop {
pat = match pat.node {
- hir::PatIdent(_, ref path, Some(ref inner)) => {
+ PatKind::Ident(_, ref path, Some(ref inner)) => {
bound_ptrs.push((path.node.name, val.val));
&inner
},
let this = br.pats[col];
let mut bound_ptrs = br.bound_ptrs.clone();
match this.node {
- hir::PatIdent(_, ref path, None) => {
+ PatKind::Ident(_, ref path, None) => {
if pat_is_binding(&dm.borrow(), &this) {
bound_ptrs.push((path.node.name, val.val));
}
}
- hir::PatVec(ref before, Some(ref slice), ref after) => {
- if let hir::PatIdent(_, ref path, None) = slice.node {
+ PatKind::Vec(ref before, Some(ref slice), ref after) => {
+ if let PatKind::Ident(_, ref path, None) = slice.node {
let subslice_val = bind_subslice_pat(
bcx, this.id, val,
before.len(), after.len());
};
let opt = match cur.node {
- hir::PatLit(ref l) => {
+ PatKind::Lit(ref l) => {
ConstantValue(ConstantExpr(&l), debug_loc)
}
- hir::PatIdent(..) | hir::PatEnum(..) | hir::PatStruct(..) => {
+ PatKind::Ident(..) | PatKind::Path(..) |
+ PatKind::TupleStruct(..) | PatKind::Struct(..) => {
// This is either an enum variant or a variable binding.
let opt_def = tcx.def_map.borrow().get(&cur.id).map(|d| d.full_def());
match opt_def {
_ => continue
}
}
- hir::PatRange(ref l1, ref l2) => {
+ PatKind::Range(ref l1, ref l2) => {
ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc)
}
- hir::PatVec(ref before, None, ref after) => {
+ PatKind::Vec(ref before, None, ref after) => {
SliceLengthEqual(before.len() + after.len(), debug_loc)
}
- hir::PatVec(ref before, Some(_), ref after) => {
+ PatKind::Vec(ref before, Some(_), ref after) => {
SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc)
}
_ => continue
}
fn any_uniq_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, hir::PatBox(_))
+ any_pat!(m, col, PatKind::Box(_))
}
fn any_region_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, hir::PatRegion(..))
+ any_pat!(m, col, PatKind::Ref(..))
}
fn any_irrefutable_adt_pat(tcx: &ty::ctxt, m: &[Match], col: usize) -> bool {
m.iter().any(|br| {
let pat = br.pats[col];
match pat.node {
- hir::PatTup(_) => true,
- hir::PatStruct(..) => {
- match tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Variant(..)) => false,
- _ => true,
- }
- }
- hir::PatEnum(..) | hir::PatIdent(_, _, None) => {
- match tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Struct(..)) => true,
- _ => false
+ PatKind::Tup(_) => true,
+ PatKind::Struct(..) | PatKind::TupleStruct(..) |
+ PatKind::Path(..) | PatKind::Ident(_, _, None) => {
+ match tcx.def_map.borrow().get(&pat.id).unwrap().full_def() {
+ Def::Struct(..) | Def::TyAlias(..) => true,
+ _ => false,
}
}
_ => false
fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> {
fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize {
match pat.node {
- hir::PatIdent(_, _, Some(ref inner)) => pat_score(def_map, &inner),
+ PatKind::Ident(_, _, Some(ref inner)) => pat_score(def_map, &inner),
_ if pat_is_refutable(&def_map.borrow(), pat) => 1,
_ => 0
}
let column_contains_any_nonwild_patterns = |&col: &usize| -> bool {
m.iter().any(|row| match row.pats[col].node {
- hir::PatWild => false,
+ PatKind::Wild => false,
_ => true
})
};
// to the default arm.
let has_default = arms.last().map_or(false, |arm| {
arm.pats.len() == 1
- && arm.pats.last().unwrap().node == hir::PatWild
+ && arm.pats.last().unwrap().node == PatKind::Wild
});
compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
let tcx = bcx.tcx();
let ccx = bcx.ccx();
match pat.node {
- hir::PatIdent(pat_binding_mode, ref path1, ref inner) => {
+ PatKind::Ident(pat_binding_mode, ref path1, ref inner) => {
if pat_is_binding(&tcx.def_map.borrow(), &pat) {
// Allocate the stack slot where the value of this
// binding will live and place it into the appropriate
bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope);
}
}
- hir::PatEnum(_, ref sub_pats) => {
+ PatKind::TupleStruct(_, ref sub_pats) => {
let opt_def = bcx.tcx().def_map.borrow().get(&pat.id).map(|d| d.full_def());
match opt_def {
Some(Def::Variant(enum_id, var_id)) => {
}
}
}
- hir::PatStruct(_, ref fields, _) => {
+ PatKind::Struct(_, ref fields, _) => {
let tcx = bcx.tcx();
let pat_ty = node_id_type(bcx, pat.id);
let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
cleanup_scope);
}
}
- hir::PatTup(ref elems) => {
+ PatKind::Tup(ref elems) => {
let repr = adt::represent_node(bcx, pat.id);
let val = adt::MaybeSizedValue::sized(val.val);
for (i, elem) in elems.iter().enumerate() {
cleanup_scope);
}
}
- hir::PatBox(ref inner) => {
+ PatKind::Box(ref inner) => {
let pat_ty = node_id_type(bcx, inner.id);
// Pass along DSTs as fat pointers.
let val = if type_is_fat_ptr(tcx, pat_ty) {
// We need to check for this, as the pattern could be binding
// a fat pointer by-value.
- if let hir::PatIdent(hir::BindByRef(_),_,_) = inner.node {
+ if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
val.val
} else {
Load(bcx, val.val)
bcx = bind_irrefutable_pat(
bcx, &inner, MatchInput::from_val(val), cleanup_scope);
}
- hir::PatRegion(ref inner, _) => {
+ PatKind::Ref(ref inner, _) => {
let pat_ty = node_id_type(bcx, inner.id);
// Pass along DSTs as fat pointers.
let val = if type_is_fat_ptr(tcx, pat_ty) {
// We need to check for this, as the pattern could be binding
// a fat pointer by-value.
- if let hir::PatIdent(hir::BindByRef(_),_,_) = inner.node {
+ if let PatKind::Ident(hir::BindByRef(_),_,_) = inner.node {
val.val
} else {
Load(bcx, val.val)
MatchInput::from_val(val),
cleanup_scope);
}
- hir::PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
let pat_ty = node_id_type(bcx, pat.id);
let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
match slice {
cleanup_scope)
});
}
- hir::PatQPath(..) | hir::PatWild | hir::PatLit(_) |
- hir::PatRange(_, _) => ()
+ PatKind::Path(..) | PatKind::QPath(..) | PatKind::Wild | PatKind::Lit(_) |
+ PatKind::Range(_, _) => ()
}
return bcx;
}
{
self.bcx.monomorphize(value)
}
+
+ pub fn set_lpad(&self, lpad: Option<LandingPad>) {
+ self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
+ }
}
impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
use middle::const_eval::{const_int_checked_rem, const_uint_checked_rem};
use middle::const_eval::{const_int_checked_shl, const_uint_checked_shl};
use middle::const_eval::{const_int_checked_shr, const_uint_checked_shr};
-use middle::const_eval::EvalHint::ExprTypeChecked;
-use middle::const_eval::eval_const_expr_partial;
use middle::def::Def;
use middle::def_id::DefId;
use trans::{adt, closure, debuginfo, expr, inline, machine};
}
}
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TrueConst {
Yes, No
}
},
hir::ExprIndex(ref base, ref index) => {
let (bv, bt) = try!(const_expr(cx, &base, param_substs, fn_args, trueconst));
- let iv = match eval_const_expr_partial(cx.tcx(), &index, ExprTypeChecked, None) {
- Ok(ConstVal::Int(i)) => i as u64,
- Ok(ConstVal::Uint(u)) => u,
- _ => cx.sess().span_bug(index.span,
- "index is not an integer-constant expression")
+ let iv = try!(const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)).0;
+ let iv = if let Some(iv) = const_to_opt_uint(iv) {
+ iv
+ } else {
+ cx.sess().span_bug(index.span, "index is not an integer-constant expression");
};
let (arr, len) = match bt.sty {
ty::TyArray(_, u) => (bv, C_uint(cx, u)),
use syntax::{ast, codemap};
use rustc_front;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
// This procedure builds the *scope map* for a given function, which maps any
// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
// ast_util::walk_pat() here because we have to visit *all* nodes in
// order to put them into the scope map. The above functions don't do that.
match pat.node {
- hir::PatIdent(_, ref path1, ref sub_pat_opt) => {
+ PatKind::Ident(_, ref path1, ref sub_pat_opt) => {
// Check if this is a binding. If so we need to put it on the
// scope stack and maybe introduce an artificial scope
}
}
- hir::PatWild => {
+ PatKind::Wild => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
- hir::PatEnum(_, ref sub_pats_opt) => {
+ PatKind::TupleStruct(_, ref sub_pats_opt) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
if let Some(ref sub_pats) = *sub_pats_opt {
}
}
- hir::PatQPath(..) => {
+ PatKind::Path(..) | PatKind::QPath(..) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
}
- hir::PatStruct(_, ref field_pats, _) => {
+ PatKind::Struct(_, ref field_pats, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for &codemap::Spanned {
}
}
- hir::PatTup(ref sub_pats) => {
+ PatKind::Tup(ref sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in sub_pats {
}
}
- hir::PatBox(ref sub_pat) | hir::PatRegion(ref sub_pat, _) => {
+ PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_pattern(cx, &sub_pat, scope_stack, scope_map);
}
- hir::PatLit(ref exp) => {
+ PatKind::Lit(ref exp) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &exp, scope_stack, scope_map);
}
- hir::PatRange(ref exp1, ref exp2) => {
+ PatKind::Range(ref exp1, ref exp2) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
walk_expr(cx, &exp1, scope_stack, scope_map);
walk_expr(cx, &exp2, scope_stack, scope_map);
}
- hir::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
+ PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
for sub_pat in front_sub_pats {
use middle::pat_util;
use middle::subst;
use rustc::front::map as hir_map;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use trans::{type_of, adt, machine, monomorphize};
use trans::common::{self, CrateContext, FunctionContext, Block};
use trans::_match::{BindingInfo, TransBindingMode};
}
Some(hir_map::NodeLocal(pat)) => {
match pat.node {
- hir::PatIdent(_, ref path1, _) => {
+ PatKind::Ident(_, ref path1, _) => {
path1.node.name
}
_ => {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::{BasicBlockRef, ValueRef};
+use llvm::{BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::middle::ty;
use rustc::mir::repr as mir;
use syntax::abi::Abi;
let mut bcx = self.bcx(bb);
let data = self.mir.basic_block_data(bb);
+ // MSVC SEH bits
+ let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
+ (Some(cp), Some(cb))
+ } else {
+ (None, None)
+ };
+ let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
+ bcx.cleanup_ret(cp, Some(llbb));
+ } else {
+ bcx.br(llbb);
+ };
+
for statement in &data.statements {
bcx = self.trans_statement(bcx, statement);
}
debug!("trans_block: terminator: {:?}", data.terminator());
match *data.terminator() {
+ mir::Terminator::Resume => {
+ if let Some(cleanup_pad) = cleanup_pad {
+ bcx.cleanup_ret(cleanup_pad, None);
+ } else {
+ let ps = self.get_personality_slot(&bcx);
+ let lp = bcx.load(ps);
+ bcx.with_block(|bcx| {
+ base::call_lifetime_end(bcx, ps);
+ base::trans_unwind_resume(bcx, lp);
+ });
+ }
+ }
+
mir::Terminator::Goto { target } => {
- bcx.br(self.llblock(target));
+ funclet_br(bcx, self.llblock(target));
}
mir::Terminator::If { ref cond, targets: (true_bb, false_bb) } => {
}
}
- mir::Terminator::Resume => {
- let ps = self.get_personality_slot(&bcx);
- let lp = bcx.load(ps);
- bcx.with_block(|bcx| {
- base::call_lifetime_end(bcx, ps);
- base::trans_unwind_resume(bcx, lp);
- });
- }
-
mir::Terminator::Return => {
let return_ty = bcx.monomorphize(&self.mir.return_ty);
bcx.with_block(|bcx| {
- base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None);
+ base::build_return_block(self.fcx, bcx, return_ty, DebugLoc::None);
})
}
let ty = lvalue.ty.to_ty(bcx.tcx());
// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
- bcx.br(self.llblock(target));
+ funclet_br(bcx, self.llblock(target));
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
&[llvalue],
self.llblock(target),
unwind.llbb(),
- None,
+ cleanup_bundle.as_ref(),
None);
} else {
- bcx.call(drop_fn, &[llvalue], None, None);
- bcx.br(self.llblock(target));
+ bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref(), None);
+ funclet_br(bcx, self.llblock(target));
}
}
}
}
- let avoid_invoke = bcx.with_block(|bcx| base::avoid_invoke(bcx));
// Many different ways to call a function handled here
- match (is_foreign, avoid_invoke, cleanup, destination) {
+ match (is_foreign, cleanup, destination) {
// The two cases below are the only ones to use LLVM’s `invoke`.
- (false, false, &Some(cleanup), &None) => {
+ (false, &Some(cleanup), &None) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let unreachable_blk = self.unreachable_block();
&llargs[..],
unreachable_blk.llbb,
landingpad.llbb(),
- None,
+ cleanup_bundle.as_ref(),
Some(attrs));
},
- (false, false, &Some(cleanup), &Some((_, success))) => {
+ (false, &Some(cleanup), &Some((_, success))) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
- (bcx.fcx().new_block("", None).build(), Some(self.bcx(success)))
+ (self.fcx.new_block("", None).build(), Some(self.bcx(success)))
} else {
(self.bcx(success), None)
};
&llargs[..],
target.llbb(),
landingpad.llbb(),
- None,
+ cleanup_bundle.as_ref(),
Some(attrs));
if let Some(postinvoketarget) = postinvoke {
// We translate the copy into a temporary block. The temporary block is
target.br(postinvoketarget.llbb());
}
},
- (false, _, _, &None) => {
- bcx.call(callee.immediate(), &llargs[..], None, Some(attrs));
+ (false, _, &None) => {
+ bcx.call(callee.immediate(),
+ &llargs[..],
+ cleanup_bundle.as_ref(),
+ Some(attrs));
bcx.unreachable();
}
- (false, _, _, &Some((_, target))) => {
+ (false, _, &Some((_, target))) => {
let llret = bcx.call(callee.immediate(),
&llargs[..],
- None,
+ cleanup_bundle.as_ref(),
Some(attrs));
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
});
}
- bcx.br(self.llblock(target));
+ funclet_br(bcx, self.llblock(target));
}
// Foreign functions
- (true, _, _, destination) => {
+ (true, _, destination) => {
let (dest, _) = ret_dest_ty
.expect("return destination is not set");
bcx = bcx.map_block(|bcx| {
debugloc)
});
if let Some((_, target)) = *destination {
- bcx.br(self.llblock(target));
+ funclet_br(bcx, self.llblock(target));
}
},
}
}
}
+ /// Create a landingpad wrapper around the given Block.
+ ///
+ /// No-op in MSVC SEH scheme.
fn make_landing_pad(&mut self,
cleanup: BlockAndBuilder<'bcx, 'tcx>)
-> BlockAndBuilder<'bcx, 'tcx>
{
- // FIXME(#30941) this doesn't handle msvc-style exceptions
+ if base::wants_msvc_seh(cleanup.sess()) {
+ return cleanup;
+ }
let bcx = self.fcx.new_block("cleanup", None).build();
let ccx = bcx.ccx();
let llpersonality = self.fcx.eh_personality();
bcx
}
+ /// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
+ ///
+ /// Also handles setting some state for the original trans and creating an operand bundle for
+ /// function calls.
+ fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
+ let bcx = self.bcx(bb);
+ let data = self.mir.basic_block_data(bb);
+ let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
+ let cleanup_pad = if use_funclets {
+ bcx.set_personality_fn(self.fcx.eh_personality());
+ Some(bcx.cleanup_pad(None, &[]))
+ } else {
+ None
+ };
+ // Set the landingpad global-state for old translator, so it knows about the SEH used.
+ bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
+ Some(common::LandingPad::msvc(cleanup_pad))
+ } else if data.is_cleanup {
+ Some(common::LandingPad::gnu())
+ } else {
+ None
+ });
+ cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
+ }
+
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.new_block("unreachable", None);
use syntax::codemap::{Span, Spanned};
use syntax::ptr::P;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::print::pprust;
use rustc_front::util as hir_util;
expected);
match pat.node {
- hir::PatWild => {
+ PatKind::Wild => {
fcx.write_ty(pat.id, expected);
}
- hir::PatLit(ref lt) => {
+ PatKind::Lit(ref lt) => {
check_expr(fcx, &lt);
let expr_ty = fcx.expr_ty(&lt);
// that's equivalent to there existing a LUB.
demand::suptype(fcx, pat.span, expected, pat_ty);
}
- hir::PatRange(ref begin, ref end) => {
+ PatKind::Range(ref begin, ref end) => {
check_expr(fcx, begin);
check_expr(fcx, end);
// subtyping doesn't matter here, as the value is some kind of scalar
demand::eqtype(fcx, pat.span, expected, lhs_ty);
}
- hir::PatEnum(..) | hir::PatIdent(..)
+ PatKind::Path(..) | PatKind::Ident(..)
if pat_is_resolved_const(&tcx.def_map.borrow(), pat) => {
- if let hir::PatEnum(ref path, ref subpats) = pat.node {
- if !(subpats.is_some() && subpats.as_ref().unwrap().is_empty()) {
- bad_struct_kind_err(tcx.sess, pat, path, false);
- return;
- }
- }
if let Some(pat_def) = tcx.def_map.borrow().get(&pat.id) {
let const_did = pat_def.def_id();
let const_scheme = tcx.lookup_item_type(const_did);
// FIXME(#20489) -- we should limit the types here to scalars or something!
- // As with PatLit, what we really want here is that there
+ // As with PatKind::Lit, what we really want here is that there
// exist a LUB, but for the cases that can occur, subtype
// is good enough.
demand::suptype(fcx, pat.span, expected, const_ty);
fcx.write_error(pat.id);
}
}
- hir::PatIdent(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map.borrow(), pat) => {
+ PatKind::Ident(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map.borrow(), pat) => {
let typ = fcx.local_ty(pat.span, pat.id);
match bm {
hir::BindByRef(mutbl) => {
}
}
}
- hir::PatIdent(_, ref path, _) => {
+ PatKind::Ident(_, ref path, _) => {
let path = hir_util::ident_to_path(path.span, path.node);
check_pat_enum(pcx, pat, &path, Some(&[]), expected, false);
}
- hir::PatEnum(ref path, ref subpats) => {
- let subpats = subpats.as_ref().map(|v| &v[..]);
- let is_tuple_struct_pat = !(subpats.is_some() && subpats.unwrap().is_empty());
- check_pat_enum(pcx, pat, path, subpats, expected, is_tuple_struct_pat);
+ PatKind::TupleStruct(ref path, ref subpats) => {
+ check_pat_enum(pcx, pat, path, subpats.as_ref().map(|v| &v[..]), expected, true);
+ }
+ PatKind::Path(ref path) => {
+ check_pat_enum(pcx, pat, path, None, expected, false);
}
- hir::PatQPath(ref qself, ref path) => {
+ PatKind::QPath(ref qself, ref path) => {
let self_ty = fcx.to_ty(&qself.ty);
let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) {
if d.base_def == Def::Err {
}
}
}
- hir::PatStruct(ref path, ref fields, etc) => {
+ PatKind::Struct(ref path, ref fields, etc) => {
check_pat_struct(pcx, pat, path, fields, etc, expected);
}
- hir::PatTup(ref elements) => {
+ PatKind::Tup(ref elements) => {
let element_tys: Vec<_> =
(0..elements.len()).map(|_| fcx.infcx().next_ty_var())
.collect();
check_pat(pcx, &element_pat, element_ty);
}
}
- hir::PatBox(ref inner) => {
+ PatKind::Box(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
let uniq_ty = tcx.mk_box(inner_ty);
check_pat(pcx, &inner, tcx.types.err);
}
}
- hir::PatRegion(ref inner, mutbl) => {
+ PatKind::Ref(ref inner, mutbl) => {
let expected = fcx.infcx().shallow_resolve(expected);
if check_dereferencable(pcx, pat.span, expected, &inner) {
// `demand::subtype` would be good enough, but using
check_pat(pcx, &inner, tcx.types.err);
}
}
- hir::PatVec(ref before, ref slice, ref after) => {
+ PatKind::Vec(ref before, ref slice, ref after) => {
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
let inner_ty = fcx.infcx().next_ty_var();
let pat_ty = match expected_ty.sty {
use rustc_front::intravisit::{self, Visitor};
use rustc_front::hir;
-use rustc_front::hir::Visibility;
+use rustc_front::hir::{Visibility, PatKind};
use rustc_front::print::pprust;
use rustc_back::slice;
// Add pattern bindings.
fn visit_pat(&mut self, p: &'tcx hir::Pat) {
- if let hir::PatIdent(_, ref path1, _) = p.node {
+ if let PatKind::Ident(_, ref path1, _) = p.node {
if pat_util::pat_is_binding(&self.fcx.ccx.tcx.def_map.borrow(), p) {
let var_ty = self.assign(p.span, p.id, None);
use syntax::ast;
use syntax::codemap::Span;
use rustc_front::intravisit::{self, Visitor};
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::util as hir_util;
use self::SubjectNode::Subject;
let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
match sub_pat.node {
// `ref x` pattern
- hir::PatIdent(hir::BindByRef(mutbl), _, _) => {
+ PatKind::Ident(hir::BindByRef(mutbl), _, _) => {
link_region_from_node_type(
rcx, sub_pat.span, sub_pat.id,
mutbl, sub_cmt);
}
// `[_, ..slice, _]` pattern
- hir::PatVec(_, Some(ref slice_pat), _) => {
+ PatKind::Vec(_, Some(ref slice_pat), _) => {
match mc.cat_slice_pattern(sub_cmt, &slice_pat) {
Ok((slice_cmt, slice_mutbl, slice_r)) => {
link_region(rcx, sub_pat.span, &slice_r,
use syntax::codemap::Span;
use syntax::parse::token::special_idents;
use syntax::ptr::P;
-use rustc_front::hir;
+use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit;
use rustc_front::print::pprust;
{
for i in &decl.inputs {
match i.pat.node {
- hir::PatIdent(_, _, _) => (),
- hir::PatWild => (),
+ PatKind::Ident(_, _, _) => (),
+ PatKind::Wild => (),
_ => {
span_err!(ccx.tcx.sess, i.pat.span, E0130,
"patterns aren't allowed in foreign function declarations");
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This file infers the variance of type and lifetime parameters. The
-//! algorithm is taken from Section 4 of the paper "Taming the Wildcards:
-//! Combining Definition- and Use-Site Variance" published in PLDI'11 and
-//! written by Altidor et al., and hereafter referred to as The Paper.
-//!
-//! This inference is explicitly designed *not* to consider the uses of
-//! types within code. To determine the variance of type parameters
-//! defined on type `X`, we only consider the definition of the type `X`
-//! and the definitions of any types it references.
-//!
-//! We only infer variance for type parameters found on *data types*
-//! like structs and enums. In these cases, there is fairly straightforward
-//! explanation for what variance means. The variance of the type
-//! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
-//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
-//! (resp. `'a` and `'b`).
-//!
-//! We do not infer variance for type parameters found on traits, fns,
-//! or impls. Variance on trait parameters can indeed make sense
-//! (and we used to compute it) but it is actually rather subtle in
-//! meaning and not that useful in practice, so we removed it. See the
-//! addendum for some details. Variances on fn/impl parameters, otoh,
-//! doesn't make sense because these parameters are instantiated and
-//! then forgotten, they don't persist in types or compiled
-//! byproducts.
-//!
-//! ### The algorithm
-//!
-//! The basic idea is quite straightforward. We iterate over the types
-//! defined and, for each use of a type parameter X, accumulate a
-//! constraint indicating that the variance of X must be valid for the
-//! variance of that use site. We then iteratively refine the variance of
-//! X until all constraints are met. There is *always* a sol'n, because at
-//! the limit we can declare all type parameters to be invariant and all
-//! constraints will be satisfied.
-//!
-//! As a simple example, consider:
-//!
-//! enum Option<A> { Some(A), None }
-//! enum OptionalFn<B> { Some(|B|), None }
-//! enum OptionalMap<C> { Some(|C| -> C), None }
-//!
-//! Here, we will generate the constraints:
-//!
-//! 1. V(A) <= +
-//! 2. V(B) <= -
-//! 3. V(C) <= +
-//! 4. V(C) <= -
-//!
-//! These indicate that (1) the variance of A must be at most covariant;
-//! (2) the variance of B must be at most contravariant; and (3, 4) the
-//! variance of C must be at most covariant *and* contravariant. All of these
-//! results are based on a variance lattice defined as follows:
-//!
-//! * Top (bivariant)
-//! - +
-//! o Bottom (invariant)
-//!
-//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
-//! optimal solution. Note that there is always a naive solution which
-//! just declares all variables to be invariant.
-//!
-//! You may be wondering why fixed-point iteration is required. The reason
-//! is that the variance of a use site may itself be a function of the
-//! variance of other type parameters. In full generality, our constraints
-//! take the form:
-//!
-//! V(X) <= Term
-//! Term := + | - | * | o | V(X) | Term x Term
-//!
-//! Here the notation V(X) indicates the variance of a type/region
-//! parameter `X` with respect to its defining class. `Term x Term`
-//! represents the "variance transform" as defined in the paper:
-//!
-//! If the variance of a type variable `X` in type expression `E` is `V2`
-//! and the definition-site variance of the [corresponding] type parameter
-//! of a class `C` is `V1`, then the variance of `X` in the type expression
-//! `C<E>` is `V3 = V1.xform(V2)`.
-//!
-//! ### Constraints
-//!
-//! If I have a struct or enum with where clauses:
-//!
-//! struct Foo<T:Bar> { ... }
-//!
-//! you might wonder whether the variance of `T` with respect to `Bar`
-//! affects the variance `T` with respect to `Foo`. I claim no. The
-//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
-//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
-//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that
-//! case, the upcast will be illegal, but not because of a variance
-//! failure, but rather because the target type `Foo<Y>` is itself just
-//! not well-formed. Basically we get to assume well-formedness of all
-//! types involved before considering variance.
-//!
-//! ### Addendum: Variance on traits
-//!
-//! As mentioned above, we used to permit variance on traits. This was
-//! computed based on the appearance of trait type parameters in
-//! method signatures and was used to represent the compatibility of
-//! vtables in trait objects (and also "virtual" vtables or dictionary
-//! in trait bounds). One complication was that variance for
-//! associated types is less obvious, since they can be projected out
-//! and put to myriad uses, so it's not clear when it is safe to allow
-//! `X<A>::Bar` to vary (or indeed just what that means). Moreover (as
-//! covered below) all inputs on any trait with an associated type had
-//! to be invariant, limiting the applicability. Finally, the
-//! annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all
-//! trait type parameters had a variance were confusing and annoying
-//! for little benefit.
-//!
-//! Just for historical reference, I am going to preserve some text indicating
-//! how one could interpret variance and trait matching.
-//!
-//! #### Variance and object types
-//!
-//! Just as with structs and enums, we can decide the subtyping
-//! relationship between two object types `&Trait<A>` and `&Trait<B>`
-//! based on the relationship of `A` and `B`. Note that for object
-//! types we ignore the `Self` type parameter -- it is unknown, and
-//! the nature of dynamic dispatch ensures that we will always call a
-//! function that is expected the appropriate `Self` type. However, we
-//! must be careful with the other type parameters, or else we could
-//! end up calling a function that is expecting one type but provided
-//! another.
-//!
-//! To see what I mean, consider a trait like so:
-//!
-//! trait ConvertTo<A> {
-//! fn convertTo(&self) -> A;
-//! }
-//!
-//! Intuitively, if we had one object `O=&ConvertTo<Object>` and another
-//! `S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
-//! (presuming Java-like "string" and "object" types, my go to examples
-//! for subtyping). The actual algorithm would be to compare the
-//! (explicit) type parameters pairwise respecting their variance: here,
-//! the type parameter A is covariant (it appears only in a return
-//! position), and hence we require that `String <: Object`.
-//!
-//! You'll note though that we did not consider the binding for the
-//! (implicit) `Self` type parameter: in fact, it is unknown, so that's
-//! good. The reason we can ignore that parameter is precisely because we
-//! don't need to know its value until a call occurs, and at that time (as
-//! you said) the dynamic nature of virtual dispatch means the code we run
-//! will be correct for whatever value `Self` happens to be bound to for
-//! the particular object whose method we called. `Self` is thus different
-//! from `A`, because the caller requires that `A` be known in order to
-//! know the return type of the method `convertTo()`. (As an aside, we
-//! have rules preventing methods where `Self` appears outside of the
-//! receiver position from being called via an object.)
-//!
-//! #### Trait variance and vtable resolution
-//!
-//! But traits aren't only used with objects. They're also used when
-//! deciding whether a given impl satisfies a given trait bound. To set the
-//! scene here, imagine I had a function:
-//!
-//! fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
-//! ...
-//! }
-//!
-//! Now imagine that I have an implementation of `ConvertTo` for `Object`:
-//!
-//! impl ConvertTo<i32> for Object { ... }
-//!
-//! And I want to call `convertAll` on an array of strings. Suppose
-//! further that for whatever reason I specifically supply the value of
-//! `String` for the type parameter `T`:
-//!
-//! let mut vector = vec!["string", ...];
-//! convertAll::<i32, String>(vector);
-//!
-//! Is this legal? To put another way, can we apply the `impl` for
-//! `Object` to the type `String`? The answer is yes, but to see why
-//! we have to expand out what will happen:
-//!
-//! - `convertAll` will create a pointer to one of the entries in the
-//! vector, which will have type `&String`
-//! - It will then call the impl of `convertTo()` that is intended
-//! for use with objects. This has the type:
-//!
-//! fn(self: &Object) -> i32
-//!
-//! It is ok to provide a value for `self` of type `&String` because
-//! `&String <: &Object`.
-//!
-//! OK, so intuitively we want this to be legal, so let's bring this back
-//! to variance and see whether we are computing the correct result. We
-//! must first figure out how to phrase the question "is an impl for
-//! `Object,i32` usable where an impl for `String,i32` is expected?"
-//!
-//! Maybe it's helpful to think of a dictionary-passing implementation of
-//! type classes. In that case, `convertAll()` takes an implicit parameter
-//! representing the impl. In short, we *have* an impl of type:
-//!
-//! V_O = ConvertTo<i32> for Object
-//!
-//! and the function prototype expects an impl of type:
-//!
-//! V_S = ConvertTo<i32> for String
-//!
-//! As with any argument, this is legal if the type of the value given
-//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
-//! The answer will depend on the variance of the various parameters. In
-//! this case, because the `Self` parameter is contravariant and `A` is
-//! covariant, it means that:
-//!
-//! V_O <: V_S iff
-//! i32 <: i32
-//! String <: Object
-//!
-//! These conditions are satisfied and so we are happy.
-//!
-//! #### Variance and associated types
-//!
-//! Traits with associated types -- or at minimum projection
-//! expressions -- must be invariant with respect to all of their
-//! inputs. To see why this makes sense, consider what subtyping for a
-//! trait reference means:
-//!
-//! <T as Trait> <: <U as Trait>
-//!
-//! means that if I know that `T as Trait`, I also know that `U as
-//! Trait`. Moreover, if you think of it as dictionary passing style,
-//! it means that a dictionary for `<T as Trait>` is safe to use where
-//! a dictionary for `<U as Trait>` is expected.
-//!
-//! The problem is that when you can project types out from `<T as
-//! Trait>`, the relationship to types projected out of `<U as Trait>`
-//! is completely unknown unless `T==U` (see #21726 for more
-//! details). Making `Trait` invariant ensures that this is true.
-//!
-//! Another related reason is that if we didn't make traits with
-//! associated types invariant, then projection is no longer a
-//! function with a single result. Consider:
-//!
-//! ```
-//! trait Identity { type Out; fn foo(&self); }
-//! impl<T> Identity for T { type Out = T; ... }
-//! ```
-//!
-//! Now if I have `<&'static () as Identity>::Out`, this can be
-//! validly derived as `&'a ()` for any `'a`:
-//!
-//! <&'a () as Identity> <: <&'static () as Identity>
-//! if &'static () < : &'a () -- Identity is contravariant in Self
-//! if 'static : 'a -- Subtyping rules for relations
-//!
-//! This change otoh means that `<'static () as Identity>::Out` is
-//! always `&'static ()` (which might then be upcast to `'a ()`,
-//! separately). This was helpful in solving #21750.
-
-use self::VarianceTerm::*;
-use self::ParamKind::*;
-
-use arena;
-use arena::TypedArena;
-use dep_graph::DepNode;
-use middle::def_id::DefId;
-use middle::resolve_lifetime as rl;
-use middle::subst;
-use middle::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace};
-use middle::ty::{self, Ty};
-use rustc::front::map as hir_map;
-use std::fmt;
-use std::rc::Rc;
-use syntax::ast;
-use rustc_front::hir;
-use rustc_front::intravisit::Visitor;
-use util::nodemap::NodeMap;
-
-pub fn infer_variance(tcx: &ty::ctxt) {
- let _task = tcx.dep_graph.in_task(DepNode::Variance);
- let krate = tcx.map.krate();
- let mut arena = arena::TypedArena::new();
- let terms_cx = determine_parameters_to_be_inferred(tcx, &mut arena, krate);
- let constraints_cx = add_constraints_from_crate(terms_cx, krate);
- solve_constraints(constraints_cx);
- tcx.variance_computed.set(true);
-}
-
-// Representing terms
-//
-// Terms are structured as a straightforward tree. Rather than rely on
-// GC, we allocate terms out of a bounded arena (the lifetime of this
-// arena is the lifetime 'a that is threaded around).
-//
-// We assign a unique index to each type/region parameter whose variance
-// is to be inferred. We refer to such variables as "inferreds". An
-// `InferredIndex` is a newtype'd int representing the index of such
-// a variable.
-
-type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
-
-#[derive(Copy, Clone, Debug)]
-struct InferredIndex(usize);
-
-#[derive(Copy, Clone)]
-enum VarianceTerm<'a> {
- ConstantTerm(ty::Variance),
- TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>),
- InferredTerm(InferredIndex),
-}
-
-impl<'a> fmt::Debug for VarianceTerm<'a> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- ConstantTerm(c1) => write!(f, "{:?}", c1),
- TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2),
- InferredTerm(id) => write!(f, "[{}]", { let InferredIndex(i) = id; i })
- }
- }
-}
-
-// The first pass over the crate simply builds up the set of inferreds.
-
-struct TermsContext<'a, 'tcx: 'a> {
- tcx: &'a ty::ctxt<'tcx>,
- arena: &'a TypedArena<VarianceTerm<'a>>,
-
- empty_variances: Rc<ty::ItemVariances>,
-
- // For marker types, UnsafeCell, and other lang items where
- // variance is hardcoded, records the item-id and the hardcoded
- // variance.
- lang_items: Vec<(ast::NodeId, Vec<ty::Variance>)>,
-
- // Maps from the node id of a type/generic parameter to the
- // corresponding inferred index.
- inferred_map: NodeMap<InferredIndex>,
-
- // Maps from an InferredIndex to the info for that variable.
- inferred_infos: Vec<InferredInfo<'a>> ,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq)]
-enum ParamKind {
- TypeParam,
- RegionParam,
-}
-
-struct InferredInfo<'a> {
- item_id: ast::NodeId,
- kind: ParamKind,
- space: ParamSpace,
- index: usize,
- param_id: ast::NodeId,
- term: VarianceTermPtr<'a>,
-
- // Initial value to use for this parameter when inferring
- // variance. For most parameters, this is Bivariant. But for lang
- // items and input type parameters on traits, it is different.
- initial_variance: ty::Variance,
-}
-
-fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
- arena: &'a mut TypedArena<VarianceTerm<'a>>,
- krate: &hir::Crate)
- -> TermsContext<'a, 'tcx> {
- let mut terms_cx = TermsContext {
- tcx: tcx,
- arena: arena,
- inferred_map: NodeMap(),
- inferred_infos: Vec::new(),
-
- lang_items: lang_items(tcx),
-
- // cache and share the variance struct used for items with
- // no type/region parameters
- empty_variances: Rc::new(ty::ItemVariances {
- types: VecPerParamSpace::empty(),
- regions: VecPerParamSpace::empty()
- })
- };
-
- krate.visit_all_items(&mut terms_cx);
-
- terms_cx
-}
-
-fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
- let all = vec![
- (tcx.lang_items.phantom_data(), vec![ty::Covariant]),
- (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
-
- // Deprecated:
- (tcx.lang_items.covariant_type(), vec![ty::Covariant]),
- (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]),
- (tcx.lang_items.invariant_type(), vec![ty::Invariant]),
- (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]),
- (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]),
- (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]),
-
- ];
-
- all.into_iter() // iterating over (Option<DefId>, Variance)
- .filter(|&(ref d,_)| d.is_some())
- .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance)
- .filter_map(|(d, v)| tcx.map.as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance)
- .collect()
-}
-
-impl<'a, 'tcx> TermsContext<'a, 'tcx> {
- fn add_inferreds_for_item(&mut self,
- item_id: ast::NodeId,
- has_self: bool,
- generics: &hir::Generics)
- {
- /*!
- * Add "inferreds" for the generic parameters declared on this
- * item. This has a lot of annoying parameters because we are
- * trying to drive this from the AST, rather than the
- * ty::Generics, so that we can get span info -- but this
- * means we must accommodate syntactic distinctions.
- */
-
- // NB: In the code below for writing the results back into the
- // tcx, we rely on the fact that all inferreds for a particular
- // item are assigned continuous indices.
-
- let inferreds_on_entry = self.num_inferred();
-
- if has_self {
- self.add_inferred(item_id, TypeParam, SelfSpace, 0, item_id);
- }
-
- for (i, p) in generics.lifetimes.iter().enumerate() {
- let id = p.lifetime.id;
- self.add_inferred(item_id, RegionParam, TypeSpace, i, id);
- }
-
- for (i, p) in generics.ty_params.iter().enumerate() {
- self.add_inferred(item_id, TypeParam, TypeSpace, i, p.id);
- }
-
- // If this item has no type or lifetime parameters,
- // then there are no variances to infer, so just
- // insert an empty entry into the variance map.
- // Arguably we could just leave the map empty in this
- // case but it seems cleaner to be able to distinguish
- // "invalid item id" from "item id with no
- // parameters".
- if self.num_inferred() == inferreds_on_entry {
- let item_def_id = self.tcx.map.local_def_id(item_id);
- let newly_added =
- self.tcx.item_variance_map.borrow_mut().insert(
- item_def_id,
- self.empty_variances.clone()).is_none();
- assert!(newly_added);
- }
- }
-
- fn add_inferred(&mut self,
- item_id: ast::NodeId,
- kind: ParamKind,
- space: ParamSpace,
- index: usize,
- param_id: ast::NodeId) {
- let inf_index = InferredIndex(self.inferred_infos.len());
- let term = self.arena.alloc(InferredTerm(inf_index));
- let initial_variance = self.pick_initial_variance(item_id, space, index);
- self.inferred_infos.push(InferredInfo { item_id: item_id,
- kind: kind,
- space: space,
- index: index,
- param_id: param_id,
- term: term,
- initial_variance: initial_variance });
- let newly_added = self.inferred_map.insert(param_id, inf_index).is_none();
- assert!(newly_added);
-
- debug!("add_inferred(item_path={}, \
- item_id={}, \
- kind={:?}, \
- space={:?}, \
- index={}, \
- param_id={}, \
- inf_index={:?}, \
- initial_variance={:?})",
- self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)),
- item_id, kind, space, index, param_id, inf_index,
- initial_variance);
- }
-
- fn pick_initial_variance(&self,
- item_id: ast::NodeId,
- space: ParamSpace,
- index: usize)
- -> ty::Variance
- {
- match space {
- SelfSpace | FnSpace => {
- ty::Bivariant
- }
-
- TypeSpace => {
- match self.lang_items.iter().find(|&&(n, _)| n == item_id) {
- Some(&(_, ref variances)) => variances[index],
- None => ty::Bivariant
- }
- }
- }
- }
-
- fn num_inferred(&self) -> usize {
- self.inferred_infos.len()
- }
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
- fn visit_item(&mut self, item: &hir::Item) {
- debug!("add_inferreds for item {}", self.tcx.map.node_to_string(item.id));
-
- match item.node {
- hir::ItemEnum(_, ref generics) |
- hir::ItemStruct(_, ref generics) => {
- self.add_inferreds_for_item(item.id, false, generics);
- }
- hir::ItemTrait(_, ref generics, _, _) => {
- // Note: all inputs for traits are ultimately
- // constrained to be invariant. See `visit_item` in
- // the impl for `ConstraintContext` below.
- self.add_inferreds_for_item(item.id, true, generics);
- }
-
- hir::ItemExternCrate(_) |
- hir::ItemUse(_) |
- hir::ItemDefaultImpl(..) |
- hir::ItemImpl(..) |
- hir::ItemStatic(..) |
- hir::ItemConst(..) |
- hir::ItemFn(..) |
- hir::ItemMod(..) |
- hir::ItemForeignMod(..) |
- hir::ItemTy(..) => {
- }
- }
- }
-}
-
-// Constraint construction and representation
-//
-// The second pass over the AST determines the set of constraints.
-// We walk the set of items and, for each member, generate new constraints.
-
-struct ConstraintContext<'a, 'tcx: 'a> {
- terms_cx: TermsContext<'a, 'tcx>,
-
- // These are pointers to common `ConstantTerm` instances
- covariant: VarianceTermPtr<'a>,
- contravariant: VarianceTermPtr<'a>,
- invariant: VarianceTermPtr<'a>,
- bivariant: VarianceTermPtr<'a>,
-
- constraints: Vec<Constraint<'a>> ,
-}
-
-/// Declares that the variable `decl_id` appears in a location with
-/// variance `variance`.
-#[derive(Copy, Clone)]
-struct Constraint<'a> {
- inferred: InferredIndex,
- variance: &'a VarianceTerm<'a>,
-}
-
-fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>,
- krate: &hir::Crate)
- -> ConstraintContext<'a, 'tcx>
-{
- let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant));
- let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant));
- let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant));
- let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant));
- let mut constraint_cx = ConstraintContext {
- terms_cx: terms_cx,
- covariant: covariant,
- contravariant: contravariant,
- invariant: invariant,
- bivariant: bivariant,
- constraints: Vec::new(),
- };
- krate.visit_all_items(&mut constraint_cx);
- constraint_cx
-}
-
-impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> {
- fn visit_item(&mut self, item: &hir::Item) {
- let tcx = self.terms_cx.tcx;
- let did = tcx.map.local_def_id(item.id);
-
- debug!("visit_item item={}", tcx.map.node_to_string(item.id));
-
- match item.node {
- hir::ItemEnum(..) | hir::ItemStruct(..) => {
- let scheme = tcx.lookup_item_type(did);
-
- // Not entirely obvious: constraints on structs/enums do not
- // affect the variance of their type parameters. See discussion
- // in comment at top of module.
- //
- // self.add_constraints_from_generics(&scheme.generics);
-
- for field in tcx.lookup_adt_def(did).all_fields() {
- self.add_constraints_from_ty(&scheme.generics,
- field.unsubst_ty(),
- self.covariant);
- }
- }
- hir::ItemTrait(..) => {
- let trait_def = tcx.lookup_trait_def(did);
- self.add_constraints_from_trait_ref(&trait_def.generics,
- trait_def.trait_ref,
- self.invariant);
- }
-
- hir::ItemExternCrate(_) |
- hir::ItemUse(_) |
- hir::ItemStatic(..) |
- hir::ItemConst(..) |
- hir::ItemFn(..) |
- hir::ItemMod(..) |
- hir::ItemForeignMod(..) |
- hir::ItemTy(..) |
- hir::ItemImpl(..) |
- hir::ItemDefaultImpl(..) => {
- }
- }
- }
-}
-
-/// Is `param_id` a lifetime according to `map`?
-fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool {
- match map.find(param_id) {
- Some(hir_map::NodeLifetime(..)) => true, _ => false
- }
-}
-
-impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
- fn tcx(&self) -> &'a ty::ctxt<'tcx> {
- self.terms_cx.tcx
- }
-
- fn inferred_index(&self, param_id: ast::NodeId) -> InferredIndex {
- match self.terms_cx.inferred_map.get(&param_id) {
- Some(&index) => index,
- None => {
- self.tcx().sess.bug(&format!(
- "no inferred index entry for {}",
- self.tcx().map.node_to_string(param_id)));
- }
- }
- }
-
- fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId {
- let tcx = self.terms_cx.tcx;
- assert!(is_lifetime(&tcx.map, param_id));
- match tcx.named_region_map.get(&param_id) {
- Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id))
- => lifetime_decl_id,
- Some(_) => panic!("should not encounter non early-bound cases"),
-
- // The lookup should only fail when `param_id` is
- // itself a lifetime binding: use it as the decl_id.
- None => param_id,
- }
-
- }
-
- /// Is `param_id` a type parameter for which we infer variance?
- fn is_to_be_inferred(&self, param_id: ast::NodeId) -> bool {
- let result = self.terms_cx.inferred_map.contains_key(&param_id);
-
- // To safeguard against invalid inferred_map constructions,
- // double-check if variance is inferred at some use of a type
- // parameter (by inspecting parent of its binding declaration
- // to see if it is introduced by a type or by a fn/impl).
-
- let check_result = |this:&ConstraintContext| -> bool {
- let tcx = this.terms_cx.tcx;
- let decl_id = this.find_binding_for_lifetime(param_id);
- // Currently only called on lifetimes; double-checking that.
- assert!(is_lifetime(&tcx.map, param_id));
- let parent_id = tcx.map.get_parent(decl_id);
- let parent = tcx.map.find(parent_id).unwrap_or_else(
- || panic!("tcx.map missing entry for id: {}", parent_id));
-
- let is_inferred;
- macro_rules! cannot_happen { () => { {
- panic!("invalid parent: {} for {}",
- tcx.map.node_to_string(parent_id),
- tcx.map.node_to_string(param_id));
- } } }
-
- match parent {
- hir_map::NodeItem(p) => {
- match p.node {
- hir::ItemTy(..) |
- hir::ItemEnum(..) |
- hir::ItemStruct(..) |
- hir::ItemTrait(..) => is_inferred = true,
- hir::ItemFn(..) => is_inferred = false,
- _ => cannot_happen!(),
- }
- }
- hir_map::NodeTraitItem(..) => is_inferred = false,
- hir_map::NodeImplItem(..) => is_inferred = false,
- _ => cannot_happen!(),
- }
-
- return is_inferred;
- };
-
- assert_eq!(result, check_result(self));
-
- return result;
- }
-
- /// Returns a variance term representing the declared variance of the type/region parameter
- /// with the given id.
- fn declared_variance(&self,
- param_def_id: DefId,
- item_def_id: DefId,
- kind: ParamKind,
- space: ParamSpace,
- index: usize)
- -> VarianceTermPtr<'a> {
- assert_eq!(param_def_id.krate, item_def_id.krate);
-
- if let Some(param_node_id) = self.tcx().map.as_local_node_id(param_def_id) {
- // Parameter on an item defined within current crate:
- // variance not yet inferred, so return a symbolic
- // variance.
- let InferredIndex(index) = self.inferred_index(param_node_id);
- self.terms_cx.inferred_infos[index].term
- } else {
- // Parameter on an item defined within another crate:
- // variance already inferred, just look it up.
- let variances = self.tcx().item_variances(item_def_id);
- let variance = match kind {
- TypeParam => *variances.types.get(space, index),
- RegionParam => *variances.regions.get(space, index),
- };
- self.constant_term(variance)
- }
- }
-
- fn add_constraint(&mut self,
- InferredIndex(index): InferredIndex,
- variance: VarianceTermPtr<'a>) {
- debug!("add_constraint(index={}, variance={:?})",
- index, variance);
- self.constraints.push(Constraint { inferred: InferredIndex(index),
- variance: variance });
- }
-
- fn contravariant(&mut self,
- variance: VarianceTermPtr<'a>)
- -> VarianceTermPtr<'a> {
- self.xform(variance, self.contravariant)
- }
-
- fn invariant(&mut self,
- variance: VarianceTermPtr<'a>)
- -> VarianceTermPtr<'a> {
- self.xform(variance, self.invariant)
- }
-
- fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> {
- match v {
- ty::Covariant => self.covariant,
- ty::Invariant => self.invariant,
- ty::Contravariant => self.contravariant,
- ty::Bivariant => self.bivariant,
- }
- }
-
- fn xform(&mut self,
- v1: VarianceTermPtr<'a>,
- v2: VarianceTermPtr<'a>)
- -> VarianceTermPtr<'a> {
- match (*v1, *v2) {
- (_, ConstantTerm(ty::Covariant)) => {
- // Applying a "covariant" transform is always a no-op
- v1
- }
-
- (ConstantTerm(c1), ConstantTerm(c2)) => {
- self.constant_term(c1.xform(c2))
- }
-
- _ => {
- &*self.terms_cx.arena.alloc(TransformTerm(v1, v2))
- }
- }
- }
-
- fn add_constraints_from_trait_ref(&mut self,
- generics: &ty::Generics<'tcx>,
- trait_ref: ty::TraitRef<'tcx>,
- variance: VarianceTermPtr<'a>) {
- debug!("add_constraints_from_trait_ref: trait_ref={:?} variance={:?}",
- trait_ref,
- variance);
-
- let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
-
- self.add_constraints_from_substs(
- generics,
- trait_ref.def_id,
- trait_def.generics.types.as_slice(),
- trait_def.generics.regions.as_slice(),
- trait_ref.substs,
- variance);
- }
-
- /// Adds constraints appropriate for an instance of `ty` appearing
- /// in a context with the generics defined in `generics` and
- /// ambient variance `variance`
- fn add_constraints_from_ty(&mut self,
- generics: &ty::Generics<'tcx>,
- ty: Ty<'tcx>,
- variance: VarianceTermPtr<'a>) {
- debug!("add_constraints_from_ty(ty={:?}, variance={:?})",
- ty,
- variance);
-
- match ty.sty {
- ty::TyBool |
- ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
- ty::TyFloat(_) | ty::TyStr => {
- /* leaf type -- noop */
- }
-
- ty::TyClosure(..) => {
- self.tcx().sess.bug("Unexpected closure type in variance computation");
- }
-
- ty::TyRef(region, ref mt) => {
- let contra = self.contravariant(variance);
- self.add_constraints_from_region(generics, *region, contra);
- self.add_constraints_from_mt(generics, mt, variance);
- }
-
- ty::TyBox(typ) | ty::TyArray(typ, _) | ty::TySlice(typ) => {
- self.add_constraints_from_ty(generics, typ, variance);
- }
-
-
- ty::TyRawPtr(ref mt) => {
- self.add_constraints_from_mt(generics, mt, variance);
- }
-
- ty::TyTuple(ref subtys) => {
- for &subty in subtys {
- self.add_constraints_from_ty(generics, subty, variance);
- }
- }
-
- ty::TyEnum(def, substs) |
- ty::TyStruct(def, substs) => {
- let item_type = self.tcx().lookup_item_type(def.did);
-
- // All type parameters on enums and structs should be
- // in the TypeSpace.
- assert!(item_type.generics.types.is_empty_in(subst::SelfSpace));
- assert!(item_type.generics.types.is_empty_in(subst::FnSpace));
- assert!(item_type.generics.regions.is_empty_in(subst::SelfSpace));
- assert!(item_type.generics.regions.is_empty_in(subst::FnSpace));
-
- self.add_constraints_from_substs(
- generics,
- def.did,
- item_type.generics.types.get_slice(subst::TypeSpace),
- item_type.generics.regions.get_slice(subst::TypeSpace),
- substs,
- variance);
- }
-
- ty::TyProjection(ref data) => {
- let trait_ref = &data.trait_ref;
- let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
- self.add_constraints_from_substs(
- generics,
- trait_ref.def_id,
- trait_def.generics.types.as_slice(),
- trait_def.generics.regions.as_slice(),
- trait_ref.substs,
- variance);
- }
-
- ty::TyTrait(ref data) => {
- let poly_trait_ref =
- data.principal_trait_ref_with_self_ty(self.tcx(),
- self.tcx().types.err);
-
- // The type `Foo<T+'a>` is contravariant w/r/t `'a`:
- let contra = self.contravariant(variance);
- self.add_constraints_from_region(generics, data.bounds.region_bound, contra);
-
- // Ignore the SelfSpace, it is erased.
- self.add_constraints_from_trait_ref(generics, poly_trait_ref.0, variance);
-
- let projections = data.projection_bounds_with_self_ty(self.tcx(),
- self.tcx().types.err);
- for projection in &projections {
- self.add_constraints_from_ty(generics, projection.0.ty, self.invariant);
- }
- }
-
- ty::TyParam(ref data) => {
- let def_id = generics.types.get(data.space, data.idx as usize).def_id;
- let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
- match self.terms_cx.inferred_map.get(&node_id) {
- Some(&index) => {
- self.add_constraint(index, variance);
- }
- None => {
- // We do not infer variance for type parameters
- // declared on methods. They will not be present
- // in the inferred_map.
- }
- }
- }
-
- ty::TyBareFn(_, &ty::BareFnTy { ref sig, .. }) => {
- self.add_constraints_from_sig(generics, sig, variance);
- }
-
- ty::TyError => {
- // we encounter this when walking the trait references for object
- // types, where we use TyError as the Self type
- }
-
- ty::TyInfer(..) => {
- self.tcx().sess.bug(
- &format!("unexpected type encountered in \
- variance inference: {}", ty));
- }
- }
- }
-
-
- /// Adds constraints appropriate for a nominal type (enum, struct,
- /// object, etc) appearing in a context with ambient variance `variance`
- fn add_constraints_from_substs(&mut self,
- generics: &ty::Generics<'tcx>,
- def_id: DefId,
- type_param_defs: &[ty::TypeParameterDef<'tcx>],
- region_param_defs: &[ty::RegionParameterDef],
- substs: &subst::Substs<'tcx>,
- variance: VarianceTermPtr<'a>) {
- debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})",
- def_id,
- substs,
- variance);
-
- for p in type_param_defs {
- let variance_decl =
- self.declared_variance(p.def_id, def_id, TypeParam,
- p.space, p.index as usize);
- let variance_i = self.xform(variance, variance_decl);
- let substs_ty = *substs.types.get(p.space, p.index as usize);
- debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
- variance_decl, variance_i);
- self.add_constraints_from_ty(generics, substs_ty, variance_i);
- }
-
- for p in region_param_defs {
- let variance_decl =
- self.declared_variance(p.def_id, def_id,
- RegionParam, p.space, p.index as usize);
- let variance_i = self.xform(variance, variance_decl);
- let substs_r = *substs.regions().get(p.space, p.index as usize);
- self.add_constraints_from_region(generics, substs_r, variance_i);
- }
- }
-
- /// Adds constraints appropriate for a function with signature
- /// `sig` appearing in a context with ambient variance `variance`
- fn add_constraints_from_sig(&mut self,
- generics: &ty::Generics<'tcx>,
- sig: &ty::PolyFnSig<'tcx>,
- variance: VarianceTermPtr<'a>) {
- let contra = self.contravariant(variance);
- for &input in &sig.0.inputs {
- self.add_constraints_from_ty(generics, input, contra);
- }
- if let ty::FnConverging(result_type) = sig.0.output {
- self.add_constraints_from_ty(generics, result_type, variance);
- }
- }
-
- /// Adds constraints appropriate for a region appearing in a
- /// context with ambient variance `variance`
- fn add_constraints_from_region(&mut self,
- generics: &ty::Generics<'tcx>,
- region: ty::Region,
- variance: VarianceTermPtr<'a>) {
- match region {
- ty::ReEarlyBound(ref data) => {
- let def_id =
- generics.regions.get(data.space, data.index as usize).def_id;
- let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
- if self.is_to_be_inferred(node_id) {
- let index = self.inferred_index(node_id);
- self.add_constraint(index, variance);
- }
- }
-
- ty::ReStatic => { }
-
- ty::ReLateBound(..) => {
- // We do not infer variance for region parameters on
- // methods or in fn types.
- }
-
- ty::ReFree(..) | ty::ReScope(..) | ty::ReVar(..) |
- ty::ReSkolemized(..) | ty::ReEmpty => {
- // We don't expect to see anything but 'static or bound
- // regions when visiting member types or method types.
- self.tcx()
- .sess
- .bug(&format!("unexpected region encountered in variance \
- inference: {:?}",
- region));
- }
- }
- }
-
- /// Adds constraints appropriate for a mutability-type pair
- /// appearing in a context with ambient variance `variance`
- fn add_constraints_from_mt(&mut self,
- generics: &ty::Generics<'tcx>,
- mt: &ty::TypeAndMut<'tcx>,
- variance: VarianceTermPtr<'a>) {
- match mt.mutbl {
- hir::MutMutable => {
- let invar = self.invariant(variance);
- self.add_constraints_from_ty(generics, mt.ty, invar);
- }
-
- hir::MutImmutable => {
- self.add_constraints_from_ty(generics, mt.ty, variance);
- }
- }
- }
-}
-
-// Constraint solving
-//
-// The final phase iterates over the constraints, refining the variance
-// for each inferred until a fixed point is reached. This will be the
-// optimal solution to the constraints. The final variance for each
-// inferred is then written into the `variance_map` in the tcx.
-
-struct SolveContext<'a, 'tcx: 'a> {
- terms_cx: TermsContext<'a, 'tcx>,
- constraints: Vec<Constraint<'a>> ,
-
- // Maps from an InferredIndex to the inferred value for that variable.
- solutions: Vec<ty::Variance> }
-
-fn solve_constraints(constraints_cx: ConstraintContext) {
- let ConstraintContext { terms_cx, constraints, .. } = constraints_cx;
-
- let solutions =
- terms_cx.inferred_infos.iter()
- .map(|ii| ii.initial_variance)
- .collect();
-
- let mut solutions_cx = SolveContext {
- terms_cx: terms_cx,
- constraints: constraints,
- solutions: solutions
- };
- solutions_cx.solve();
- solutions_cx.write();
-}
-
-impl<'a, 'tcx> SolveContext<'a, 'tcx> {
- fn solve(&mut self) {
- // Propagate constraints until a fixed point is reached. Note
- // that the maximum number of iterations is 2C where C is the
- // number of constraints (each variable can change values at most
- // twice). Since number of constraints is linear in size of the
- // input, so is the inference process.
- let mut changed = true;
- while changed {
- changed = false;
-
- for constraint in &self.constraints {
- let Constraint { inferred, variance: term } = *constraint;
- let InferredIndex(inferred) = inferred;
- let variance = self.evaluate(term);
- let old_value = self.solutions[inferred];
- let new_value = glb(variance, old_value);
- if old_value != new_value {
- debug!("Updating inferred {} (node {}) \
- from {:?} to {:?} due to {:?}",
- inferred,
- self.terms_cx
- .inferred_infos[inferred]
- .param_id,
- old_value,
- new_value,
- term);
-
- self.solutions[inferred] = new_value;
- changed = true;
- }
- }
- }
- }
-
- fn write(&self) {
- // Collect all the variances for a particular item and stick
- // them into the variance map. We rely on the fact that we
- // generate all the inferreds for a particular item
- // consecutively (that is, we collect solutions for an item
- // until we see a new item id, and we assume (1) the solutions
- // are in the same order as the type parameters were declared
- // and (2) all solutions for a given item appear before a new
- // item id).
-
- let tcx = self.terms_cx.tcx;
- let solutions = &self.solutions;
- let inferred_infos = &self.terms_cx.inferred_infos;
- let mut index = 0;
- let num_inferred = self.terms_cx.num_inferred();
- while index < num_inferred {
- let item_id = inferred_infos[index].item_id;
- let mut types = VecPerParamSpace::empty();
- let mut regions = VecPerParamSpace::empty();
-
- while index < num_inferred && inferred_infos[index].item_id == item_id {
- let info = &inferred_infos[index];
- let variance = solutions[index];
- debug!("Index {} Info {} / {:?} / {:?} Variance {:?}",
- index, info.index, info.kind, info.space, variance);
- match info.kind {
- TypeParam => { types.push(info.space, variance); }
- RegionParam => { regions.push(info.space, variance); }
- }
-
- index += 1;
- }
-
- let item_variances = ty::ItemVariances {
- types: types,
- regions: regions
- };
- debug!("item_id={} item_variances={:?}",
- item_id,
- item_variances);
-
- let item_def_id = tcx.map.local_def_id(item_id);
-
- // For unit testing: check for a special "rustc_variance"
- // attribute and report an error with various results if found.
- if tcx.has_attr(item_def_id, "rustc_variance") {
- span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{:?}", item_variances);
- }
-
- let newly_added = tcx.item_variance_map.borrow_mut()
- .insert(item_def_id, Rc::new(item_variances)).is_none();
- assert!(newly_added);
- }
- }
-
- fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance {
- match *term {
- ConstantTerm(v) => {
- v
- }
-
- TransformTerm(t1, t2) => {
- let v1 = self.evaluate(t1);
- let v2 = self.evaluate(t2);
- v1.xform(v2)
- }
-
- InferredTerm(InferredIndex(index)) => {
- self.solutions[index]
- }
- }
- }
-}
-
-// Miscellany transformations on variance
-
-trait Xform {
- fn xform(self, v: Self) -> Self;
-}
-
-impl Xform for ty::Variance {
- fn xform(self, v: ty::Variance) -> ty::Variance {
- // "Variance transformation", Figure 1 of The Paper
- match (self, v) {
- // Figure 1, column 1.
- (ty::Covariant, ty::Covariant) => ty::Covariant,
- (ty::Covariant, ty::Contravariant) => ty::Contravariant,
- (ty::Covariant, ty::Invariant) => ty::Invariant,
- (ty::Covariant, ty::Bivariant) => ty::Bivariant,
-
- // Figure 1, column 2.
- (ty::Contravariant, ty::Covariant) => ty::Contravariant,
- (ty::Contravariant, ty::Contravariant) => ty::Covariant,
- (ty::Contravariant, ty::Invariant) => ty::Invariant,
- (ty::Contravariant, ty::Bivariant) => ty::Bivariant,
-
- // Figure 1, column 3.
- (ty::Invariant, _) => ty::Invariant,
-
- // Figure 1, column 4.
- (ty::Bivariant, _) => ty::Bivariant,
- }
- }
-}
-
-fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance {
- // Greatest lower bound of the variance lattice as
- // defined in The Paper:
- //
- // *
- // - +
- // o
- match (v1, v2) {
- (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant,
-
- (ty::Covariant, ty::Contravariant) => ty::Invariant,
- (ty::Contravariant, ty::Covariant) => ty::Invariant,
-
- (ty::Covariant, ty::Covariant) => ty::Covariant,
-
- (ty::Contravariant, ty::Contravariant) => ty::Contravariant,
-
- (x, ty::Bivariant) | (ty::Bivariant, x) => x,
- }
-}
--- /dev/null
+This file infers the variance of type and lifetime parameters. The
+algorithm is taken from Section 4 of the paper "Taming the Wildcards:
+Combining Definition- and Use-Site Variance" published in PLDI'11 and
+written by Altidor et al., and hereafter referred to as The Paper.
+
+This inference is explicitly designed *not* to consider the uses of
+types within code. To determine the variance of type parameters
+defined on type `X`, we only consider the definition of the type `X`
+and the definitions of any types it references.
+
+We only infer variance for type parameters found on *data types*
+like structs and enums. In these cases, there is a fairly straightforward
+explanation for what variance means. The variance of the type
+or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
+(resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
+(resp. `'a` and `'b`).
+
+We do not infer variance for type parameters found on traits, fns,
+or impls. Variance on trait parameters can indeed make sense
+(and we used to compute it) but it is actually rather subtle in
+meaning and not that useful in practice, so we removed it. See the
+addendum for some details. Variances on fn/impl parameters, otoh,
+don't make sense because these parameters are instantiated and
+then forgotten, they don't persist in types or compiled
+byproducts.
+
+### The algorithm
+
+The basic idea is quite straightforward. We iterate over the types
+defined and, for each use of a type parameter X, accumulate a
+constraint indicating that the variance of X must be valid for the
+variance of that use site. We then iteratively refine the variance of
+X until all constraints are met. There is *always* a solution, because at
+the limit we can declare all type parameters to be invariant and all
+constraints will be satisfied.
+
+As a simple example, consider:
+
+ enum Option<A> { Some(A), None }
+ enum OptionalFn<B> { Some(|B|), None }
+ enum OptionalMap<C> { Some(|C| -> C), None }
+
+Here, we will generate the constraints:
+
+ 1. V(A) <= +
+ 2. V(B) <= -
+ 3. V(C) <= +
+ 4. V(C) <= -
+
+These indicate that (1) the variance of A must be at most covariant;
+(2) the variance of B must be at most contravariant; and (3, 4) the
+variance of C must be at most covariant *and* contravariant. All of these
+results are based on a variance lattice defined as follows:
+
+ * Top (bivariant)
+ - +
+ o Bottom (invariant)
+
+Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
+optimal solution. Note that there is always a naive solution which
+just declares all variables to be invariant.
+
+You may be wondering why fixed-point iteration is required. The reason
+is that the variance of a use site may itself be a function of the
+variance of other type parameters. In full generality, our constraints
+take the form:
+
+ V(X) <= Term
+ Term := + | - | * | o | V(X) | Term x Term
+
+Here the notation V(X) indicates the variance of a type/region
+parameter `X` with respect to its defining class. `Term x Term`
+represents the "variance transform" as defined in the paper:
+
+ If the variance of a type variable `X` in type expression `E` is `V2`
+ and the definition-site variance of the [corresponding] type parameter
+ of a class `C` is `V1`, then the variance of `X` in the type expression
+ `C<E>` is `V3 = V1.xform(V2)`.
+
+### Constraints
+
+If I have a struct or enum with where clauses:
+
+ struct Foo<T:Bar> { ... }
+
+you might wonder whether the variance of `T` with respect to `Bar`
+affects the variance of `T` with respect to `Foo`. I claim no. The
+reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
+`Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
+`X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that
+case, the upcast will be illegal, but not because of a variance
+failure, but rather because the target type `Foo<Y>` is itself just
+not well-formed. Basically we get to assume well-formedness of all
+types involved before considering variance.
+
+#### Dependency graph management
+
+Because variance works in two phases, if we are not careful, we wind
+up with a muddled mess of a dep-graph. Basically, when gathering up
+the constraints, things are fairly well-structured, but then we do a
+fixed-point iteration and write the results back where they
+belong. You can't give this fixed-point iteration a single task
+because it reads from (and writes to) the variance of all types in the
+crate. In principle, we *could* switch the "current task" in a very
+fine-grained way while propagating constraints in the fixed-point
+iteration and everything would be automatically tracked, but that
+would add some overhead and isn't really necessary anyway.
+
+Instead what we do is to add edges into the dependency graph as we
+construct the constraint set: so, if computing the constraints for
+node `X` requires loading the inference variables from node `Y`, then
+we can add an edge `Y -> X`, since the variance we ultimately infer
+for `Y` will affect the variance we ultimately infer for `X`.
+
+At this point, we've basically mirrored the inference graph in the
+dependency graph. This means we can just completely ignore the
+fixed-point iteration, since it is just shuffling values along this
+graph. In other words, if we added the fine-grained switching of tasks
+I described earlier, all it would show is that we repeatedly read the
+values described by the constraints, but those edges were already
+added when building the constraints in the first place.
+
+Here is how this is implemented (at least as of the time of this
+writing). The associated `DepNode` for the variance map is (at least
+presently) `Signature(DefId)`. This means that, in `constraints.rs`,
+when we visit an item to load up its constraints, we set
+`Signature(DefId)` as the current task (the "memoization" pattern
+described in the `dep-graph` README). Then whenever we find an
+embedded type or trait, we add a synthetic read of `Signature(DefId)`,
+which covers the variances we will compute for all of its
+parameters. This read is synthetic (i.e., we call
+`variance_map.read()`) because, in fact, the final variance is not yet
+computed -- the read *will* occur (repeatedly) during the fixed-point
+iteration phase.
+
+In fact, we don't really *need* this synthetic read. That's because we
+do wind up looking up the `TypeScheme` or `TraitDef` for all
+references types/traits, and those reads add an edge from
+`Signature(DefId)` (that is, they share the same dep node as
+variance). However, I've kept the synthetic reads in place anyway,
+just for future-proofing (in case we change the dep-nodes in the
+future), and because it makes the intention a bit clearer I think.
+
+### Addendum: Variance on traits
+
+As mentioned above, we used to permit variance on traits. This was
+computed based on the appearance of trait type parameters in
+method signatures and was used to represent the compatibility of
+vtables in trait objects (and also "virtual" vtables or dictionary
+in trait bounds). One complication was that variance for
+associated types is less obvious, since they can be projected out
+and put to myriad uses, so it's not clear when it is safe to allow
+`X<A>::Bar` to vary (or indeed just what that means). Moreover (as
+covered below) all inputs on any trait with an associated type had
+to be invariant, limiting the applicability. Finally, the
+annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all
+trait type parameters had a variance were confusing and annoying
+for little benefit.
+
+Just for historical reference, I am going to preserve some text indicating
+how one could interpret variance and trait matching.
+
+#### Variance and object types
+
+Just as with structs and enums, we can decide the subtyping
+relationship between two object types `&Trait<A>` and `&Trait<B>`
+based on the relationship of `A` and `B`. Note that for object
+types we ignore the `Self` type parameter -- it is unknown, and
+the nature of dynamic dispatch ensures that we will always call a
+function that is expecting the appropriate `Self` type. However, we
+must be careful with the other type parameters, or else we could
+end up calling a function that is expecting one type but provided
+another.
+
+To see what I mean, consider a trait like so:
+
+ trait ConvertTo<A> {
+ fn convertTo(&self) -> A;
+ }
+
+Intuitively, if we had one object `O=&ConvertTo<Object>` and another
+`S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
+(presuming Java-like "string" and "object" types, my go-to examples
+for subtyping). The actual algorithm would be to compare the
+(explicit) type parameters pairwise respecting their variance: here,
+the type parameter A is covariant (it appears only in a return
+position), and hence we require that `String <: Object`.
+
+You'll note though that we did not consider the binding for the
+(implicit) `Self` type parameter: in fact, it is unknown, so that's
+good. The reason we can ignore that parameter is precisely because we
+don't need to know its value until a call occurs, and at that time (as
+you said) the dynamic nature of virtual dispatch means the code we run
+will be correct for whatever value `Self` happens to be bound to for
+the particular object whose method we called. `Self` is thus different
+from `A`, because the caller requires that `A` be known in order to
+know the return type of the method `convertTo()`. (As an aside, we
+have rules preventing methods where `Self` appears outside of the
+receiver position from being called via an object.)
+
+#### Trait variance and vtable resolution
+
+But traits aren't only used with objects. They're also used when
+deciding whether a given impl satisfies a given trait bound. To set the
+scene here, imagine I had a function:
+
+ fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
+ ...
+ }
+
+Now imagine that I have an implementation of `ConvertTo` for `Object`:
+
+ impl ConvertTo<i32> for Object { ... }
+
+And I want to call `convertAll` on an array of strings. Suppose
+further that for whatever reason I specifically supply the value of
+`String` for the type parameter `T`:
+
+ let mut vector = vec!["string", ...];
+ convertAll::<i32, String>(vector);
+
+Is this legal? To put it another way, can we apply the `impl` for
+`Object` to the type `String`? The answer is yes, but to see why
+we have to expand out what will happen:
+
+- `convertAll` will create a pointer to one of the entries in the
+ vector, which will have type `&String`
+- It will then call the impl of `convertTo()` that is intended
+ for use with objects. This has the type:
+
+ fn(self: &Object) -> i32
+
+ It is ok to provide a value for `self` of type `&String` because
+ `&String <: &Object`.
+
+OK, so intuitively we want this to be legal, so let's bring this back
+to variance and see whether we are computing the correct result. We
+must first figure out how to phrase the question "is an impl for
+`Object,i32` usable where an impl for `String,i32` is expected?"
+
+Maybe it's helpful to think of a dictionary-passing implementation of
+type classes. In that case, `convertAll()` takes an implicit parameter
+representing the impl. In short, we *have* an impl of type:
+
+ V_O = ConvertTo<i32> for Object
+
+and the function prototype expects an impl of type:
+
+ V_S = ConvertTo<i32> for String
+
+As with any argument, this is legal if the type of the value given
+(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
+The answer will depend on the variance of the various parameters. In
+this case, because the `Self` parameter is contravariant and `A` is
+covariant, it means that:
+
+ V_O <: V_S iff
+ i32 <: i32
+ String <: Object
+
+These conditions are satisfied and so we are happy.
+
+#### Variance and associated types
+
+Traits with associated types -- or at minimum projection
+expressions -- must be invariant with respect to all of their
+inputs. To see why this makes sense, consider what subtyping for a
+trait reference means:
+
+ <T as Trait> <: <U as Trait>
+
+means that if I know that `T as Trait`, I also know that `U as
+Trait`. Moreover, if you think of it as dictionary passing style,
+it means that a dictionary for `<T as Trait>` is safe to use where
+a dictionary for `<U as Trait>` is expected.
+
+The problem is that when you can project types out from `<T as
+Trait>`, the relationship to types projected out of `<U as Trait>`
+is completely unknown unless `T==U` (see #21726 for more
+details). Making `Trait` invariant ensures that this is true.
+
+Another related reason is that if we didn't make traits with
+associated types invariant, then projection is no longer a
+function with a single result. Consider:
+
+```
+trait Identity { type Out; fn foo(&self); }
+impl<T> Identity for T { type Out = T; ... }
+```
+
+Now if I have `<&'static () as Identity>::Out`, this can be
+validly derived as `&'a ()` for any `'a`:
+
+ <&'a () as Identity> <: <&'static () as Identity>
+    if &'static () <: &'a ()   -- Identity is contravariant in Self
+ if 'static : 'a -- Subtyping rules for relations
+
+This change otoh means that `<&'static () as Identity>::Out` is
+always `&'static ()` (which might then be upcast to `&'a ()`,
+separately). This was helpful in solving #21750.
+
+
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Constraint construction and representation
+//!
+//! The second pass over the AST determines the set of constraints.
+//! We walk the set of items and, for each member, generate new constraints.
+
+use dep_graph::DepTrackingMapConfig;
+use middle::def_id::DefId;
+use middle::resolve_lifetime as rl;
+use middle::subst;
+use middle::subst::ParamSpace;
+use middle::ty::{self, Ty};
+use middle::ty::maps::ItemVariances;
+use rustc::front::map as hir_map;
+use syntax::ast;
+use rustc_front::hir;
+use rustc_front::intravisit::Visitor;
+
+use super::terms::*;
+use super::terms::VarianceTerm::*;
+use super::terms::ParamKind::*;
+use super::xform::*;
+
+pub struct ConstraintContext<'a, 'tcx: 'a> {
+ pub terms_cx: TermsContext<'a, 'tcx>,
+
+ // These are pointers to common `ConstantTerm` instances
+ covariant: VarianceTermPtr<'a>,
+ contravariant: VarianceTermPtr<'a>,
+ invariant: VarianceTermPtr<'a>,
+ bivariant: VarianceTermPtr<'a>,
+
+ pub constraints: Vec<Constraint<'a>> ,
+}
+
+/// Declares that the variable `decl_id` appears in a location with
+/// variance `variance`.
+#[derive(Copy, Clone)]
+pub struct Constraint<'a> {
+ pub inferred: InferredIndex,
+ pub variance: &'a VarianceTerm<'a>,
+}
+
+pub fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>)
+ -> ConstraintContext<'a, 'tcx>
+{
+ let tcx = terms_cx.tcx;
+ let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant));
+ let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant));
+ let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant));
+ let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant));
+ let mut constraint_cx = ConstraintContext {
+ terms_cx: terms_cx,
+ covariant: covariant,
+ contravariant: contravariant,
+ invariant: invariant,
+ bivariant: bivariant,
+ constraints: Vec::new(),
+ };
+
+ // See README.md for a discussion on dep-graph management.
+ tcx.visit_all_items_in_krate(|def_id| ItemVariances::to_dep_node(&def_id),
+ &mut constraint_cx);
+
+ constraint_cx
+}
+
+impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> {
+ fn visit_item(&mut self, item: &hir::Item) {
+ let tcx = self.terms_cx.tcx;
+ let did = tcx.map.local_def_id(item.id);
+
+ debug!("visit_item item={}", tcx.map.node_to_string(item.id));
+
+ match item.node {
+ hir::ItemEnum(..) | hir::ItemStruct(..) => {
+ let scheme = tcx.lookup_item_type(did);
+
+ // Not entirely obvious: constraints on structs/enums do not
+ // affect the variance of their type parameters. See discussion
+ // in comment at top of module.
+ //
+ // self.add_constraints_from_generics(&scheme.generics);
+
+ for field in tcx.lookup_adt_def(did).all_fields() {
+ self.add_constraints_from_ty(&scheme.generics,
+ field.unsubst_ty(),
+ self.covariant);
+ }
+ }
+ hir::ItemTrait(..) => {
+ let trait_def = tcx.lookup_trait_def(did);
+ self.add_constraints_from_trait_ref(&trait_def.generics,
+ trait_def.trait_ref,
+ self.invariant);
+ }
+
+ hir::ItemExternCrate(_) |
+ hir::ItemUse(_) |
+ hir::ItemStatic(..) |
+ hir::ItemConst(..) |
+ hir::ItemFn(..) |
+ hir::ItemMod(..) |
+ hir::ItemForeignMod(..) |
+ hir::ItemTy(..) |
+ hir::ItemImpl(..) |
+ hir::ItemDefaultImpl(..) => {
+ }
+ }
+ }
+}
+
+/// Is `param_id` a lifetime according to `map`?
+fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool {
+ match map.find(param_id) {
+ Some(hir_map::NodeLifetime(..)) => true, _ => false
+ }
+}
+
+impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
+ self.terms_cx.tcx
+ }
+
+ fn inferred_index(&self, param_id: ast::NodeId) -> InferredIndex {
+ match self.terms_cx.inferred_map.get(¶m_id) {
+ Some(&index) => index,
+ None => {
+ self.tcx().sess.bug(&format!(
+ "no inferred index entry for {}",
+ self.tcx().map.node_to_string(param_id)));
+ }
+ }
+ }
+
+ fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId {
+ let tcx = self.terms_cx.tcx;
+ assert!(is_lifetime(&tcx.map, param_id));
+ match tcx.named_region_map.get(¶m_id) {
+ Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id))
+ => lifetime_decl_id,
+ Some(_) => panic!("should not encounter non early-bound cases"),
+
+ // The lookup should only fail when `param_id` is
+ // itself a lifetime binding: use it as the decl_id.
+ None => param_id,
+ }
+
+ }
+
+ /// Is `param_id` a type parameter for which we infer variance?
+ fn is_to_be_inferred(&self, param_id: ast::NodeId) -> bool {
+ let result = self.terms_cx.inferred_map.contains_key(¶m_id);
+
+ // To safe-guard against invalid inferred_map constructions,
+ // double-check if variance is inferred at some use of a type
+ // parameter (by inspecting parent of its binding declaration
+ // to see if it is introduced by a type or by a fn/impl).
+
+ let check_result = |this:&ConstraintContext| -> bool {
+ let tcx = this.terms_cx.tcx;
+ let decl_id = this.find_binding_for_lifetime(param_id);
+ // Currently only called on lifetimes; double-checking that.
+ assert!(is_lifetime(&tcx.map, param_id));
+ let parent_id = tcx.map.get_parent(decl_id);
+ let parent = tcx.map.find(parent_id).unwrap_or_else(
+ || panic!("tcx.map missing entry for id: {}", parent_id));
+
+ let is_inferred;
+ macro_rules! cannot_happen { () => { {
+ panic!("invalid parent: {} for {}",
+ tcx.map.node_to_string(parent_id),
+ tcx.map.node_to_string(param_id));
+ } } }
+
+ match parent {
+ hir_map::NodeItem(p) => {
+ match p.node {
+ hir::ItemTy(..) |
+ hir::ItemEnum(..) |
+ hir::ItemStruct(..) |
+ hir::ItemTrait(..) => is_inferred = true,
+ hir::ItemFn(..) => is_inferred = false,
+ _ => cannot_happen!(),
+ }
+ }
+ hir_map::NodeTraitItem(..) => is_inferred = false,
+ hir_map::NodeImplItem(..) => is_inferred = false,
+ _ => cannot_happen!(),
+ }
+
+ return is_inferred;
+ };
+
+ assert_eq!(result, check_result(self));
+
+ return result;
+ }
+
+ /// Returns a variance term representing the declared variance of the type/region parameter
+ /// with the given id.
+ fn declared_variance(&self,
+ param_def_id: DefId,
+ item_def_id: DefId,
+ kind: ParamKind,
+ space: ParamSpace,
+ index: usize)
+ -> VarianceTermPtr<'a> {
+ assert_eq!(param_def_id.krate, item_def_id.krate);
+
+ if let Some(param_node_id) = self.tcx().map.as_local_node_id(param_def_id) {
+ // Parameter on an item defined within current crate:
+ // variance not yet inferred, so return a symbolic
+ // variance.
+ let InferredIndex(index) = self.inferred_index(param_node_id);
+ self.terms_cx.inferred_infos[index].term
+ } else {
+ // Parameter on an item defined within another crate:
+ // variance already inferred, just look it up.
+ let variances = self.tcx().item_variances(item_def_id);
+ let variance = match kind {
+ TypeParam => *variances.types.get(space, index),
+ RegionParam => *variances.regions.get(space, index),
+ };
+ self.constant_term(variance)
+ }
+ }
+
+ fn add_constraint(&mut self,
+ InferredIndex(index): InferredIndex,
+ variance: VarianceTermPtr<'a>) {
+ debug!("add_constraint(index={}, variance={:?})",
+ index, variance);
+ self.constraints.push(Constraint { inferred: InferredIndex(index),
+ variance: variance });
+ }
+
+ fn contravariant(&mut self,
+ variance: VarianceTermPtr<'a>)
+ -> VarianceTermPtr<'a> {
+ self.xform(variance, self.contravariant)
+ }
+
+ fn invariant(&mut self,
+ variance: VarianceTermPtr<'a>)
+ -> VarianceTermPtr<'a> {
+ self.xform(variance, self.invariant)
+ }
+
+ fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> {
+ match v {
+ ty::Covariant => self.covariant,
+ ty::Invariant => self.invariant,
+ ty::Contravariant => self.contravariant,
+ ty::Bivariant => self.bivariant,
+ }
+ }
+
+ fn xform(&mut self,
+ v1: VarianceTermPtr<'a>,
+ v2: VarianceTermPtr<'a>)
+ -> VarianceTermPtr<'a> {
+ match (*v1, *v2) {
+ (_, ConstantTerm(ty::Covariant)) => {
+ // Applying a "covariant" transform is always a no-op
+ v1
+ }
+
+ (ConstantTerm(c1), ConstantTerm(c2)) => {
+ self.constant_term(c1.xform(c2))
+ }
+
+ _ => {
+ &*self.terms_cx.arena.alloc(TransformTerm(v1, v2))
+ }
+ }
+ }
+
+ fn add_constraints_from_trait_ref(&mut self,
+ generics: &ty::Generics<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ variance: VarianceTermPtr<'a>) {
+ debug!("add_constraints_from_trait_ref: trait_ref={:?} variance={:?}",
+ trait_ref,
+ variance);
+
+ let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
+
+ // This edge is actually implied by the call to
+ // `lookup_trait_def`, but I'm trying to be future-proof. See
+ // README.md for a discussion on dep-graph management.
+ self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id));
+
+ self.add_constraints_from_substs(
+ generics,
+ trait_ref.def_id,
+ trait_def.generics.types.as_slice(),
+ trait_def.generics.regions.as_slice(),
+ trait_ref.substs,
+ variance);
+ }
+
+ /// Adds constraints appropriate for an instance of `ty` appearing
+ /// in a context with the generics defined in `generics` and
+ /// ambient variance `variance`
+ fn add_constraints_from_ty(&mut self,
+ generics: &ty::Generics<'tcx>,
+ ty: Ty<'tcx>,
+ variance: VarianceTermPtr<'a>) {
+ debug!("add_constraints_from_ty(ty={:?}, variance={:?})",
+ ty,
+ variance);
+
+ match ty.sty {
+ ty::TyBool |
+ ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
+ ty::TyFloat(_) | ty::TyStr => {
+ /* leaf type -- noop */
+ }
+
+ ty::TyClosure(..) => {
+ self.tcx().sess.bug("Unexpected closure type in variance computation");
+ }
+
+ ty::TyRef(region, ref mt) => {
+ let contra = self.contravariant(variance);
+ self.add_constraints_from_region(generics, *region, contra);
+ self.add_constraints_from_mt(generics, mt, variance);
+ }
+
+ ty::TyBox(typ) | ty::TyArray(typ, _) | ty::TySlice(typ) => {
+ self.add_constraints_from_ty(generics, typ, variance);
+ }
+
+
+ ty::TyRawPtr(ref mt) => {
+ self.add_constraints_from_mt(generics, mt, variance);
+ }
+
+ ty::TyTuple(ref subtys) => {
+ for &subty in subtys {
+ self.add_constraints_from_ty(generics, subty, variance);
+ }
+ }
+
+ ty::TyEnum(def, substs) |
+ ty::TyStruct(def, substs) => {
+ let item_type = self.tcx().lookup_item_type(def.did);
+
+ // This edge is actually implied by the call to
+                // `lookup_item_type`, but I'm trying to be future-proof. See
+ // README.md for a discussion on dep-graph management.
+ self.tcx().dep_graph.read(ItemVariances::to_dep_node(&def.did));
+
+ // All type parameters on enums and structs should be
+ // in the TypeSpace.
+ assert!(item_type.generics.types.is_empty_in(subst::SelfSpace));
+ assert!(item_type.generics.types.is_empty_in(subst::FnSpace));
+ assert!(item_type.generics.regions.is_empty_in(subst::SelfSpace));
+ assert!(item_type.generics.regions.is_empty_in(subst::FnSpace));
+
+ self.add_constraints_from_substs(
+ generics,
+ def.did,
+ item_type.generics.types.get_slice(subst::TypeSpace),
+ item_type.generics.regions.get_slice(subst::TypeSpace),
+ substs,
+ variance);
+ }
+
+ ty::TyProjection(ref data) => {
+ let trait_ref = &data.trait_ref;
+ let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id);
+
+ // This edge is actually implied by the call to
+ // `lookup_trait_def`, but I'm trying to be future-proof. See
+ // README.md for a discussion on dep-graph management.
+ self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id));
+
+ self.add_constraints_from_substs(
+ generics,
+ trait_ref.def_id,
+ trait_def.generics.types.as_slice(),
+ trait_def.generics.regions.as_slice(),
+ trait_ref.substs,
+ variance);
+ }
+
+ ty::TyTrait(ref data) => {
+ let poly_trait_ref =
+ data.principal_trait_ref_with_self_ty(self.tcx(),
+ self.tcx().types.err);
+
+ // The type `Foo<T+'a>` is contravariant w/r/t `'a`:
+ let contra = self.contravariant(variance);
+ self.add_constraints_from_region(generics, data.bounds.region_bound, contra);
+
+ // Ignore the SelfSpace, it is erased.
+ self.add_constraints_from_trait_ref(generics, poly_trait_ref.0, variance);
+
+ let projections = data.projection_bounds_with_self_ty(self.tcx(),
+ self.tcx().types.err);
+ for projection in &projections {
+ self.add_constraints_from_ty(generics, projection.0.ty, self.invariant);
+ }
+ }
+
+ ty::TyParam(ref data) => {
+ let def_id = generics.types.get(data.space, data.idx as usize).def_id;
+ let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
+ match self.terms_cx.inferred_map.get(&node_id) {
+ Some(&index) => {
+ self.add_constraint(index, variance);
+ }
+ None => {
+ // We do not infer variance for type parameters
+ // declared on methods. They will not be present
+ // in the inferred_map.
+ }
+ }
+ }
+
+ ty::TyBareFn(_, &ty::BareFnTy { ref sig, .. }) => {
+ self.add_constraints_from_sig(generics, sig, variance);
+ }
+
+ ty::TyError => {
+ // we encounter this when walking the trait references for object
+ // types, where we use TyError as the Self type
+ }
+
+ ty::TyInfer(..) => {
+ self.tcx().sess.bug(
+ &format!("unexpected type encountered in \
+ variance inference: {}", ty));
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for a nominal type (enum, struct,
+ /// object, etc) appearing in a context with ambient variance `variance`
+ fn add_constraints_from_substs(&mut self,
+ generics: &ty::Generics<'tcx>,
+ def_id: DefId,
+ type_param_defs: &[ty::TypeParameterDef<'tcx>],
+ region_param_defs: &[ty::RegionParameterDef],
+ substs: &subst::Substs<'tcx>,
+ variance: VarianceTermPtr<'a>) {
+ debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})",
+ def_id,
+ substs,
+ variance);
+
+ for p in type_param_defs {
+ let variance_decl =
+ self.declared_variance(p.def_id, def_id, TypeParam,
+ p.space, p.index as usize);
+ let variance_i = self.xform(variance, variance_decl);
+ let substs_ty = *substs.types.get(p.space, p.index as usize);
+ debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
+ variance_decl, variance_i);
+ self.add_constraints_from_ty(generics, substs_ty, variance_i);
+ }
+
+ for p in region_param_defs {
+ let variance_decl =
+ self.declared_variance(p.def_id, def_id,
+ RegionParam, p.space, p.index as usize);
+ let variance_i = self.xform(variance, variance_decl);
+ let substs_r = *substs.regions().get(p.space, p.index as usize);
+ self.add_constraints_from_region(generics, substs_r, variance_i);
+ }
+ }
+
+ /// Adds constraints appropriate for a function with signature
+ /// `sig` appearing in a context with ambient variance `variance`
+ fn add_constraints_from_sig(&mut self,
+ generics: &ty::Generics<'tcx>,
+ sig: &ty::PolyFnSig<'tcx>,
+ variance: VarianceTermPtr<'a>) {
+ let contra = self.contravariant(variance);
+ for &input in &sig.0.inputs {
+ self.add_constraints_from_ty(generics, input, contra);
+ }
+ if let ty::FnConverging(result_type) = sig.0.output {
+ self.add_constraints_from_ty(generics, result_type, variance);
+ }
+ }
+
+ /// Adds constraints appropriate for a region appearing in a
+ /// context with ambient variance `variance`
+ fn add_constraints_from_region(&mut self,
+ generics: &ty::Generics<'tcx>,
+ region: ty::Region,
+ variance: VarianceTermPtr<'a>) {
+ match region {
+ ty::ReEarlyBound(ref data) => {
+ let def_id =
+ generics.regions.get(data.space, data.index as usize).def_id;
+ let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
+ if self.is_to_be_inferred(node_id) {
+ let index = self.inferred_index(node_id);
+ self.add_constraint(index, variance);
+ }
+ }
+
+ ty::ReStatic => { }
+
+ ty::ReLateBound(..) => {
+ // We do not infer variance for region parameters on
+ // methods or in fn types.
+ }
+
+ ty::ReFree(..) | ty::ReScope(..) | ty::ReVar(..) |
+ ty::ReSkolemized(..) | ty::ReEmpty => {
+ // We don't expect to see anything but 'static or bound
+ // regions when visiting member types or method types.
+ self.tcx()
+ .sess
+ .bug(&format!("unexpected region encountered in variance \
+ inference: {:?}",
+ region));
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for a mutability-type pair
+ /// appearing in a context with ambient variance `variance`
+ fn add_constraints_from_mt(&mut self,
+ generics: &ty::Generics<'tcx>,
+ mt: &ty::TypeAndMut<'tcx>,
+ variance: VarianceTermPtr<'a>) {
+ match mt.mutbl {
+ hir::MutMutable => {
+ let invar = self.invariant(variance);
+ self.add_constraints_from_ty(generics, mt.ty, invar);
+ }
+
+ hir::MutImmutable => {
+ self.add_constraints_from_ty(generics, mt.ty, variance);
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Module for inferring the variance of type and lifetime
+//! parameters. See README.md for details.
+
+use arena;
+use middle::ty;
+
+/// Defines the `TermsContext` basically houses an arena where we can
+/// allocate terms.
+mod terms;
+
+/// Code to gather up constraints.
+mod constraints;
+
+/// Code to solve constraints and write out the results.
+mod solve;
+
+/// Code for transforming variances.
+mod xform;
+
+pub fn infer_variance(tcx: &ty::ctxt) {
+ let mut arena = arena::TypedArena::new();
+ let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &mut arena);
+ let constraints_cx = constraints::add_constraints_from_crate(terms_cx);
+ solve::solve_constraints(constraints_cx);
+ tcx.variance_computed.set(true);
+}
+
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Constraint solving
+//!
+//! The final phase iterates over the constraints, refining the variance
+//! for each inferred until a fixed point is reached. This will be the
+//! optimal solution to the constraints. The final variance for each
+//! inferred is then written into the `variance_map` in the tcx.
+
+use middle::subst::VecPerParamSpace;
+use middle::ty;
+use std::rc::Rc;
+
+use super::constraints::*;
+use super::terms::*;
+use super::terms::VarianceTerm::*;
+use super::terms::ParamKind::*;
+use super::xform::*;
+
+struct SolveContext<'a, 'tcx: 'a> {
+ terms_cx: TermsContext<'a, 'tcx>,
+ constraints: Vec<Constraint<'a>> ,
+
+ // Maps from an InferredIndex to the inferred value for that variable.
+ solutions: Vec<ty::Variance>
+}
+
+pub fn solve_constraints(constraints_cx: ConstraintContext) {
+ let ConstraintContext { terms_cx, constraints, .. } = constraints_cx;
+
+ let solutions =
+ terms_cx.inferred_infos.iter()
+ .map(|ii| ii.initial_variance)
+ .collect();
+
+ let mut solutions_cx = SolveContext {
+ terms_cx: terms_cx,
+ constraints: constraints,
+ solutions: solutions
+ };
+ solutions_cx.solve();
+ solutions_cx.write();
+}
+
+impl<'a, 'tcx> SolveContext<'a, 'tcx> {
+ fn solve(&mut self) {
+ // Propagate constraints until a fixed point is reached. Note
+ // that the maximum number of iterations is 2C where C is the
+ // number of constraints (each variable can change values at most
+ // twice). Since number of constraints is linear in size of the
+ // input, so is the inference process.
+ let mut changed = true;
+ while changed {
+ changed = false;
+
+ for constraint in &self.constraints {
+ let Constraint { inferred, variance: term } = *constraint;
+ let InferredIndex(inferred) = inferred;
+ let variance = self.evaluate(term);
+ let old_value = self.solutions[inferred];
+ let new_value = glb(variance, old_value);
+ if old_value != new_value {
+ debug!("Updating inferred {} (node {}) \
+ from {:?} to {:?} due to {:?}",
+ inferred,
+ self.terms_cx
+ .inferred_infos[inferred]
+ .param_id,
+ old_value,
+ new_value,
+ term);
+
+ self.solutions[inferred] = new_value;
+ changed = true;
+ }
+ }
+ }
+ }
+
+ fn write(&self) {
+ // Collect all the variances for a particular item and stick
+ // them into the variance map. We rely on the fact that we
+ // generate all the inferreds for a particular item
+ // consecutively (that is, we collect solutions for an item
+ // until we see a new item id, and we assume (1) the solutions
+ // are in the same order as the type parameters were declared
+        // and (2) all solutions for a given item appear before a new
+ // item id).
+
+ let tcx = self.terms_cx.tcx;
+
+ // Ignore the writes here because the relevant edges were
+ // already accounted for in `constraints.rs`. See the section
+ // on dependency graph management in README.md for more
+ // information.
+ let _ignore = tcx.dep_graph.in_ignore();
+
+ let solutions = &self.solutions;
+ let inferred_infos = &self.terms_cx.inferred_infos;
+ let mut index = 0;
+ let num_inferred = self.terms_cx.num_inferred();
+ while index < num_inferred {
+ let item_id = inferred_infos[index].item_id;
+ let mut types = VecPerParamSpace::empty();
+ let mut regions = VecPerParamSpace::empty();
+
+ while index < num_inferred && inferred_infos[index].item_id == item_id {
+ let info = &inferred_infos[index];
+ let variance = solutions[index];
+ debug!("Index {} Info {} / {:?} / {:?} Variance {:?}",
+ index, info.index, info.kind, info.space, variance);
+ match info.kind {
+ TypeParam => { types.push(info.space, variance); }
+ RegionParam => { regions.push(info.space, variance); }
+ }
+
+ index += 1;
+ }
+
+ let item_variances = ty::ItemVariances {
+ types: types,
+ regions: regions
+ };
+ debug!("item_id={} item_variances={:?}",
+ item_id,
+ item_variances);
+
+ let item_def_id = tcx.map.local_def_id(item_id);
+
+ // For unit testing: check for a special "rustc_variance"
+ // attribute and report an error with various results if found.
+ if tcx.has_attr(item_def_id, "rustc_variance") {
+ span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{:?}", item_variances);
+ }
+
+ let newly_added = tcx.item_variance_map.borrow_mut()
+ .insert(item_def_id, Rc::new(item_variances)).is_none();
+ assert!(newly_added);
+ }
+ }
+
+ fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance {
+ match *term {
+ ConstantTerm(v) => {
+ v
+ }
+
+ TransformTerm(t1, t2) => {
+ let v1 = self.evaluate(t1);
+ let v2 = self.evaluate(t2);
+ v1.xform(v2)
+ }
+
+ InferredTerm(InferredIndex(index)) => {
+ self.solutions[index]
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Representing terms
+//
+// Terms are structured as a straightforward tree. Rather than rely on
+// GC, we allocate terms out of a bounded arena (the lifetime of this
+// arena is the lifetime 'a that is threaded around).
+//
+// We assign a unique index to each type/region parameter whose variance
+// is to be inferred. We refer to such variables as "inferreds". An
+// `InferredIndex` is a newtype'd int representing the index of such
+// a variable.
+
+use arena::TypedArena;
+use dep_graph::DepTrackingMapConfig;
+use middle::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace};
+use middle::ty;
+use middle::ty::maps::ItemVariances;
+use std::fmt;
+use std::rc::Rc;
+use syntax::ast;
+use rustc_front::hir;
+use rustc_front::intravisit::Visitor;
+use util::nodemap::NodeMap;
+
+use self::VarianceTerm::*;
+use self::ParamKind::*;
+
+pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
+
+#[derive(Copy, Clone, Debug)]
+pub struct InferredIndex(pub usize);
+
+#[derive(Copy, Clone)]
+pub enum VarianceTerm<'a> {
+ ConstantTerm(ty::Variance),
+ TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>),
+ InferredTerm(InferredIndex),
+}
+
+impl<'a> fmt::Debug for VarianceTerm<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ ConstantTerm(c1) => write!(f, "{:?}", c1),
+ TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2),
+ InferredTerm(id) => write!(f, "[{}]", { let InferredIndex(i) = id; i })
+ }
+ }
+}
+
+// The first pass over the crate simply builds up the set of inferreds.
+
+pub struct TermsContext<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
+ pub arena: &'a TypedArena<VarianceTerm<'a>>,
+
+ pub empty_variances: Rc<ty::ItemVariances>,
+
+ // For marker types, UnsafeCell, and other lang items where
+ // variance is hardcoded, records the item-id and the hardcoded
+ // variance.
+ pub lang_items: Vec<(ast::NodeId, Vec<ty::Variance>)>,
+
+ // Maps from the node id of a type/generic parameter to the
+ // corresponding inferred index.
+ pub inferred_map: NodeMap<InferredIndex>,
+
+ // Maps from an InferredIndex to the info for that variable.
+ pub inferred_infos: Vec<InferredInfo<'a>> ,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum ParamKind {
+ TypeParam,
+ RegionParam,
+}
+
+pub struct InferredInfo<'a> {
+ pub item_id: ast::NodeId,
+ pub kind: ParamKind,
+ pub space: ParamSpace,
+ pub index: usize,
+ pub param_id: ast::NodeId,
+ pub term: VarianceTermPtr<'a>,
+
+ // Initial value to use for this parameter when inferring
+ // variance. For most parameters, this is Bivariant. But for lang
+ // items and input type parameters on traits, it is different.
+ pub initial_variance: ty::Variance,
+}
+
+pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
+ tcx: &'a ty::ctxt<'tcx>,
+ arena: &'a mut TypedArena<VarianceTerm<'a>>)
+ -> TermsContext<'a, 'tcx>
+{
+ let mut terms_cx = TermsContext {
+ tcx: tcx,
+ arena: arena,
+ inferred_map: NodeMap(),
+ inferred_infos: Vec::new(),
+
+ lang_items: lang_items(tcx),
+
+ // cache and share the variance struct used for items with
+ // no type/region parameters
+ empty_variances: Rc::new(ty::ItemVariances {
+ types: VecPerParamSpace::empty(),
+ regions: VecPerParamSpace::empty()
+ })
+ };
+
+ // See README.md for a discussion on dep-graph management.
+ tcx.visit_all_items_in_krate(|def_id| ItemVariances::to_dep_node(&def_id),
+ &mut terms_cx);
+
+ terms_cx
+}
+
+fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
+ let all = vec![
+ (tcx.lang_items.phantom_data(), vec![ty::Covariant]),
+ (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
+
+ // Deprecated:
+ (tcx.lang_items.covariant_type(), vec![ty::Covariant]),
+ (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]),
+ (tcx.lang_items.invariant_type(), vec![ty::Invariant]),
+ (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]),
+ (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]),
+ (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]),
+
+ ];
+
+ all.into_iter() // iterating over (Option<DefId>, Variance)
+ .filter(|&(ref d,_)| d.is_some())
+ .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance)
+ .filter_map(|(d, v)| tcx.map.as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance)
+ .collect()
+}
+
+impl<'a, 'tcx> TermsContext<'a, 'tcx> {
+ fn add_inferreds_for_item(&mut self,
+ item_id: ast::NodeId,
+ has_self: bool,
+ generics: &hir::Generics)
+ {
+ /*!
+ * Add "inferreds" for the generic parameters declared on this
+ * item. This has a lot of annoying parameters because we are
+ * trying to drive this from the AST, rather than the
+ * ty::Generics, so that we can get span info -- but this
+ * means we must accommodate syntactic distinctions.
+ */
+
+ // NB: In the code below for writing the results back into the
+ // tcx, we rely on the fact that all inferreds for a particular
+ // item are assigned continuous indices.
+
+ let inferreds_on_entry = self.num_inferred();
+
+ if has_self {
+ self.add_inferred(item_id, TypeParam, SelfSpace, 0, item_id);
+ }
+
+ for (i, p) in generics.lifetimes.iter().enumerate() {
+ let id = p.lifetime.id;
+ self.add_inferred(item_id, RegionParam, TypeSpace, i, id);
+ }
+
+ for (i, p) in generics.ty_params.iter().enumerate() {
+ self.add_inferred(item_id, TypeParam, TypeSpace, i, p.id);
+ }
+
+ // If this item has no type or lifetime parameters,
+ // then there are no variances to infer, so just
+ // insert an empty entry into the variance map.
+ // Arguably we could just leave the map empty in this
+ // case but it seems cleaner to be able to distinguish
+ // "invalid item id" from "item id with no
+ // parameters".
+ if self.num_inferred() == inferreds_on_entry {
+ let item_def_id = self.tcx.map.local_def_id(item_id);
+ let newly_added =
+ self.tcx.item_variance_map.borrow_mut().insert(
+ item_def_id,
+ self.empty_variances.clone()).is_none();
+ assert!(newly_added);
+ }
+ }
+
+ fn add_inferred(&mut self,
+ item_id: ast::NodeId,
+ kind: ParamKind,
+ space: ParamSpace,
+ index: usize,
+ param_id: ast::NodeId) {
+ let inf_index = InferredIndex(self.inferred_infos.len());
+ let term = self.arena.alloc(InferredTerm(inf_index));
+ let initial_variance = self.pick_initial_variance(item_id, space, index);
+ self.inferred_infos.push(InferredInfo { item_id: item_id,
+ kind: kind,
+ space: space,
+ index: index,
+ param_id: param_id,
+ term: term,
+ initial_variance: initial_variance });
+ let newly_added = self.inferred_map.insert(param_id, inf_index).is_none();
+ assert!(newly_added);
+
+ debug!("add_inferred(item_path={}, \
+ item_id={}, \
+ kind={:?}, \
+ space={:?}, \
+ index={}, \
+ param_id={}, \
+ inf_index={:?}, \
+ initial_variance={:?})",
+ self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)),
+ item_id, kind, space, index, param_id, inf_index,
+ initial_variance);
+ }
+
+ fn pick_initial_variance(&self,
+ item_id: ast::NodeId,
+ space: ParamSpace,
+ index: usize)
+ -> ty::Variance
+ {
+ match space {
+ SelfSpace | FnSpace => {
+ ty::Bivariant
+ }
+
+ TypeSpace => {
+ match self.lang_items.iter().find(|&&(n, _)| n == item_id) {
+ Some(&(_, ref variances)) => variances[index],
+ None => ty::Bivariant
+ }
+ }
+ }
+ }
+
+ pub fn num_inferred(&self) -> usize {
+ self.inferred_infos.len()
+ }
+}
+
+impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
+ fn visit_item(&mut self, item: &hir::Item) {
+ debug!("add_inferreds for item {}", self.tcx.map.node_to_string(item.id));
+
+ match item.node {
+ hir::ItemEnum(_, ref generics) |
+ hir::ItemStruct(_, ref generics) => {
+ self.add_inferreds_for_item(item.id, false, generics);
+ }
+ hir::ItemTrait(_, ref generics, _, _) => {
+ // Note: all inputs for traits are ultimately
+ // constrained to be invariant. See `visit_item` in
+ // the impl for `ConstraintContext` in `constraints.rs`.
+ self.add_inferreds_for_item(item.id, true, generics);
+ }
+
+ hir::ItemExternCrate(_) |
+ hir::ItemUse(_) |
+ hir::ItemDefaultImpl(..) |
+ hir::ItemImpl(..) |
+ hir::ItemStatic(..) |
+ hir::ItemConst(..) |
+ hir::ItemFn(..) |
+ hir::ItemMod(..) |
+ hir::ItemForeignMod(..) |
+ hir::ItemTy(..) => {
+ }
+ }
+ }
+}
+
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::ty;
+
+pub trait Xform {
+ fn xform(self, v: Self) -> Self;
+}
+
+impl Xform for ty::Variance {
+ fn xform(self, v: ty::Variance) -> ty::Variance {
+ // "Variance transformation", Figure 1 of The Paper
+ match (self, v) {
+ // Figure 1, column 1.
+ (ty::Covariant, ty::Covariant) => ty::Covariant,
+ (ty::Covariant, ty::Contravariant) => ty::Contravariant,
+ (ty::Covariant, ty::Invariant) => ty::Invariant,
+ (ty::Covariant, ty::Bivariant) => ty::Bivariant,
+
+ // Figure 1, column 2.
+ (ty::Contravariant, ty::Covariant) => ty::Contravariant,
+ (ty::Contravariant, ty::Contravariant) => ty::Covariant,
+ (ty::Contravariant, ty::Invariant) => ty::Invariant,
+ (ty::Contravariant, ty::Bivariant) => ty::Bivariant,
+
+ // Figure 1, column 3.
+ (ty::Invariant, _) => ty::Invariant,
+
+ // Figure 1, column 4.
+ (ty::Bivariant, _) => ty::Bivariant,
+ }
+ }
+}
+
+pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance {
+ // Greatest lower bound of the variance lattice as
+ // defined in The Paper:
+ //
+ // *
+ // - +
+ // o
+ match (v1, v2) {
+ (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant,
+
+ (ty::Covariant, ty::Contravariant) => ty::Invariant,
+ (ty::Contravariant, ty::Covariant) => ty::Invariant,
+
+ (ty::Covariant, ty::Covariant) => ty::Covariant,
+
+ (ty::Contravariant, ty::Contravariant) => ty::Contravariant,
+
+ (x, ty::Bivariant) | (ty::Bivariant, x) => x,
+ }
+}
debug!("Trying to get a name from pattern: {:?}", p);
match p.node {
- PatWild => "_".to_string(),
- PatIdent(_, ref p, _) => p.node.to_string(),
- PatEnum(ref p, _) => path_to_string(p),
- PatQPath(..) => panic!("tried to get argument name from PatQPath, \
+ PatKind::Wild => "_".to_string(),
+ PatKind::Ident(_, ref p, _) => p.node.to_string(),
+ PatKind::TupleStruct(ref p, _) | PatKind::Path(ref p) => path_to_string(p),
+ PatKind::QPath(..) => panic!("tried to get argument name from PatKind::QPath, \
which is not allowed in function arguments"),
- PatStruct(ref name, ref fields, etc) => {
+ PatKind::Struct(ref name, ref fields, etc) => {
format!("{} {{ {}{} }}", path_to_string(name),
fields.iter().map(|&Spanned { node: ref fp, .. }|
format!("{}: {}", fp.name, name_from_pat(&*fp.pat)))
if etc { ", ..." } else { "" }
)
},
- PatTup(ref elts) => format!("({})", elts.iter().map(|p| name_from_pat(&**p))
+ PatKind::Tup(ref elts) => format!("({})", elts.iter().map(|p| name_from_pat(&**p))
.collect::<Vec<String>>().join(", ")),
- PatBox(ref p) => name_from_pat(&**p),
- PatRegion(ref p, _) => name_from_pat(&**p),
- PatLit(..) => {
- warn!("tried to get argument name from PatLit, \
+ PatKind::Box(ref p) => name_from_pat(&**p),
+ PatKind::Ref(ref p, _) => name_from_pat(&**p),
+ PatKind::Lit(..) => {
+ warn!("tried to get argument name from PatKind::Lit, \
which is silly in function arguments");
"()".to_string()
},
- PatRange(..) => panic!("tried to get argument name from PatRange, \
+ PatKind::Range(..) => panic!("tried to get argument name from PatKind::Range, \
which is not allowed in function arguments"),
- PatVec(ref begin, ref mid, ref end) => {
+ PatKind::Vec(ref begin, ref mid, ref end) => {
let begin = begin.iter().map(|p| name_from_pat(&**p));
let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter();
let end = end.iter().map(|p| name_from_pat(&**p));
<p>
Search functions by type signature (e.g.
- <code>vec -> usize</code>)
+ <code>vec -> usize</code> or <code>* -> vec</code>)
</p>
</div>
</div>
var parts = val.split("->").map(trimmer);
var input = parts[0];
// sort inputs so that order does not matter
- var inputs = input.split(",").map(trimmer).sort();
+ var inputs = input.split(",").map(trimmer).sort().toString();
var output = parts[1];
for (var i = 0; i < nSearchWords; ++i) {
// allow searching for void (no output) functions as well
var typeOutput = type.output ? type.output.name : "";
- if (inputs.toString() === typeInputs.toString() &&
- output == typeOutput) {
+ if ((inputs === "*" || inputs === typeInputs.toString()) &&
+ (output === "*" || output == typeOutput)) {
results.push({id: i, index: -1, dontValidate: true});
}
}
///
/// If the map did not have this key present, `None` is returned.
///
- /// If the map did have this key present, the key is not updated, the
- /// value is updated and the old value is returned.
- /// See the [module-level documentation] for more.
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
///
/// [module-level documentation]: index.html#insert-and-complex-keys
///
use cmp;
use hash::{Hash, Hasher, BuildHasher};
+use intrinsics::needs_drop;
use marker;
use mem::{align_of, size_of};
use mem;
// dropping empty tables such as on resize.
// Also avoid double drop of elements that have been already moved out.
unsafe {
- for _ in self.rev_move_buckets() {}
+ if needs_drop::<(K, V)>() { // avoid linear runtime for types that don't need drop
+ for _ in self.rev_move_buckets() {}
+ }
}
let hashes_size = self.capacity * size_of::<u64>();
//! }
//!
//! let mut map = BTreeMap::new();
-//! map.insert(Foo { a: 1, b: "baz" }, ());
+//! map.insert(Foo { a: 1, b: "baz" }, 99);
//!
//! // We already have a Foo with an a of 1, so this will be updating the value.
-//! map.insert(Foo { a: 1, b: "xyz" }, ());
+//! map.insert(Foo { a: 1, b: "xyz" }, 100);
//!
-//! // ... but the key hasn't changed. b is still "baz", not "xyz"
+//! // The value has been updated...
+//! assert_eq!(map.values().next().unwrap(), &100);
+//!
+//! // ...but the key hasn't changed. b is still "baz", not "xyz".
//! assert_eq!(map.keys().next().unwrap().b, "baz");
//! ```
#![allow(deprecated)]
use os::raw::c_long;
-use os::unix::raw::{uid_t, gid_t};
#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_rdev: u32,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: i64,
+ pub st_atime: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: i64,
+ pub st_atime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: i64,
+ pub st_mtime: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: i64,
+ pub st_mtime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: i64,
+ pub st_ctime: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: i64,
+ pub st_ctime_nsec: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_size: i64,
#[stable(feature = "raw_ext", since = "1.1.0")]
#[stable(feature = "raw_ext", since = "1.1.0")]
pub st_lspare: i32,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime: i64,
+ pub st_birthtime: c_long,
#[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime_nsec: i64,
+ pub st_birthtime_nsec: c_long,
}
definitions")]
#![allow(deprecated)]
-#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64;
+use os::raw::c_long;
+
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type fflags_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u32;
#[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u64;
-#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64;
-#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64;
-#[stable(feature = "raw_ext", since = "1.1.0")] pub type fflags_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64;
#[unstable(feature = "pthread_t", issue = "29791")] pub type pthread_t = usize;
-#[doc(inline)]
+#[repr(C)]
+#[derive(Clone)]
#[stable(feature = "raw_ext", since = "1.1.0")]
-pub use self::arch::{stat, time_t};
-
-#[cfg(target_arch = "x86")]
-mod arch {
- use super::{off_t, dev_t, ino_t, mode_t, nlink_t, blksize_t, blkcnt_t, fflags_t};
- use os::raw::c_long;
- use os::unix::raw::{uid_t, gid_t};
-
- #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64;
-
- #[repr(C)]
- #[derive(Clone)]
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub struct stat {
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_dev: dev_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ino: ino_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mode: mode_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_nlink: nlink_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_uid: uid_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gid: gid_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_rdev: dev_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_size: off_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blocks: blkcnt_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blksize: blksize_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_flags: fflags_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gen: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_lspare: i32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime: time_t,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime_nsec: c_long,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub __unused: [u8; 8],
- }
-}
-
-#[cfg(target_arch = "x86_64")]
-mod arch {
- use super::{off_t, dev_t, ino_t, mode_t, nlink_t, blksize_t, blkcnt_t, fflags_t};
- use os::raw::c_long;
- use os::unix::raw::{uid_t, gid_t};
-
- #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64;
-
- #[repr(C)]
- #[derive(Clone)]
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub struct stat {
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_dev: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ino: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mode: u16,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_nlink: u16,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_uid: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gid: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_rdev: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_atime_nsec: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_mtime_nsec: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_ctime_nsec: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_size: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blocks: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_blksize: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_flags: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_gen: u32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_lspare: i32,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime: i64,
- #[stable(feature = "raw_ext", since = "1.1.0")]
- pub st_birthtime_nsec: i64,
- }
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u16,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_flags: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gen: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_lspare: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_birthtime_nsec: c_long,
+ #[cfg(target_arch = "x86")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [u8; 8],
}
-
pub fn new(t: T) -> AssertRecoverSafe<T> {
AssertRecoverSafe(t)
}
+
+ /// Consumes the `AssertRecoverSafe`, returning the wrapped value.
+ #[unstable(feature = "recover", reason = "awaiting feedback", issue = "27719")]
+ pub fn into_inner(self) -> T {
+ self.0
+ }
}
impl<T> Deref for AssertRecoverSafe<T> {
/// Returns a path that, when joined onto `base`, yields `self`.
///
+ /// # Errors
+ ///
/// If `base` is not a prefix of `self` (i.e. `starts_with`
- /// returns false), then `relative_from` returns `None`.
+ /// returns `false`), returns `Err`.
#[stable(since = "1.7.0", feature = "path_strip_prefix")]
pub fn strip_prefix<'a, P: ?Sized>(&'a self, base: &'a P)
-> Result<&'a Path, StripPrefixError>
/// [`String`]: string/struct.String.html
///
/// As always, remember that a human intuition for 'character' may not map to
-/// Unicode's definitions. For example, emoji symbols such as '❤️' are more than
-/// one byte; ❤️ in particular is six:
+/// Unicode's definitions. For example, emoji symbols such as '❤️' can be more
+/// than one Unicode code point; this ❤️ in particular is two:
///
/// ```
/// let s = String::from("❤️");
///
-/// // six bytes times one byte for each element
-/// assert_eq!(6, s.len() * std::mem::size_of::<u8>());
+/// // we get two chars out of a single ❤️
+/// let mut iter = s.chars();
+/// assert_eq!(Some('\u{2764}'), iter.next());
+/// assert_eq!(Some('\u{fe0f}'), iter.next());
+/// assert_eq!(None, iter.next());
/// ```
///
-/// This also means it won't fit into a `char`, and so trying to create a
-/// literal with `let heart = '❤️';` gives an error:
+/// This means it won't fit into a `char`. Trying to create a literal with
+/// `let heart = '❤️';` gives an error:
///
/// ```text
/// error: character literal may only contain one codepoint: '❤
/// ^~
/// ```
///
-/// Another implication of this is that if you want to do per-`char`acter
-/// processing, it can end up using a lot more memory:
+/// Another implication of the 4-byte fixed size of a `char`, is that
+/// per-`char`acter processing can end up using a lot more memory:
///
/// ```
/// let s = String::from("love: ❤️");
/// assert_eq!(12, s.len() * std::mem::size_of::<u8>());
/// assert_eq!(32, v.len() * std::mem::size_of::<char>());
/// ```
-///
-/// Or may give you results you may not expect:
-///
-/// ```
-/// let s = String::from("❤️");
-///
-/// let mut iter = s.chars();
-///
-/// // we get two chars out of a single ❤️
-/// assert_eq!(Some('\u{2764}'), iter.next());
-/// assert_eq!(Some('\u{fe0f}'), iter.next());
-/// assert_eq!(None, iter.next());
-/// ```
mod prim_char { }
#[doc(primitive = "unit")]
use ffi::{CString, CStr, OsString, OsStr};
use fmt;
use io::{self, Error, ErrorKind, SeekFrom};
-use libc::{self, dirent, c_int, off_t, mode_t};
+use libc::{self, c_int, mode_t};
use mem;
use path::{Path, PathBuf};
use ptr;
use sys_common::{AsInner, FromInner};
#[cfg(target_os = "linux")]
-use libc::{stat64, fstat64, lstat64};
+use libc::{stat64, fstat64, lstat64, off64_t, ftruncate64, lseek64, dirent64, readdir64_r, open64};
#[cfg(not(target_os = "linux"))]
-use libc::{stat as stat64, fstat as fstat64, lstat as lstat64};
+use libc::{stat as stat64, fstat as fstat64, lstat as lstat64, off_t as off64_t,
+ ftruncate as ftruncate64, lseek as lseek64, dirent as dirent64, open as open64};
+#[cfg(not(any(target_os = "linux", target_os = "solaris")))]
+use libc::{readdir_r as readdir64_r};
pub struct File(FileDesc);
unsafe impl Sync for Dir {}
pub struct DirEntry {
- entry: dirent,
+ entry: dirent64,
root: Arc<PathBuf>,
// We need to store an owned copy of the directory name
// on Solaris because a) it uses a zero-length array to
};
let mut entry_ptr = ptr::null_mut();
loop {
- if libc::readdir_r(self.dirp.0, &mut ret.entry, &mut entry_ptr) != 0 {
+ if readdir64_r(self.dirp.0, &mut ret.entry, &mut entry_ptr) != 0 {
return Some(Err(Error::last_os_error()))
}
if entry_ptr.is_null() {
try!(opts.get_creation_mode()) |
(opts.custom_flags as c_int & !libc::O_ACCMODE);
let fd = try!(cvt_r(|| unsafe {
- libc::open(path.as_ptr(), flags, opts.mode as c_int)
+ open64(path.as_ptr(), flags, opts.mode as c_int)
}));
let fd = FileDesc::new(fd);
pub fn truncate(&self, size: u64) -> io::Result<()> {
try!(cvt_r(|| unsafe {
- libc::ftruncate(self.0.raw(), size as libc::off_t)
+ ftruncate64(self.0.raw(), size as off64_t)
}));
Ok(())
}
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
- SeekFrom::Start(off) => (libc::SEEK_SET, off as off_t),
- SeekFrom::End(off) => (libc::SEEK_END, off as off_t),
- SeekFrom::Current(off) => (libc::SEEK_CUR, off as off_t),
+ SeekFrom::Start(off) => (libc::SEEK_SET, off as off64_t),
+ SeekFrom::End(off) => (libc::SEEK_END, off as off64_t),
+ SeekFrom::Current(off) => (libc::SEEK_CUR, off as off64_t),
};
- let n = try!(cvt(unsafe { libc::lseek(self.0.raw(), pos, whence) }));
+ let n = try!(cvt(unsafe { lseek64(self.0.raw(), pos, whence) }));
Ok(n as u64)
}
#[derive(Clone)]
pub struct FileAttr {
- data: c::WIN32_FILE_ATTRIBUTE_DATA,
+ attributes: c::DWORD,
+ creation_time: c::FILETIME,
+ last_access_time: c::FILETIME,
+ last_write_time: c::FILETIME,
+ file_size: u64,
reparse_tag: c::DWORD,
}
pub fn metadata(&self) -> io::Result<FileAttr> {
Ok(FileAttr {
- data: c::WIN32_FILE_ATTRIBUTE_DATA {
- dwFileAttributes: self.data.dwFileAttributes,
- ftCreationTime: self.data.ftCreationTime,
- ftLastAccessTime: self.data.ftLastAccessTime,
- ftLastWriteTime: self.data.ftLastWriteTime,
- nFileSizeHigh: self.data.nFileSizeHigh,
- nFileSizeLow: self.data.nFileSizeLow,
- },
+ attributes: self.data.dwFileAttributes,
+ creation_time: self.data.ftCreationTime,
+ last_access_time: self.data.ftLastAccessTime,
+ last_write_time: self.data.ftLastWriteTime,
+ file_size: ((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64),
reparse_tag: if self.data.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
// reserved unless this is a reparse point
self.data.dwReserved0
try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
&mut info)));
let mut attr = FileAttr {
- data: c::WIN32_FILE_ATTRIBUTE_DATA {
- dwFileAttributes: info.dwFileAttributes,
- ftCreationTime: info.ftCreationTime,
- ftLastAccessTime: info.ftLastAccessTime,
- ftLastWriteTime: info.ftLastWriteTime,
- nFileSizeHigh: info.nFileSizeHigh,
- nFileSizeLow: info.nFileSizeLow,
- },
+ attributes: info.dwFileAttributes,
+ creation_time: info.ftCreationTime,
+ last_access_time: info.ftLastAccessTime,
+ last_write_time: info.ftLastWriteTime,
+ file_size: ((info.nFileSizeHigh as u64) << 32) | (info.nFileSizeLow as u64),
reparse_tag: 0,
};
if attr.is_reparse_point() {
impl FileAttr {
pub fn size(&self) -> u64 {
- ((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64)
+ self.file_size
}
pub fn perm(&self) -> FilePermissions {
- FilePermissions { attrs: self.data.dwFileAttributes }
+ FilePermissions { attrs: self.attributes }
}
- pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
+ pub fn attrs(&self) -> u32 { self.attributes as u32 }
pub fn file_type(&self) -> FileType {
- FileType::new(self.data.dwFileAttributes, self.reparse_tag)
+ FileType::new(self.attributes, self.reparse_tag)
}
pub fn modified(&self) -> io::Result<SystemTime> {
- Ok(SystemTime::from(self.data.ftLastWriteTime))
+ Ok(SystemTime::from(self.last_write_time))
}
pub fn accessed(&self) -> io::Result<SystemTime> {
- Ok(SystemTime::from(self.data.ftLastAccessTime))
+ Ok(SystemTime::from(self.last_access_time))
}
pub fn created(&self) -> io::Result<SystemTime> {
- Ok(SystemTime::from(self.data.ftCreationTime))
+ Ok(SystemTime::from(self.creation_time))
}
pub fn modified_u64(&self) -> u64 {
- to_u64(&self.data.ftLastWriteTime)
+ to_u64(&self.last_write_time)
}
pub fn accessed_u64(&self) -> u64 {
- to_u64(&self.data.ftLastAccessTime)
+ to_u64(&self.last_access_time)
}
pub fn created_u64(&self) -> u64 {
- to_u64(&self.data.ftCreationTime)
+ to_u64(&self.creation_time)
}
fn is_reparse_point(&self) -> bool {
- self.data.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0
+ self.attributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0
}
}
/// An associated const named using the qualified path `<T>::CONST` or
/// `<T as Trait>::CONST`. Associated consts from inherent impls can be
/// referred to as simply `T::CONST`, in which case they will end up as
- /// PatKind::Enum, and the resolver will have to sort that out.
+ /// PatKind::Path, and the resolver will have to sort that out.
QPath(QSelf, Path),
/// A tuple pattern `(a, b)`
err: &mut DiagnosticBuilder<'a>) {
let names = &self.syntax_env.names;
if let Some(suggestion) = find_best_match_for_name(names.iter(), name, None) {
- err.fileline_help(span, &format!("did you mean `{}!`?", suggestion));
+ if suggestion != name {
+ err.fileline_help(span, &format!("did you mean `{}!`?", suggestion));
+ } else {
+ err.fileline_help(span, &format!("have you added the `#[macro_use]` on the \
+ module/import?"));
+ }
}
}
}
flags Restrictions: u8 {
const RESTRICTION_STMT_EXPR = 1 << 0,
const RESTRICTION_NO_STRUCT_LITERAL = 1 << 1,
+ const NO_NONINLINE_MOD = 1 << 2,
}
}
/// Evaluate the closure with restrictions in place.
///
/// After the closure is evaluated, restrictions are reset.
- pub fn with_res<F>(&mut self, r: Restrictions, f: F) -> PResult<'a, P<Expr>>
- where F: FnOnce(&mut Self) -> PResult<'a, P<Expr>>
+ pub fn with_res<F, T>(&mut self, r: Restrictions, f: F) -> T
+ where F: FnOnce(&mut Self) -> T
{
let old = self.restrictions;
self.restrictions = r;
}
} else {
// FIXME: Bad copy of attrs
- match try!(self.parse_item_(attrs.clone(), false, true)) {
+ let restrictions = self.restrictions | Restrictions::NO_NONINLINE_MOD;
+ match try!(self.with_res(restrictions,
+ |this| this.parse_item_(attrs.clone(), false, true))) {
Some(i) => {
let hi = i.span.hi;
let decl = P(spanned(lo, hi, DeclKind::Item(i)));
self.push_mod_path(id, outer_attrs);
try!(self.expect(&token::OpenDelim(token::Brace)));
let mod_inner_lo = self.span.lo;
- let old_owns_directory = self.owns_directory;
- self.owns_directory = true;
let attrs = try!(self.parse_inner_attributes());
let m = try!(self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo));
- self.owns_directory = old_owns_directory;
self.pop_mod_path();
Ok((id, ItemKind::Mod(m), Some(attrs)))
}
let paths = Parser::default_submod_path(id, &dir_path, self.sess.codemap());
- if !self.owns_directory {
+ if self.restrictions.contains(Restrictions::NO_NONINLINE_MOD) {
+ let msg =
+ "Cannot declare a non-inline module inside a block unless it has a path attribute";
+ let mut err = self.diagnostic().struct_span_err(id_sp, msg);
+ if paths.path_exists {
+ let msg = format!("Maybe `use` the module `{}` instead of redeclaring it",
+ paths.name);
+ err.span_note(id_sp, &msg);
+ }
+ return Err(err);
+ } else if !self.owns_directory {
let mut err = self.diagnostic().struct_span_err(id_sp,
"cannot declare a new module at this location");
let this_module = match self.mod_path_stack.last() {
-Subproject commit de5c31045dc0f6da1f65d02ee640ccf99ba90e7c
+Subproject commit 39e8e59056bcd84bf9c44d0bcd46ae441567e0a0
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2015-01-25
+2016-02-16
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[derive(Copy)]
+pub struct U256(pub [u64; 4]);
+
+impl Clone for U256 {
+ fn clone(&self) -> U256 {
+ *self
+ }
+}
+
+impl U256 {
+ pub fn new(value: u64) -> U256 {
+ let mut ret = [0; 4];
+ ret[0] = value;
+ U256(ret)
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -g
+
+extern crate issue_31702_1;
+
+use std::collections::HashMap;
+use issue_31702_1::U256;
+
+pub struct Ethash {
+ engine_params: for<'a> fn() -> Option<&'a Vec<u8>>,
+ u256_params: HashMap<String, U256>,
+}
+
+impl Ethash {
+ pub fn u256_param(&mut self, name: &str) -> U256 {
+ let engine = self.engine_params;
+ *self.u256_params.entry(name.to_owned()).or_insert_with(|| {
+ engine().map_or(U256::new(0u64), |_a| loop {})
+ })
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use inner_private_module::*;
+
+mod inner_private_module {
+ pub struct Unnameable1;
+ pub struct Unnameable2;
+ #[derive(Clone, Copy)]
+ pub struct Unnameable3;
+ pub struct Unnameable4;
+ pub struct Unnameable5;
+ pub struct Unnameable6;
+ pub struct Unnameable7;
+ #[derive(Default)]
+ pub struct Unnameable8;
+ pub enum UnnameableEnum {
+ NameableVariant
+ }
+ pub trait UnnameableTrait {
+ type Alias: Default;
+ }
+
+ impl Unnameable1 {
+ pub fn method_of_unnameable_type1(&self) -> &'static str {
+ "Hello1"
+ }
+ }
+ impl Unnameable2 {
+ pub fn method_of_unnameable_type2(&self) -> &'static str {
+ "Hello2"
+ }
+ }
+ impl Unnameable3 {
+ pub fn method_of_unnameable_type3(&self) -> &'static str {
+ "Hello3"
+ }
+ }
+ impl Unnameable4 {
+ pub fn method_of_unnameable_type4(&self) -> &'static str {
+ "Hello4"
+ }
+ }
+ impl Unnameable5 {
+ pub fn method_of_unnameable_type5(&self) -> &'static str {
+ "Hello5"
+ }
+ }
+ impl Unnameable6 {
+ pub fn method_of_unnameable_type6(&self) -> &'static str {
+ "Hello6"
+ }
+ }
+ impl Unnameable7 {
+ pub fn method_of_unnameable_type7(&self) -> &'static str {
+ "Hello7"
+ }
+ }
+ impl Unnameable8 {
+ pub fn method_of_unnameable_type8(&self) -> &'static str {
+ "Hello8"
+ }
+ }
+ impl UnnameableEnum {
+ pub fn method_of_unnameable_enum(&self) -> &'static str {
+ "HelloEnum"
+ }
+ }
+}
+
+pub fn function_returning_unnameable_type() -> Unnameable1 {
+ Unnameable1
+}
+
+pub const CONSTANT_OF_UNNAMEABLE_TYPE: Unnameable2 =
+ Unnameable2;
+
+pub fn function_accepting_unnameable_type(_: Option<Unnameable3>) {}
+
+pub type AliasOfUnnameableType = Unnameable4;
+
+impl Unnameable1 {
+ pub fn inherent_method_returning_unnameable_type(&self) -> Unnameable5 {
+ Unnameable5
+ }
+}
+
+pub trait Tr {
+ fn trait_method_returning_unnameable_type(&self) -> Unnameable6 {
+ Unnameable6
+ }
+}
+impl Tr for Unnameable1 {}
+
+pub use inner_private_module::UnnameableEnum::NameableVariant;
+
+pub struct Struct {
+ pub field_of_unnameable_type: Unnameable7
+}
+
+pub static STATIC: Struct = Struct { field_of_unnameable_type: Unnameable7 } ;
+
+impl UnnameableTrait for AliasOfUnnameableType {
+ type Alias = Unnameable8;
+}
+
+pub fn generic_function<T: UnnameableTrait>() -> T::Alias {
+ Default::default()
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --cfg a{b}
+// error-pattern: invalid --cfg argument: a{b}
+fn main() {}
fn indirect(x: WillChanges) { }
}
-// these are invalid dependencies, though sometimes we create edges
-// anyway.
mod invalid_signatures {
use WontChange;
- // FIXME due to the variance pass having overly conservative edges,
- // we incorrectly think changes are needed here
- #[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
- #[rustc_then_this_would_need(CollectItem)] //~ ERROR OK
+ #[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
+ #[rustc_then_this_would_need(CollectItem)] //~ ERROR no path
trait A {
fn do_something_else_twice(x: WontChange);
}
- // FIXME due to the variance pass having overly conservative edges,
- // we incorrectly think changes are needed here
- #[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
- #[rustc_then_this_would_need(CollectItem)] //~ ERROR OK
+ #[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
+ #[rustc_then_this_would_need(CollectItem)] //~ ERROR no path
fn b(x: WontChange) { }
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path from `WillChange`
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test macro_undefined issue
+
+mod m {
+ #[macro_export]
+ macro_rules! kl {
+ () => ()
+ }
+}
+
+fn main() {
+ k!(); //~ ERROR macro undefined: 'k!'
+ //~^ HELP did you mean `kl!`?
+ kl!(); //~ ERROR macro undefined: 'kl!'
+ //~^ HELP have you added the `#[macro_use]` on the module/import?
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that non-inline modules are not allowed inside blocks.
+
+fn main() {
+ mod foo; //~ ERROR Cannot declare a non-inline module inside a block
+}
// except according to those terms.
#![feature(rustc_attrs)]
-// ignore-msvc: FIXME(#30941)
// error-pattern:panic 1
// error-pattern:drop 2
use std::io::{self, Write};
#![feature(rustc_attrs)]
-// ignore-msvc: FIXME(#30941)
// error-pattern:converging_fn called
// error-pattern:0 dropped
// error-pattern:exit
#![feature(rustc_attrs)]
-// ignore-msvc: FIXME(#30941)
// error-pattern:complex called
// error-pattern:dropped
// error-pattern:exit
#![feature(rustc_attrs)]
-// ignore-msvc: FIXME(#30941)
// error-pattern:diverging_fn called
// error-pattern:0 dropped
+++ /dev/null
--include ../tools.mk
-
-# Windows doesn't correctly handle include statements with escaping paths,
-# so this test will not get run on Windows.
-ifdef IS_WINDOWS
-all:
-else
-all: $(call NATIVE_STATICLIB,llvm-pass)
- $(RUSTC) plugin.rs -C prefer-dynamic
- $(RUSTC) main.rs
-
-$(TMPDIR)/libllvm-pass.o:
- $(CXX) $(CFLAGS) $(LLVM_CXXFLAGS) -c llvm-pass.so.cc -o $(TMPDIR)/libllvm-pass.o
-endif
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include "llvm/IR/Module.h"
-
-using namespace llvm;
-
-namespace {
-
- class TestLLVMPass : public ModulePass {
-
- public:
-
- static char ID;
- TestLLVMPass() : ModulePass(ID) { }
-
- bool runOnModule(Module &M) override;
-
- const char *getPassName() const override {
- return "Some LLVM pass";
- }
-
- };
-
-}
-
-bool TestLLVMPass::runOnModule(Module &M) {
- // A couple examples of operations that previously caused segmentation faults
- // https://github.com/rust-lang/rust/issues/31067
-
- for (auto F = M.begin(); F != M.end(); ++F) {
- /* code */
- }
-
- LLVMContext &C = M.getContext();
- IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
- PointerType::get(Int8Ty, 0);
- return true;
-}
-
-char TestLLVMPass::ID = 0;
-
-static RegisterPass<TestLLVMPass> RegisterAFLPass(
- "some-llvm-pass", "Some LLVM pass");
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(plugin)]
-#![plugin(some_plugin)]
-
-fn main() {}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![feature(plugin_registrar, rustc_private)]
-#![crate_type = "dylib"]
-#![crate_name = "some_plugin"]
-
-extern crate rustc;
-extern crate rustc_plugin;
-
-#[link(name = "llvm-pass", kind = "static")]
-extern {}
-
-use rustc_plugin::registry::Registry;
-
-#[plugin_registrar]
-pub fn plugin_registrar(reg: &mut Registry) {
- reg.register_llvm_pass("some-llvm-pass");
-}
--- /dev/null
+-include ../tools.mk
+
+# Windows doesn't correctly handle include statements with escaping paths,
+# so this test will not get run on Windows.
+ifdef IS_WINDOWS
+all:
+else
+all: $(call NATIVE_STATICLIB,llvm-function-pass) $(call NATIVE_STATICLIB,llvm-module-pass)
+ $(RUSTC) plugin.rs -C prefer-dynamic
+ $(RUSTC) main.rs
+
+$(TMPDIR)/libllvm-function-pass.o:
+ $(CXX) $(CFLAGS) $(LLVM_CXXFLAGS) -c llvm-function-pass.so.cc -o $(TMPDIR)/libllvm-function-pass.o
+
+$(TMPDIR)/libllvm-module-pass.o:
+ $(CXX) $(CFLAGS) $(LLVM_CXXFLAGS) -c llvm-module-pass.so.cc -o $(TMPDIR)/libllvm-module-pass.o
+endif
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "llvm/Pass.h"
+#include "llvm/IR/Function.h"
+
+using namespace llvm;
+
+namespace {
+
+ class TestLLVMPass : public FunctionPass {
+
+ public:
+
+ static char ID;
+ TestLLVMPass() : FunctionPass(ID) { }
+
+ bool runOnFunction(Function &F) override;
+
+ const char *getPassName() const override {
+ return "Some LLVM pass";
+ }
+
+ };
+
+}
+
+bool TestLLVMPass::runOnFunction(Function &F) {
+ // A couple examples of operations that previously caused segmentation faults
+ // https://github.com/rust-lang/rust/issues/31067
+
+ for (auto N = F.begin(); N != F.end(); ++N) {
+ /* code */
+ }
+
+ LLVMContext &C = F.getContext();
+ IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
+ PointerType::get(Int8Ty, 0);
+ return true;
+}
+
+char TestLLVMPass::ID = 0;
+
+static RegisterPass<TestLLVMPass> RegisterAFLPass(
+ "some-llvm-function-pass", "Some LLVM pass");
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "llvm/IR/Module.h"
+
+using namespace llvm;
+
+namespace {
+
+ class TestLLVMPass : public ModulePass {
+
+ public:
+
+ static char ID;
+ TestLLVMPass() : ModulePass(ID) { }
+
+ bool runOnModule(Module &M) override;
+
+ const char *getPassName() const override {
+ return "Some LLVM pass";
+ }
+
+ };
+
+}
+
+bool TestLLVMPass::runOnModule(Module &M) {
+ // A couple examples of operations that previously caused segmentation faults
+ // https://github.com/rust-lang/rust/issues/31067
+
+ for (auto F = M.begin(); F != M.end(); ++F) {
+ /* code */
+ }
+
+ LLVMContext &C = M.getContext();
+ IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
+ PointerType::get(Int8Ty, 0);
+ return true;
+}
+
+char TestLLVMPass::ID = 0;
+
+static RegisterPass<TestLLVMPass> RegisterAFLPass(
+ "some-llvm-module-pass", "Some LLVM pass");
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(plugin)]
+#![plugin(some_plugin)]
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(plugin_registrar, rustc_private)]
+#![crate_type = "dylib"]
+#![crate_name = "some_plugin"]
+
+extern crate rustc;
+extern crate rustc_plugin;
+
+#[link(name = "llvm-function-pass", kind = "static")]
+#[link(name = "llvm-module-pass", kind = "static")]
+extern {}
+
+use rustc_plugin::registry::Registry;
+
+#[plugin_registrar]
+pub fn plugin_registrar(reg: &mut Registry) {
+ reg.register_llvm_pass("some-llvm-function-pass");
+ reg.register_llvm_pass("some-llvm-module-pass");
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ macro_rules! f {
+ () => { 0 + 0 }
+ }
+    // 16 macro invocations per line
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+ f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();f!();
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const ARR: [usize; 5] = [5, 4, 3, 2, 1];
+
+fn main() {
+ assert_eq!(3, ARR[ARR[3]]);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+const ARR: [usize; 5] = [5, 4, 3, 2, 1];
+const BLA: usize = ARR[ARR[3]];
+
+fn main() {
+ assert_eq!(3, BLA);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(const_indexing)]
+
+const ARR: [usize; 5] = [5, 4, 3, 2, 1];
+
+fn main() {
+ assert_eq!(3, ARR[ARR[3]]);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-31702-1.rs
+// aux-build:issue-31702-2.rs
+
+// this test is actually entirely in the linked library crates
+
+extern crate issue_31702_1;
+extern crate issue_31702_2;
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:reachable-unnameable-items.rs
+
+#![feature(braced_empty_structs)]
+#![feature(recover)]
+
+extern crate reachable_unnameable_items;
+use reachable_unnameable_items::*;
+
+fn main() {
+ let res1 = function_returning_unnameable_type().method_of_unnameable_type1();
+ let res2 = CONSTANT_OF_UNNAMEABLE_TYPE.method_of_unnameable_type2();
+ let res4 = AliasOfUnnameableType{}.method_of_unnameable_type4();
+ let res5 = function_returning_unnameable_type().inherent_method_returning_unnameable_type().
+ method_of_unnameable_type5();
+ let res6 = function_returning_unnameable_type().trait_method_returning_unnameable_type().
+ method_of_unnameable_type6();
+ let res7 = STATIC.field_of_unnameable_type.method_of_unnameable_type7();
+ let res8 = generic_function::<AliasOfUnnameableType>().method_of_unnameable_type8();
+ let res_enum = NameableVariant.method_of_unnameable_enum();
+ assert_eq!(res1, "Hello1");
+ assert_eq!(res2, "Hello2");
+ assert_eq!(res4, "Hello4");
+ assert_eq!(res5, "Hello5");
+ assert_eq!(res6, "Hello6");
+ assert_eq!(res7, "Hello7");
+ assert_eq!(res8, "Hello8");
+ assert_eq!(res_enum, "HelloEnum");
+
+ let none = None;
+ function_accepting_unnameable_type(none);
+ let _guard = std::panic::recover(|| none.unwrap().method_of_unnameable_type3());
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(staged_api)]
+#![stable(feature = "a", since = "b")]
+
+mod inner_private_module {
+ // UnnameableTypeAlias isn't marked as reachable, so no stability annotation is required here
+ pub type UnnameableTypeAlias = u8;
+}
+
+#[stable(feature = "a", since = "b")]
+pub fn f() -> inner_private_module::UnnameableTypeAlias {
+ 0
+}
+
+fn main() {}