+Version 1.15.1 (2017-02-07)
+===========================
+
+* [Fix IntoIter::as_mut_slice's signature][39466]
+
+[39466]: https://github.com/rust-lang/rust/pull/39466
+
+
Version 1.15.0 (2017-02-02)
===========================
ifeq ($$(findstring msvc,$(1)),)
ifeq ($$(findstring freebsd,$(1)),)
+ifeq ($$(findstring netbsd,$(1)),)
$(call ADD_INTRINSIC,$(1),gcc_personality_v0.o)
endif
endif
+endif
ifeq ($$(findstring aarch64,$(1)),aarch64)
$(foreach intrinsic,comparetf2.o \
name = "rustc_errors"
version = "0.0.0"
dependencies = [
+ "serialize 0.0.0",
"syntax_pos 0.0.0",
]
rb.use_vendored_sources = '\nvendor = true' in rb.config_toml or \
'CFG_ENABLE_VENDOR' in rb.config_mk
- if 'SUDO_USER' in os.environ:
- if os.environ['USER'] != os.environ['SUDO_USER']:
+ if 'SUDO_USER' in os.environ and not rb.use_vendored_sources:
+ if os.environ.get('USER') != os.environ['SUDO_USER']:
rb.use_vendored_sources = True
print('info: looks like you are running this command under `sudo`')
print(' and so in order to preserve your $HOME this will now')
print(' use vendored sources by default. Note that if this')
print(' does not work you should run a normal build first')
- print(' before running a command like `sudo make intall`')
+ print(' before running a command like `sudo make install`')
if rb.use_vendored_sources:
if not os.path.exists('.cargo'):
"README.md",
"RELEASES.md",
"configure",
- "Makefile.in"
+ "Makefile.in",
+ "x.py",
];
let src_dirs = [
"man",
to generate warnings or errors, when an item is missing documentation.
To generate warnings you use `warn`:
-```rust
+```rust,ignore
#![warn(missing_docs)]
```
You can control a few aspects of the HTML that `rustdoc` generates through the
`#![doc]` version of the attribute:
-```rust
+```rust,ignore
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/")]
To use `#![no_std]`, add it to your crate root:
-```rust
+```rust,ignore
#![no_std]
fn plus_one(x: i32) -> i32 {
prelude](../core/prelude/v1/index.html). This means that a lot of code will Just
Work:
-```rust
+```rust,ignore
#![no_std]
fn may_fail(failure: bool) -> Result<(), &'static str> {
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
fn from(vec: Vec<T>) -> BinaryHeap<T> {
let mut heap = BinaryHeap { data: vec };
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T> From<BinaryHeap<T>> for Vec<T> {
fn from(heap: BinaryHeap<T>) -> Vec<T> {
heap.data
}
}
+#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
+impl<'a> ToString for Cow<'a, str> {
+ #[inline]
+ fn to_string(&self) -> String {
+ self[..].to_owned()
+ }
+}
+
+#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
+impl ToString for String {
+ #[inline]
+ fn to_string(&self) -> String {
+ self.to_owned()
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for String {
#[inline]
sources.extend(&["x86_64/floatdidf.c", "x86_64/floatdisf.c", "x86_64/floatdixf.c"]);
}
} else {
- if !target.contains("freebsd") {
+ if !target.contains("freebsd") && !target.contains("netbsd") {
sources.extend(&["gcc_personality_v0.c"]);
}
#[unstable(feature = "fused", issue = "35602")]
impl FusedIterator for EscapeUnicode {}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
+#[stable(feature = "char_struct_display", since = "1.16.0")]
impl fmt::Display for EscapeUnicode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
#[unstable(feature = "fused", issue = "35602")]
impl FusedIterator for EscapeDefault {}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
+#[stable(feature = "char_struct_display", since = "1.16.0")]
impl fmt::Display for EscapeDefault {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
#[unstable(feature = "fused", issue = "35602")]
impl FusedIterator for EscapeDebug {}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
+#[unstable(feature = "char_escape_debug", issue = "35068")]
impl fmt::Display for EscapeDebug {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
type Err;
/// Performs the conversion.
- fn try_from(T) -> Result<Self, Self::Err>;
+ fn try_from(value: T) -> Result<Self, Self::Err>;
}
////////////////////////////////////////////////////////////////////////////////
// based on "op T" where T is expected to be `Copy`able
macro_rules! forward_ref_unop {
(impl $imp:ident, $method:ident for $t:ty) => {
- #[stable(feature = "rust1", since = "1.0.0")]
+ forward_ref_unop!(impl $imp, $method for $t,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ (impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
+ #[$attr]
impl<'a> $imp for &'a $t {
type Output = <$t as $imp>::Output;
// based on "T op U" where T and U are expected to be `Copy`able
macro_rules! forward_ref_binop {
(impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
- #[stable(feature = "rust1", since = "1.0.0")]
+ forward_ref_binop!(impl $imp, $method for $t, $u,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
impl<'a> $imp<$u> for &'a $t {
type Output = <$t as $imp<$u>>::Output;
}
}
- #[stable(feature = "rust1", since = "1.0.0")]
+ #[$attr]
impl<'a> $imp<&'a $u> for $t {
type Output = <$t as $imp<$u>>::Output;
}
}
- #[stable(feature = "rust1", since = "1.0.0")]
+ #[$attr]
impl<'a, 'b> $imp<&'a $u> for &'b $t {
type Output = <$t as $imp<$u>>::Output;
#[inline]
fn next(&mut self) -> Option<I::Item> {
- for x in self.iter.by_ref() {
+ for x in &mut self.iter {
if (self.predicate)(&x) {
return Some(x);
}
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
}
+
+ // this special case allows the compiler to make `.filter(_).count()`
+ // branchless. Barring perfect branch prediction (which is unattainable in
+    // the general case), this will be much faster in >90% of cases (covering
+    // virtually all real workloads) and only a tiny bit slower in the rest.
+ //
+ // Having this specialization thus allows us to write `.filter(p).count()`
+ // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is
+ // less readable and also less backwards-compatible to Rust before 1.10.
+ //
+    // Using the branchless version will also simplify the LLVM IR, thus
+    // leaving more budget for LLVM optimizations.
+ #[inline]
+ fn count(mut self) -> usize {
+ let mut count = 0;
+ for x in &mut self.iter {
+ count += (self.predicate)(&x) as usize;
+ }
+ count
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
// NB: explicitly use Add and Mul here to inherit overflow checks
macro_rules! integer_sum_product {
- (@impls $zero:expr, $one:expr, $($a:ty)*) => ($(
- #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ (@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($(
+ #[$attr]
impl Sum for $a {
fn sum<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold($zero, Add::add)
}
}
- #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ #[$attr]
impl Product for $a {
fn product<I: Iterator<Item=$a>>(iter: I) -> $a {
iter.fold($one, Mul::mul)
}
}
- #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ #[$attr]
impl<'a> Sum<&'a $a> for $a {
fn sum<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold($zero, Add::add)
}
}
- #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+ #[$attr]
impl<'a> Product<&'a $a> for $a {
fn product<I: Iterator<Item=&'a $a>>(iter: I) -> $a {
iter.fold($one, Mul::mul)
}
)*);
($($a:ty)*) => (
- integer_sum_product!(@impls 0, 1, $($a)+);
- integer_sum_product!(@impls Wrapping(0), Wrapping(1), $(Wrapping<$a>)+);
+ integer_sum_product!(@impls 0, 1,
+ #[stable(feature = "iter_arith_traits", since = "1.12.0")],
+ $($a)+);
+ integer_sum_product!(@impls Wrapping(0), Wrapping(1),
+ #[stable(feature = "wrapping_iter_arith", since = "1.14.0")],
+ $(Wrapping<$a>)+);
);
}
macro_rules! int_module {
($T:ident) => (int_module!($T, #[stable(feature = "rust1", since = "1.0.0")]););
- ($T:ident, $($attr: tt)*) => (
+ ($T:ident, #[$attr:meta]) => (
/// The smallest value that can be represented by this integer type.
- $($attr)*
+ #[$attr]
pub const MIN: $T = $T::min_value();
/// The largest value that can be represented by this integer type.
- $($attr)*
+ #[$attr]
pub const MAX: $T = $T::max_value();
)
}
macro_rules! uint_module {
($T:ident) => (uint_module!($T, #[stable(feature = "rust1", since = "1.0.0")]););
- ($T:ident, $($attr: tt)*) => (
+ ($T:ident, #[$attr:meta]) => (
/// The smallest value that can be represented by this integer type.
- $($attr)*
+ #[$attr]
pub const MIN: $T = $T::min_value();
/// The largest value that can be represented by this integer type.
- $($attr)*
+ #[$attr]
pub const MAX: $T = $T::max_value();
)
}
Wrapping(self.0.wrapping_add(other.0))
}
}
- forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl AddAssign for Wrapping<$t> {
Wrapping(self.0.wrapping_sub(other.0))
}
}
- forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl SubAssign for Wrapping<$t> {
Wrapping(self.0.wrapping_mul(other.0))
}
}
- forward_ref_binop! { impl Mul, mul for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl Mul, mul for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl MulAssign for Wrapping<$t> {
Wrapping(self.0.wrapping_div(other.0))
}
}
- forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl DivAssign for Wrapping<$t> {
Wrapping(self.0.wrapping_rem(other.0))
}
}
- forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl RemAssign for Wrapping<$t> {
Wrapping(!self.0)
}
}
- forward_ref_unop! { impl Not, not for Wrapping<$t> }
+ forward_ref_unop! { impl Not, not for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "rust1", since = "1.0.0")]
impl BitXor for Wrapping<$t> {
Wrapping(self.0 ^ other.0)
}
}
- forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl BitXorAssign for Wrapping<$t> {
Wrapping(self.0 | other.0)
}
}
- forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl BitOrAssign for Wrapping<$t> {
Wrapping(self.0 & other.0)
}
}
- forward_ref_binop! { impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_binop! { impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl BitAndAssign for Wrapping<$t> {
Wrapping(0) - self
}
}
- forward_ref_unop! { impl Neg, neg for Wrapping<$t> }
+ forward_ref_unop! { impl Neg, neg for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
)*)
}
}
}
+ /////////////////////////////////////////////////////////////////////////
+ // Entry-like operations to insert if None and return a reference
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Inserts `v` into the option if it is `None`, then
+ /// returns a mutable reference to the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_entry)]
+ ///
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert(5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_entry", issue = "39288")]
+ pub fn get_or_insert(&mut self, v: T) -> &mut T {
+ match *self {
+ None => *self = Some(v),
+ _ => (),
+ }
+
+ match *self {
+ Some(ref mut v) => v,
+ _ => unreachable!(),
+ }
+ }
+
+ /// Inserts a value computed from `f` into the option if it is `None`, then
+ /// returns a mutable reference to the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_entry)]
+ ///
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert_with(|| 5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_entry", issue = "39288")]
+ pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
+ match *self {
+ None => *self = Some(f()),
+ _ => (),
+ }
+
+ match *self {
+ Some(ref mut v) => v,
+ _ => unreachable!(),
+ }
+ }
+
/////////////////////////////////////////////////////////////////////////
// Misc
/////////////////////////////////////////////////////////////////////////
fn index_mut(self, slice: &mut [T]) -> &mut Self::Output;
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[stable(feature = "slice-get-slice-impls", since = "1.15.0")]
impl<T> SliceIndex<T> for usize {
type Output = T;
}
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[stable(feature = "slice-get-slice-impls", since = "1.15.0")]
impl<T> SliceIndex<T> for ops::Range<usize> {
type Output = [T];
}
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[stable(feature = "slice-get-slice-impls", since = "1.15.0")]
impl<T> SliceIndex<T> for ops::RangeTo<usize> {
type Output = [T];
}
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[stable(feature = "slice-get-slice-impls", since = "1.15.0")]
impl<T> SliceIndex<T> for ops::RangeFrom<usize> {
type Output = [T];
}
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[stable(feature = "slice-get-slice-impls", since = "1.15.0")]
impl<T> SliceIndex<T> for ops::RangeFull {
type Output = [T];
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
impl<T> SliceIndex<T> for ops::RangeInclusive<usize> {
type Output = [T];
}
}
-#[stable(feature = "slice-get-slice-impls", since = "1.13.0")]
+#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
impl<T> SliceIndex<T> for ops::RangeToInclusive<usize> {
type Output = [T];
unstable(feature = "integer_atomics", issue = "32976"),
u64 AtomicU64 ATOMIC_U64_INIT
}
+#[cfg(not(stage0))]
+#[cfg(target_has_atomic = "128")]
+atomic_int! {
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ i128 AtomicI128 ATOMIC_I128_INIT
+}
+#[cfg(not(stage0))]
+#[cfg(target_has_atomic = "128")]
+atomic_int! {
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ unstable(feature = "i128", issue = "35118"),
+ u128 AtomicU128 ATOMIC_U128_INIT
+}
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
stable(feature = "rust1", since = "1.0.0"),
assert_eq!(xs.iter().count(), 6);
}
+#[test]
+fn test_iterator_filter_count() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ assert_eq!(xs.iter().filter(|&&x| x % 2 == 0).count(), 5);
+}
+
#[test]
fn test_iterator_peekable() {
let xs = vec![0, 1, 2, 3, 4, 5];
-Subproject commit 7d57bdcdbb56540f37afe5a934ce12d33a6ca7fc
+Subproject commit cb7f66732175e6171587ed69656b7aae7dd2e6ec
AssociatedItemDefIds(D),
InherentImpls(D),
TypeckTables(D),
+ UsedTraitImports(D),
// The set of impls for a given trait. Ultimately, it would be
// nice to get more fine-grained here (e.g., to include a
AssociatedItemDefIds,
InherentImpls,
TypeckTables,
+ UsedTraitImports,
TraitImpls,
ReprHints,
}
AssociatedItemDefIds(ref d) => op(d).map(AssociatedItemDefIds),
InherentImpls(ref d) => op(d).map(InherentImpls),
TypeckTables(ref d) => op(d).map(TypeckTables),
+ UsedTraitImports(ref d) => op(d).map(UsedTraitImports),
TraitImpls(ref d) => op(d).map(TraitImpls),
TraitItems(ref d) => op(d).map(TraitItems),
ReprHints(ref d) => op(d).map(ReprHints),
self.map.get(k)
}
- pub fn get_mut(&mut self, k: &M::Key) -> Option<&mut M::Value> {
- self.read(k);
- self.write(k);
- self.map.get_mut(k)
- }
-
- pub fn insert(&mut self, k: M::Key, v: M::Value) -> Option<M::Value> {
+ pub fn insert(&mut self, k: M::Key, v: M::Value) {
self.write(&k);
- self.map.insert(k, v)
+ let old_value = self.map.insert(k, v);
+ assert!(old_value.is_none());
}
pub fn contains_key(&self, k: &M::Key) -> bool {
/// Append `elem` to the vector stored for `k`, creating a new vector if needed.
/// This is considered a write to `k`.
+ ///
+ /// NOTE: Caution is required when using this method. You should
+ /// be sure that nobody is **reading from the vector** while you
+ /// are writing to it. Eventually, it'd be nice to remove this.
pub fn push<E: Clone>(&mut self, k: M::Key, elem: E)
where M: DepTrackingMapConfig<Value=Vec<E>>
{
You can build a free-standing crate by adding `#![no_std]` to the crate
attributes:
-```
+```ignore
#![no_std]
```
"lifetimes or labels named `'_` were erroneously allowed"
}
+declare_lint! {
+ pub RESOLVE_TRAIT_ON_DEFAULTED_UNIT,
+ Warn,
+ "attempt to resolve a trait on an expression whose type cannot be inferred but which \
+ currently defaults to ()"
+}
+
declare_lint! {
pub SAFE_EXTERN_STATICS,
Warn,
SUPER_OR_SELF_IN_GLOBAL_PATH,
HR_LIFETIME_IN_ASSOC_TYPE,
LIFETIME_UNDERSCORE,
+ RESOLVE_TRAIT_ON_DEFAULTED_UNIT,
SAFE_EXTERN_STATICS,
PATTERNS_IN_FNS_WITHOUT_BODY,
EXTRA_REQUIREMENT_IN_IMPL,
use lint::{EarlyLintPassObject, LateLintPassObject};
use lint::{Default, CommandLine, Node, Allow, Warn, Deny, Forbid};
use lint::builtin;
+use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
use util::nodemap::FxHashMap;
use std::cmp;
use std::default::Default as StdDefault;
use std::mem;
use std::fmt;
+use std::ops::Deref;
use syntax::attr;
use syntax::ast;
+use syntax::symbol::Symbol;
use syntax_pos::{MultiSpan, Span};
use errors::{self, Diagnostic, DiagnosticBuilder};
use hir;
/// When you call `add_lint` on the session, you wind up storing one
/// of these, which records a "potential lint" at a particular point.
-#[derive(PartialEq)]
+#[derive(PartialEq, RustcEncodable, RustcDecodable)]
pub struct EarlyLint {
/// what lint is this? (e.g., `dead_code`)
pub id: LintId,
check_lint_name_cmdline(sess, self,
&lint_name[..], level);
+ let lint_flag_val = Symbol::intern(&lint_name);
match self.find_lint(&lint_name[..], sess, None) {
- Ok(lint_id) => self.set_level(lint_id, (level, CommandLine)),
+ Ok(lint_id) => self.set_level(lint_id, (level, CommandLine(lint_flag_val))),
Err(FindLintError::Removed) => { }
Err(_) => {
match self.lint_groups.iter().map(|(&x, pair)| (x, pair.0.clone()))
Some(v) => {
v.iter()
.map(|lint_id: &LintId|
- self.set_level(*lint_id, (level, CommandLine)))
+ self.set_level(*lint_id, (level, CommandLine(lint_flag_val))))
.collect::<Vec<()>>();
}
None => {
-> DiagnosticBuilder<'a>
where S: Into<MultiSpan>
{
- let (mut level, source) = lvlsrc;
+ let (level, source) = lvlsrc;
if level == Allow {
return sess.diagnostic().struct_dummy();
}
let name = lint.name_lower();
let mut def = None;
- let msg = match source {
- Default => {
- format!("{}, #[{}({})] on by default", msg,
- level.as_str(), name)
- },
- CommandLine => {
- format!("{} [-{} {}]", msg,
- match level {
- Warn => 'W', Deny => 'D', Forbid => 'F',
- Allow => bug!()
- }, name.replace("_", "-"))
- },
- Node(src) => {
- def = Some(src);
- msg.to_string()
- }
- };
- // For purposes of printing, we can treat forbid as deny.
- if level == Forbid { level = Deny; }
+ // Except for possible note details, forbid behaves like deny.
+ let effective_level = if level == Forbid { Deny } else { level };
- let mut err = match (level, span) {
+ let mut err = match (effective_level, span) {
(Warn, Some(sp)) => sess.struct_span_warn(sp, &msg[..]),
(Warn, None) => sess.struct_warn(&msg[..]),
(Deny, Some(sp)) => sess.struct_span_err(sp, &msg[..]),
_ => bug!("impossible level in raw_emit_lint"),
};
+ match source {
+ Default => {
+ err.note(&format!("#[{}({})] on by default", level.as_str(), name));
+ },
+ CommandLine(lint_flag_val) => {
+ let flag = match level {
+ Warn => "-W", Deny => "-D", Forbid => "-F",
+ Allow => bug!("earlier conditional return should handle Allow case")
+ };
+ let hyphen_case_lint_name = name.replace("_", "-");
+ if lint_flag_val.as_str().deref() == name {
+ err.note(&format!("requested on the command line with `{} {}`",
+ flag, hyphen_case_lint_name));
+ } else {
+ let hyphen_case_flag_val = lint_flag_val.as_str().replace("_", "-");
+ err.note(&format!("`{} {}` implied by `{} {}`",
+ flag, hyphen_case_lint_name, flag, hyphen_case_flag_val));
+ }
+ },
+ Node(lint_attr_name, src) => {
+ def = Some(src);
+ if lint_attr_name.as_str().deref() != name {
+ let level_str = level.as_str();
+ err.note(&format!("#[{}({})] implied by #[{}({})]",
+ level_str, name, level_str, lint_attr_name));
+ }
+ }
+ }
+
// Check for future incompatibility lints and issue a stronger warning.
if let Some(future_incompatible) = lints.future_incompatible(LintId::of(lint)) {
let explanation = format!("this was previously accepted by the compiler \
self.lookup_and_emit(lint, Some(span), msg);
}
- fn early_lint(&self, early_lint: EarlyLint) {
+ fn early_lint(&self, early_lint: &EarlyLint) {
let span = early_lint.diagnostic.span.primary_span().expect("early lint w/o primary span");
let mut err = self.struct_span_lint(early_lint.id.lint,
span,
}
};
+ let lint_attr_name = result.expect("lint attribute should be well-formed").0;
+
for (lint_id, level, span) in v {
let (now, now_source) = self.lints().get_level_source(lint_id);
if now == Forbid && level != Forbid {
diag_builder.span_label(span, &format!("overruled by previous forbid"));
match now_source {
LintSource::Default => &mut diag_builder,
- LintSource::Node(forbid_source_span) => {
+ LintSource::Node(_, forbid_source_span) => {
diag_builder.span_label(forbid_source_span,
&format!("`forbid` level set here"))
},
- LintSource::CommandLine => {
+ LintSource::CommandLine(_) => {
diag_builder.note("`forbid` lint level was set on command line")
}
}.emit()
let src = self.lints().get_level_source(lint_id).1;
self.level_stack().push((lint_id, (now, src)));
pushed += 1;
- self.mut_lints().set_level(lint_id, (level, Node(span)));
+ self.mut_lints().set_level(lint_id, (level, Node(lint_attr_name, span)));
}
}
}
// Output any lints that were previously added to the session.
fn visit_id(&mut self, id: ast::NodeId) {
- if let Some(lints) = self.sess().lints.borrow_mut().remove(&id) {
- debug!("LateContext::visit_id: id={:?} lints={:?}", id, lints);
- for early_lint in lints {
- self.early_lint(early_lint);
- }
+ let lints = self.sess().lints.borrow_mut().take(id);
+ for early_lint in lints.iter().chain(self.tables.lints.get(id)) {
+ debug!("LateContext::visit_id: id={:?} early_lint={:?}", id, early_lint);
+ self.early_lint(early_lint);
}
}
// If we missed any lints added to the session, then there's a bug somewhere
// in the iteration code.
- for (id, v) in tcx.sess.lints.borrow().iter() {
+ if let Some((id, v)) = tcx.sess.lints.borrow().get_any() {
for early_lint in v {
span_bug!(early_lint.diagnostic.span.clone(),
"unprocessed lint {:?} at {}",
// Visit the whole crate.
cx.with_lint_attrs(&krate.attrs, |cx| {
// Lints may be assigned to the whole crate.
- if let Some(lints) = cx.sess.lints.borrow_mut().remove(&ast::CRATE_NODE_ID) {
- for early_lint in lints {
- cx.early_lint(early_lint);
- }
+ let lints = cx.sess.lints.borrow_mut().take(ast::CRATE_NODE_ID);
+ for early_lint in lints {
+ cx.early_lint(&early_lint);
}
// since the root module isn't visited as an item (because it isn't an
// If we missed any lints added to the session, then there's a bug somewhere
// in the iteration code.
- for (_, v) in sess.lints.borrow().iter() {
+ for (_, v) in sess.lints.borrow().get_any() {
for early_lint in v {
span_bug!(early_lint.diagnostic.span.clone(), "unprocessed lint {:?}", early_lint);
}
}
}
+
+impl Encodable for LintId {
+ fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
+ s.emit_str(&self.lint.name.to_lowercase())
+ }
+}
+
+impl Decodable for LintId {
+ #[inline]
+ fn decode<D: Decoder>(d: &mut D) -> Result<LintId, D::Error> {
+ let s = d.read_str()?;
+ ty::tls::with(|tcx| {
+ match tcx.sess.lint_store.borrow().find_lint(&s, tcx.sess, None) {
+ Ok(id) => Ok(id),
+ Err(_) => panic!("invalid lint-id `{}`", s),
+ }
+ })
+ }
+}
pub use self::Level::*;
pub use self::LintSource::*;
+use hir;
+use hir::intravisit::FnKind;
use std::hash;
use std::ascii::AsciiExt;
use syntax_pos::Span;
-use hir::intravisit::FnKind;
use syntax::visit as ast_visit;
use syntax::ast;
-use hir;
+use syntax::symbol::Symbol;
pub use lint::context::{LateContext, EarlyContext, LintContext, LintStore,
raw_emit_lint, check_crate, check_ast_crate, gather_attrs,
raw_struct_lint, FutureIncompatibleInfo, EarlyLint, IntoEarlyLint};
+pub use lint::table::LintTable;
+
/// Specification of a single lint.
#[derive(Copy, Clone, Debug)]
pub struct Lint {
Default,
/// Lint level was set by an attribute.
- Node(Span),
+ Node(ast::Name, Span),
/// Lint level was set by a command-line flag.
- CommandLine,
+ CommandLine(Symbol),
}
pub type LevelSource = (Level, LintSource);
pub mod builtin;
mod context;
+mod table;
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use syntax::ast;
+use syntax_pos::MultiSpan;
+use util::nodemap::NodeMap;
+
+use super::{Lint, LintId, EarlyLint, IntoEarlyLint};
+
+#[derive(RustcEncodable, RustcDecodable)]
+pub struct LintTable {
+ map: NodeMap<Vec<EarlyLint>>
+}
+
+impl LintTable {
+ pub fn new() -> Self {
+ LintTable { map: NodeMap() }
+ }
+
+ pub fn add_lint<S: Into<MultiSpan>>(&mut self,
+ lint: &'static Lint,
+ id: ast::NodeId,
+ sp: S,
+ msg: String)
+ {
+ self.add_lint_diagnostic(lint, id, (sp, &msg[..]))
+ }
+
+ pub fn add_lint_diagnostic<M>(&mut self,
+ lint: &'static Lint,
+ id: ast::NodeId,
+ msg: M)
+ where M: IntoEarlyLint,
+ {
+ let lint_id = LintId::of(lint);
+ let early_lint = msg.into_early_lint(lint_id);
+ let arr = self.map.entry(id).or_insert(vec![]);
+ if !arr.contains(&early_lint) {
+ arr.push(early_lint);
+ }
+ }
+
+ pub fn get(&self, id: ast::NodeId) -> &[EarlyLint] {
+ self.map.get(&id).map(|v| &v[..]).unwrap_or(&[])
+ }
+
+ pub fn take(&mut self, id: ast::NodeId) -> Vec<EarlyLint> {
+ self.map.remove(&id).unwrap_or(vec![])
+ }
+
+ pub fn transfer(&mut self, into: &mut LintTable) {
+ into.map.extend(self.map.drain());
+ }
+
+    /// Returns the first `(id, lints)` pair whose lint vector is
+    /// non-empty. Used to implement a sanity check that all node-ids
+    /// have had their lints processed.
+ pub fn get_any(&self) -> Option<(&ast::NodeId, &Vec<EarlyLint>)> {
+ self.map.iter()
+ .filter(|&(_, v)| !v.is_empty())
+ .next()
+ }
+}
+
use hir;
use rustc_back::PanicStrategy;
-pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown};
+pub use self::NativeLibraryKind::*;
// lonely orphan structs and enums looking for a better home
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub enum NativeLibraryKind {
NativeStatic, // native static library (.a archive)
+ NativeStaticNobundle, // native static library, which doesn't get bundled into .rlibs
NativeFramework, // OSX-specific
NativeUnknown, // default way to specify a dynamic library
}
PatKind::Tuple(ref subpats, ddpos) => {
// (p1, ..., pN)
let expected_len = match self.pat_ty(&pat)?.sty {
- ty::TyTuple(ref tys) => tys.len(),
+ ty::TyTuple(ref tys, _) => tys.len(),
ref ty => span_bug!(pat.span, "tuple pattern unexpected type {:?}", ty),
};
for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) {
let lhs_ty = lhs.ty(mir, tcx);
let rhs_ty = rhs.ty(mir, tcx);
let ty = op.ty(tcx, lhs_ty, rhs_ty);
- let ty = tcx.intern_tup(&[ty, tcx.types.bool]);
+ let ty = tcx.intern_tup(&[ty, tcx.types.bool], false);
Some(ty)
}
&Rvalue::UnaryOp(_, ref operand) => {
}
AggregateKind::Tuple => {
Some(tcx.mk_tup(
- ops.iter().map(|op| op.ty(mir, tcx))
+ ops.iter().map(|op| op.ty(mir, tcx)),
+ false
))
}
AggregateKind::Adt(def, _, substs, _) => {
(Some(name), "dylib") => (name, cstore::NativeUnknown),
(Some(name), "framework") => (name, cstore::NativeFramework),
(Some(name), "static") => (name, cstore::NativeStatic),
+ (Some(name), "static-nobundle") => (name, cstore::NativeStaticNobundle),
(_, s) => {
early_error(error_format, &format!("unknown library kind `{}`, expected \
one of dylib, framework, or static",
s));
}
};
+ if kind == cstore::NativeStaticNobundle && !nightly_options::is_nightly_build() {
+ early_error(error_format, &format!("the library kind 'static-nobundle' is only \
+ accepted on the nightly compiler"));
+ }
let mut name_parts = name.splitn(2, ':');
let name = name_parts.next().unwrap();
let new_name = name_parts.next();
use session::search_paths::PathKind;
use session::config::DebugInfoLevel;
use ty::tls;
-use util::nodemap::{NodeMap, FxHashMap, FxHashSet};
+use util::nodemap::{FxHashMap, FxHashSet};
use util::common::duration_to_secs_str;
use mir::transform as mir_pass;
pub local_crate_source_file: Option<PathBuf>,
pub working_dir: PathBuf,
pub lint_store: RefCell<lint::LintStore>,
- pub lints: RefCell<NodeMap<Vec<lint::EarlyLint>>>,
+ pub lints: RefCell<lint::LintTable>,
/// Set of (LintId, span, message) tuples tracking lint (sub)diagnostics
/// that have been set once, but should not be set again, in order to avoid
/// redundantly verbose output (Issue #24690).
pub fn unimpl(&self, msg: &str) -> ! {
self.diagnostic().unimpl(msg)
}
+
pub fn add_lint<S: Into<MultiSpan>>(&self,
lint: &'static lint::Lint,
id: ast::NodeId,
sp: S,
msg: String)
{
- self.add_lint_diagnostic(lint, id, (sp, &msg[..]))
+ self.lints.borrow_mut().add_lint(lint, id, sp, msg);
}
pub fn add_lint_diagnostic<M>(&self,
msg: M)
where M: lint::IntoEarlyLint,
{
- let lint_id = lint::LintId::of(lint);
- let mut lints = self.lints.borrow_mut();
- let early_lint = msg.into_early_lint(lint_id);
- if let Some(arr) = lints.get_mut(&id) {
- if !arr.contains(&early_lint) {
- arr.push(early_lint);
- }
- return;
- }
- lints.insert(id, vec![early_lint]);
+ self.lints.borrow_mut().add_lint_diagnostic(lint, id, msg);
}
+
pub fn reserve_node_ids(&self, count: usize) -> ast::NodeId {
let id = self.next_node_id.get();
local_crate_source_file: local_crate_source_file,
working_dir: env::current_dir().unwrap(),
lint_store: RefCell::new(lint::LintStore::new()),
- lints: RefCell::new(NodeMap()),
+ lints: RefCell::new(lint::LintTable::new()),
one_time_diagnostics: RefCell::new(FxHashSet()),
plugin_llvm_passes: RefCell::new(Vec::new()),
mir_passes: RefCell::new(mir_pass::Passes::new()),
use std::rc::Rc;
use syntax::abi::Abi;
use hir;
+use lint;
use util::nodemap::FxHashMap;
struct InferredObligationsSnapshotVecDelegate<'tcx> {
debug!("select({:?})", obligation);
assert!(!obligation.predicate.has_escaping_regions());
+ let tcx = self.tcx();
let dep_node = obligation.predicate.dep_node();
- let _task = self.tcx().dep_graph.in_task(dep_node);
+ let _task = tcx.dep_graph.in_task(dep_node);
let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
- match self.candidate_from_obligation(&stack)? {
- None => Ok(None),
+ let ret = match self.candidate_from_obligation(&stack)? {
+ None => None,
Some(candidate) => {
let mut candidate = self.confirm_candidate(obligation, candidate)?;
let inferred_obligations = (*self.inferred_obligations).into_iter().cloned();
candidate.nested_obligations_mut().extend(inferred_obligations);
- Ok(Some(candidate))
+ Some(candidate)
},
+ };
+
+ // Test whether this is a `()` which was produced by defaulting a
+ // diverging type variable with `!` disabled. If so, we may need
+ // to raise a warning.
+ if obligation.predicate.skip_binder().self_ty().is_defaulted_unit() {
+ let mut raise_warning = true;
+ // Don't raise a warning if the trait is implemented for ! and only
+ // permits a trivial implementation for !. This stops us warning
+ // about (for example) `(): Clone` becoming `!: Clone` because such
+ // a switch can't cause code to stop compiling or execute
+ // differently.
+ let mut never_obligation = obligation.clone();
+ let def_id = never_obligation.predicate.skip_binder().trait_ref.def_id;
+ never_obligation.predicate = never_obligation.predicate.map_bound(|mut trait_pred| {
+ // Swap out () with ! so we can check if the trait is implemented for !
+ {
+ let mut trait_ref = &mut trait_pred.trait_ref;
+ let unit_substs = trait_ref.substs;
+ let mut never_substs = Vec::with_capacity(unit_substs.len());
+ never_substs.push(From::from(tcx.types.never));
+ never_substs.extend(&unit_substs[1..]);
+ trait_ref.substs = tcx.intern_substs(&never_substs);
+ }
+ trait_pred
+ });
+ if let Ok(Some(..)) = self.select(&never_obligation) {
+ if !tcx.trait_relevant_for_never(def_id) {
+ // The trait is also implemented for ! and the resulting
+ // implementation cannot actually be invoked in any way.
+ raise_warning = false;
+ }
+ }
+
+ if raise_warning {
+ tcx.sess.add_lint(lint::builtin::RESOLVE_TRAIT_ON_DEFAULTED_UNIT,
+ obligation.cause.body_id,
+ obligation.cause.span,
+ format!("code relies on type inference rules which are likely \
+ to change"));
+ }
}
+ Ok(ret)
}
///////////////////////////////////////////////////////////////////////////
ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) => Never,
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
Where(ty::Binder(tys.last().into_iter().cloned().collect()))
}
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder(match sized_crit.sty {
- ty::TyTuple(tys) => tys.to_vec().subst(self.tcx(), substs),
+ ty::TyTuple(tys, _) => tys.to_vec().subst(self.tcx(), substs),
ty::TyBool => vec![],
_ => vec![sized_crit.subst(self.tcx(), substs)]
}))
Where(ty::Binder(vec![element_ty]))
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
// (*) binder moved here
Where(ty::Binder(tys.to_vec()))
}
vec![element_ty]
}
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(ref tys, _) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
tys.to_vec()
}
let arguments_tuple = match tuple_arguments {
TupleArgumentsFlag::No => sig.skip_binder().inputs()[0],
TupleArgumentsFlag::Yes =>
- self.intern_tup(sig.skip_binder().inputs()),
+ self.intern_tup(sig.skip_binder().inputs(), false),
};
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,
|ty| tc_ty(tcx, &ty, cache))
}
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(ref tys, _) => {
TypeContents::union(&tys[..],
|ty| tc_ty(tcx, *ty, cache))
}
use dep_graph::{DepGraph, DepTrackingMap};
use session::Session;
+use lint;
use middle;
use hir::TraitMap;
use hir::def::Def;
/// Maps a cast expression to its kind. This is keyed on the
/// *from* expression of the cast, not the cast itself.
pub cast_kinds: NodeMap<ty::cast::CastKind>,
+
+ /// Lints for the body of this fn generated by typeck.
+ pub lints: lint::LintTable,
}
impl<'tcx> TypeckTables<'tcx> {
liberated_fn_sigs: NodeMap(),
fru_field_types: NodeMap(),
cast_kinds: NodeMap(),
+ lints: lint::LintTable::new(),
}
}
/// Set of trait imports actually used in the method resolution.
/// This is used for warning unused imports.
- pub used_trait_imports: RefCell<NodeSet>,
+ pub used_trait_imports: RefCell<DepTrackingMap<maps::UsedTraitImports<'tcx>>>,
/// The set of external nominal types whose implementations have been read.
/// This is used for lazy resolution of methods.
inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
used_unsafe: RefCell::new(NodeSet()),
used_mut_nodes: RefCell::new(NodeSet()),
- used_trait_imports: RefCell::new(NodeSet()),
+ used_trait_imports: RefCell::new(DepTrackingMap::new(dep_graph.clone())),
populated_external_types: RefCell::new(DefIdSet()),
populated_external_primitive_impls: RefCell::new(DefIdSet()),
stability: RefCell::new(stability),
self.mk_ty(TySlice(ty))
}
- pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
- self.mk_ty(TyTuple(self.intern_type_list(ts)))
+ pub fn intern_tup(self, ts: &[Ty<'tcx>], defaulted: bool) -> Ty<'tcx> {
+ self.mk_ty(TyTuple(self.intern_type_list(ts), defaulted))
}
- pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
- iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts))))
+ pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I,
+ defaulted: bool) -> I::Output {
+ iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts), defaulted)))
}
pub fn mk_nil(self) -> Ty<'tcx> {
- self.intern_tup(&[])
+ self.intern_tup(&[], false)
}
pub fn mk_diverging_default(self) -> Ty<'tcx> {
if self.sess.features.borrow().never_type {
self.types.never
} else {
- self.mk_nil()
+ self.intern_tup(&[], true)
}
}
match self.sty {
ty::TyBool | ty::TyChar | ty::TyInt(_) |
ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(),
- ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(),
+ ty::TyTuple(ref tys, _) if tys.is_empty() => self.to_string(),
ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)),
ty::TyArray(_, n) => format!("array of {} elements", n),
|p| format!("trait {}", tcx.item_path_str(p.def_id())))
}
ty::TyClosure(..) => "closure".to_string(),
- ty::TyTuple(_) => "tuple".to_string(),
+ ty::TyTuple(..) => "tuple".to_string(),
ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(),
ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(),
ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(),
Some(ClosureSimplifiedType(def_id))
}
ty::TyNever => Some(NeverSimplifiedType),
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(ref tys, _) => {
Some(TupleSimplifiedType(tys.len()))
}
ty::TyFnDef(.., ref f) | ty::TyFnPtr(ref f) => {
self.add_ty(m.ty);
}
- &ty::TyTuple(ref ts) => {
+ &ty::TyTuple(ref ts, _) => {
self.add_tys(&ts[..]);
}
},
TyNever => DefIdForest::full(tcx),
- TyTuple(ref tys) => {
+ TyTuple(ref tys, _) => {
DefIdForest::union(tcx, tys.iter().map(|ty| {
ty.uninhabited_from(visited, tcx)
}))
ty::TyRawPtr(mt) |
ty::TyRef(_, mt) => characteristic_def_id_of_type(mt.ty),
- ty::TyTuple(ref tys) => tys.iter()
- .filter_map(|ty| characteristic_def_id_of_type(ty))
- .next(),
+ ty::TyTuple(ref tys, _) => tys.iter()
+ .filter_map(|ty| characteristic_def_id_of_type(ty))
+ .next(),
ty::TyFnDef(def_id, ..) |
ty::TyClosure(def_id, _) => Some(def_id),
Some(&variant.memory_index[..]))
}
// Can we use one of the fields in this tuple?
- (&Univariant { ref variant, .. }, &ty::TyTuple(tys)) => {
+ (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
Struct::non_zero_field_paths(infcx, tys.iter().cloned(),
Some(&variant.memory_index[..]))
}
Univariant { variant: st, non_zero: false }
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
// FIXME(camlorn): if we ever allow unsized tuples, this needs to be checked.
// See the univariant case below to learn how.
let st = Struct::new(dl,
use hir::def_id::DefId;
use mir;
use ty::{self, Ty};
+use util::nodemap::DefIdSet;
use std::cell::RefCell;
use std::marker::PhantomData;
dep_map_ty! { ClosureKinds: ItemSignature(DefId) -> ty::ClosureKind }
dep_map_ty! { ClosureTypes: ItemSignature(DefId) -> ty::ClosureTy<'tcx> }
dep_map_ty! { TypeckTables: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx> }
+dep_map_ty! { UsedTraitImports: UsedTraitImports(DefId) -> DefIdSet }
AssociatedKind::Type => Def::AssociatedTy(self.def_id),
}
}
+
+ /// Tests whether the associated item admits a non-trivial implementation
+ /// for !
+ pub fn relevant_for_never<'tcx>(&self) -> bool {
+ match self.kind {
+ AssociatedKind::Const => true,
+ AssociatedKind::Type => true,
+ // FIXME(canndrew): Be more thorough here, check if any argument is uninhabited.
+ AssociatedKind::Method => !self.method_has_self_argument,
+ }
+ }
}
#[derive(Clone, Debug, PartialEq, Eq, Copy, RustcEncodable, RustcDecodable)]
_ if tys.references_error() => tcx.types.err,
0 => tcx.types.bool,
1 => tys[0],
- _ => tcx.intern_tup(&tys[..])
+ _ => tcx.intern_tup(&tys[..], false)
};
let old = tcx.adt_sized_constraint.borrow().get(&self.did).cloned();
vec![ty]
}
- TyTuple(ref tys) => {
+ TyTuple(ref tys, _) => {
match tys.last() {
None => vec![],
Some(ty) => self.sized_constraint_for_ty(tcx, stack, ty)
.subst(tcx, substs);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}",
ty, adt_ty);
- if let ty::TyTuple(ref tys) = adt_ty.sty {
+ if let ty::TyTuple(ref tys, _) = adt_ty.sty {
tys.iter().flat_map(|ty| {
self.sized_constraint_for_ty(tcx, stack, ty)
}).collect()
}
}
+ pub fn trait_relevant_for_never(self, did: DefId) -> bool {
+ self.associated_items(did).any(|item| {
+ item.relevant_for_never()
+ })
+ }
+
pub fn custom_coerce_unsized_kind(self, did: DefId) -> adjustment::CustomCoerceUnsized {
self.custom_coerce_unsized_kinds.memoize(did, || {
let (kind, src) = if did.krate != LOCAL_CRATE {
Ok(tcx.mk_slice(t))
}
- (&ty::TyTuple(as_), &ty::TyTuple(bs)) =>
+ (&ty::TyTuple(as_, a_defaulted), &ty::TyTuple(bs, b_defaulted)) =>
{
if as_.len() == bs.len() {
- Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)))?)
+ let defaulted = a_defaulted || b_defaulted;
+ Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)), defaulted)?)
} else if !(as_.is_empty() || bs.is_empty()) {
Err(TypeError::TupleSize(
expected_found(relation, &as_.len(), &bs.len())))
ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)),
ty::TyDynamic(ref trait_ty, ref region) =>
ty::TyDynamic(trait_ty.fold_with(folder), region.fold_with(folder)),
- ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)),
+ ty::TyTuple(ts, defaulted) => ty::TyTuple(ts.fold_with(folder), defaulted),
ty::TyFnDef(def_id, substs, f) => {
ty::TyFnDef(def_id,
substs.fold_with(folder),
ty::TyAdt(_, substs) => substs.visit_with(visitor),
ty::TyDynamic(ref trait_ty, ref reg) =>
trait_ty.visit_with(visitor) || reg.visit_with(visitor),
- ty::TyTuple(ts) => ts.visit_with(visitor),
+ ty::TyTuple(ts, _) => ts.visit_with(visitor),
ty::TyFnDef(_, substs, ref f) => {
substs.visit_with(visitor) || f.visit_with(visitor)
}
TyNever,
/// A tuple type. For example, `(i32, bool)`.
- TyTuple(&'tcx Slice<Ty<'tcx>>),
+ /// The bool indicates whether this is a unit tuple and was created by
+ /// defaulting a diverging type variable with feature(never_type) disabled.
+ /// Its only purpose is for raising future-compatibility warnings for when
+ /// diverging type variables start defaulting to ! instead of ().
+ TyTuple(&'tcx Slice<Ty<'tcx>>, bool),
/// The projection of an associated type. For example,
/// `<T as Trait<..>>::N`.
pub fn is_nil(&self) -> bool {
match self.sty {
- TyTuple(ref tys) => tys.is_empty(),
+ TyTuple(ref tys, _) => tys.is_empty(),
_ => false
}
}
}
}
+ // Test whether this is a `()` which was produced by defaulting a
+ // diverging type variable with feature(never_type) disabled.
+ pub fn is_defaulted_unit(&self) -> bool {
+ match self.sty {
+ TyTuple(_, true) => true,
+ _ => false,
+ }
+ }
+
/// Checks whether a type is visibly uninhabited from a particular module.
/// # Example
/// ```rust
TySlice(_) |
TyRawPtr(_) |
TyNever |
- TyTuple(_) |
+ TyTuple(..) |
TyParam(_) |
TyInfer(_) |
TyError => {
// Don't use `struct_variant`, this may be a univariant enum.
adt.variants[0].fields.get(i).map(|f| f.ty(self, substs))
}
- (&TyTuple(ref v), None) => v.get(i).cloned(),
+ (&TyTuple(ref v, _), None) => v.get(i).cloned(),
_ => None
}
}
self.def_id(d);
}
}
- TyTuple(tys) => {
+ TyTuple(tys, defaulted) => {
self.hash(tys.len());
+ self.hash(defaulted);
}
TyParam(p) => {
self.hash(p.idx);
seen: &mut Vec<Ty<'tcx>>, ty: Ty<'tcx>)
-> Representability {
match ty.sty {
- TyTuple(ref ts) => {
+ TyTuple(ref ts, _) => {
find_nonrepresentable(tcx, sp, seen, ts.iter().cloned())
}
// Fixed-length vectors.
ty::TyClosure(_, ref substs) => {
stack.extend(substs.substs.types().rev());
}
- ty::TyTuple(ts) => {
+ ty::TyTuple(ts, _) => {
stack.extend(ts.iter().cloned().rev());
}
ty::TyFnDef(_, substs, ref ft) => {
self.require_sized(subty, traits::SliceOrArrayElem);
}
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(ref tys, _) => {
if let Some((_last, rest)) = tys.split_last() {
for elem in rest {
self.require_sized(elem, traits::TupleElem);
if !verbose && fn_trait_kind.is_some() && projections.len() == 1 {
let projection_ty = projections[0].ty;
- if let TyTuple(ref args) = substs.type_at(1).sty {
+ if let TyTuple(ref args, _) = substs.type_at(1).sty {
return fn_sig(f, args, false, projection_ty);
}
}
write!(f, "{}", tm)
}
TyNever => write!(f, "!"),
- TyTuple(ref tys) => {
+ TyTuple(ref tys, _) => {
write!(f, "(")?;
let mut tys = tys.iter();
if let Some(&ty) = tys.next() {
linker_is_gnu: true,
allow_asm: false,
obj_is_bitcode: true,
+ is_like_emscripten: true,
max_atomic_width: Some(32),
post_link_args: vec!["-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()],
target_family: Some("unix".to_string()),
/// Whether the target toolchain is like Android's. Only useful for compiling against Android.
/// Defaults to false.
pub is_like_android: bool,
+ /// Whether the target toolchain is like Emscripten's. Only useful for compiling with
+ /// Emscripten toolchain.
+ /// Defaults to false.
+ pub is_like_emscripten: bool,
/// Whether the linker support GNU-like arguments such as -O. Defaults to false.
pub linker_is_gnu: bool,
/// The MinGW toolchain has a known issue that prevents it from correctly
is_like_solaris: false,
is_like_windows: false,
is_like_android: false,
+ is_like_emscripten: false,
is_like_msvc: false,
linker_is_gnu: false,
allows_weak_linkage: true,
key!(is_like_solaris, bool);
key!(is_like_windows, bool);
key!(is_like_msvc, bool);
+ key!(is_like_emscripten, bool);
key!(is_like_android, bool);
key!(linker_is_gnu, bool);
key!(allows_weak_linkage, bool);
target_option_val!(is_like_solaris);
target_option_val!(is_like_windows);
target_option_val!(is_like_msvc);
+ target_option_val!(is_like_emscripten);
target_option_val!(is_like_android);
target_option_val!(linker_is_gnu);
target_option_val!(allows_weak_linkage);
linker_is_gnu: true,
allow_asm: false,
obj_is_bitcode: true,
+ is_like_emscripten: true,
max_atomic_width: Some(32),
post_link_args: vec!["-s".to_string(), "BINARYEN=1".to_string(),
"-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()],
};
match parent_ty.sty {
- ty::TyTuple(ref v) => {
+ ty::TyTuple(ref v, _) => {
let tuple_idx = match *origin_field_name {
mc::PositionalField(tuple_idx) => tuple_idx,
mc::NamedField(_) =>
let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx).collect();
self.open_drop_for_tuple(c, &tys)
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
self.open_drop_for_tuple(c, tys)
}
ty::TyAdt(def, _) if def.is_box() => {
fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize {
debug!("constructor_arity({:?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.len(),
+ ty::TyTuple(ref fs, _) => fs.len(),
ty::TySlice(..) | ty::TyArray(..) => match *ctor {
Slice(length) => length,
ConstantValue(_) => 0,
{
debug!("constructor_sub_pattern_tys({:?}, {:?})", ctor, ty);
match ty.sty {
- ty::TyTuple(ref fs) => fs.into_iter().map(|t| *t).collect(),
+ ty::TyTuple(ref fs, _) => fs.into_iter().map(|t| *t).collect(),
ty::TySlice(ty) | ty::TyArray(ty, _) => match *ctor {
Slice(length) => repeat(ty).take(length).collect(),
ConstantValue(_) => vec![],
PatKind::Tuple(ref subpatterns, ddpos) => {
let ty = self.tables.node_id_to_type(pat.id);
match ty.sty {
- ty::TyTuple(ref tys) => {
+ ty::TyTuple(ref tys, _) => {
let subpatterns =
subpatterns.iter()
.enumerate_and_adjust(tys.len(), ddpos)
}
pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> {
- self.infcx.tcx.intern_tup(&[ty1, ty2])
+ self.infcx.tcx.intern_tup(&[ty1, ty2], false)
}
pub fn t_param(&self, index: u32) -> Ty<'tcx> {
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
- let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty]);
- let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty]);
+ let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty], false);
+ let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty], false);
let walked: Vec<_> = tup2_ty.walk().collect();
assert_eq!(walked,
[tup2_ty, tup1_ty, int_ty, uint_ty, int_ty, uint_ty, tup1_ty, int_ty,
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
- let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty]);
- let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty]);
+ let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty], false);
+ let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty], false);
// types we expect to see (in order), plus a boolean saying
// whether to skip the subtree.
crate-type = ["dylib"]
[dependencies]
+serialize = { path = "../libserialize" }
syntax_pos = { path = "../libsyntax_pos" }
use snippet::Style;
#[must_use]
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub struct Diagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
}
/// For example a note attached to an error.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub struct SubDiagnostic {
pub level: Level,
pub message: Vec<(String, Style)>,
extern crate term;
extern crate libc;
+extern crate serialize as rustc_serialize;
extern crate syntax_pos;
pub use emitter::ColorConfig;
use syntax_pos::{BytePos, Loc, FileLinesResult, FileName, MultiSpan, Span, NO_EXPANSION};
use syntax_pos::MacroBacktrace;
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub enum RenderSpan {
/// A FullSpan renders with both with an initial line for the
/// message, prefixed by file:linenum, followed by a summary of
Suggestion(CodeSuggestion),
}
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub struct CodeSuggestion {
pub msp: MultiSpan,
pub substitutes: Vec<String>,
}
-#[derive(Copy, PartialEq, Clone, Debug)]
+#[derive(Copy, PartialEq, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum Level {
Bug,
Fatal,
pub style: Style,
}
-#[derive(Copy, Clone, Debug, PartialEq)]
+#[derive(Copy, Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)]
pub enum Style {
HeaderMsg,
FileNameStyle,
#![feature(staged_api)]
#![feature(rand)]
#![feature(core_intrinsics)]
+#![feature(conservative_impl_trait)]
+#![feature(field_init_shorthand)]
+#![feature(pub_restricted)]
extern crate graphviz;
#[macro_use] extern crate rustc;
/// Data for use when recompiling the **current crate**.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedDepGraph {
- pub edges: Vec<SerializedEdge>,
+ pub edges: Vec<SerializedEdgeSet>,
/// These are hashes of two things:
/// - the HIR nodes in this crate
pub hashes: Vec<SerializedHash>,
}
-/// Represents a "reduced" dependency edge. Unlike the full dep-graph,
-/// the dep-graph we serialize contains only edges `S -> T` where the
-/// source `S` is something hashable (a HIR node or foreign metadata)
-/// and the target `T` is something significant, like a work-product.
-/// Normally, significant nodes are only those that have saved data on
-/// disk, but in unit-testing the set of significant nodes can be
-/// increased.
-pub type SerializedEdge = (DepNode<DefPathIndex>, DepNode<DefPathIndex>);
+/// Represents a set of "reduced" dependency edges. We group the
+/// outgoing edges from a single source together.
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct SerializedEdgeSet {
+ pub source: DepNode<DefPathIndex>,
+ pub targets: Vec<DepNode<DefPathIndex>>
+}
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedHash {
let _ignore = tcx.dep_graph.in_ignore();
let dirty_inputs: FxHashSet<DepNode<DefId>> =
- dirty_inputs.iter()
- .filter_map(|d| retraced.map(d))
- .collect();
+ dirty_inputs.keys()
+ .filter_map(|d| retraced.map(d))
+ .collect();
let query = tcx.dep_graph.query();
debug!("query-nodes: {:?}", query.nodes());
let krate = tcx.hir.krate();
//! Code to save/load the dep-graph from files.
-use rustc::dep_graph::DepNode;
+use rustc::dep_graph::{DepNode, WorkProductId};
use rustc::hir::def_id::DefId;
use rustc::hir::svh::Svh;
use rustc::session::Session;
use rustc_serialize::Decodable as RustcDecodable;
use rustc_serialize::opaque::Decoder;
use std::path::{Path};
+use std::sync::Arc;
use IncrementalHashesMap;
use ich::Fingerprint;
use super::file_format;
use super::work_product;
-pub type DirtyNodes = FxHashSet<DepNode<DefPathIndex>>;
+// The key is a dirty node. The value is **some** base-input that we
+// can blame it on.
+pub type DirtyNodes = FxHashMap<DepNode<DefPathIndex>, DepNode<DefPathIndex>>;
/// If we are in incremental mode, and a previous dep-graph exists,
/// then load up those nodes/edges that are still valid into the
let directory = DefIdDirectory::decode(&mut dep_graph_decoder)?;
let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?;
+ let edge_map: FxHashMap<_, _> = serialized_dep_graph.edges
+ .into_iter()
+ .map(|s| (s.source, s.targets))
+ .collect();
+
// Retrace the paths in the directory to find their current location (if any).
let retraced = directory.retrace(tcx);
- // Compute the set of Hir nodes whose data has changed or which
- // have been removed. These are "raw" source nodes, which means
- // that they still use the original `DefPathIndex` values from the
- // encoding, rather than having been retraced to a `DefId`. The
- // reason for this is that this way we can include nodes that have
- // been removed (which no longer have a `DefId` in the current
- // compilation).
- let dirty_raw_source_nodes = dirty_nodes(tcx,
- incremental_hashes_map,
- &serialized_dep_graph.hashes,
- &retraced);
-
- // Create a list of (raw-source-node ->
- // retracted-target-node) edges. In the process of retracing the
- // target nodes, we may discover some of them def-paths no longer exist,
- // in which case there is no need to mark the corresopnding nodes as dirty
- // (they are just not present). So this list may be smaller than the original.
- //
- // Note though that in the common case the target nodes are
- // `DepNode::WorkProduct` instances, and those don't have a
- // def-id, so they will never be considered to not exist. Instead,
- // we do a secondary hashing step (later, in trans) when we know
- // the set of symbols that go into a work-product: if any symbols
- // have been removed (or added) the hash will be different and
- // we'll ignore the work-product then.
- let retraced_edges: Vec<_> =
- serialized_dep_graph.edges.iter()
- .filter_map(|&(ref raw_source_node, ref raw_target_node)| {
- retraced.map(raw_target_node)
- .map(|target_node| (raw_source_node, target_node))
- })
- .collect();
-
- // Compute which work-products have an input that has changed or
- // been removed. Put the dirty ones into a set.
- let mut dirty_target_nodes = FxHashSet();
- for &(raw_source_node, ref target_node) in &retraced_edges {
- if dirty_raw_source_nodes.contains(raw_source_node) {
- if !dirty_target_nodes.contains(target_node) {
- dirty_target_nodes.insert(target_node.clone());
-
- if tcx.sess.opts.debugging_opts.incremental_info {
- // It'd be nice to pretty-print these paths better than just
- // using the `Debug` impls, but wev.
- println!("incremental: module {:?} is dirty because {:?} \
- changed or was removed",
- target_node,
- raw_source_node.map_def(|&index| {
- Some(directory.def_path_string(tcx, index))
- }).unwrap());
+ // Compute the set of nodes from the old graph where some input
+ // has changed or been removed. These are "raw" source nodes,
+ // which means that they still use the original `DefPathIndex`
+ // values from the encoding, rather than having been retraced to a
+ // `DefId`. The reason for this is that this way we can include
+ // nodes that have been removed (which no longer have a `DefId` in
+ // the current compilation).
+ let dirty_raw_nodes = initial_dirty_nodes(tcx,
+ incremental_hashes_map,
+ &serialized_dep_graph.hashes,
+ &retraced);
+ let dirty_raw_nodes = transitive_dirty_nodes(&edge_map, dirty_raw_nodes);
+
+ // Recreate the edges in the graph that are still clean.
+ let mut clean_work_products = FxHashSet();
+ let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output
+ for (source, targets) in &edge_map {
+ for target in targets {
+ // If the target is dirty, skip the edge. If this is an edge
+ // that targets a work-product, we can print the blame
+ // information now.
+ if let Some(blame) = dirty_raw_nodes.get(target) {
+ if let DepNode::WorkProduct(ref wp) = *target {
+ if tcx.sess.opts.debugging_opts.incremental_info {
+ if dirty_work_products.insert(wp.clone()) {
+ // It'd be nice to pretty-print these paths better than just
+ // using the `Debug` impls, but wev.
+ println!("incremental: module {:?} is dirty because {:?} \
+ changed or was removed",
+ wp,
+ blame.map_def(|&index| {
+ Some(directory.def_path_string(tcx, index))
+ }).unwrap());
+ }
+ }
}
+ continue;
}
- }
- }
- // For work-products that are still clean, add their deps into the
- // graph. This is needed because later we will have to save this
- // back out again!
- let dep_graph = tcx.dep_graph.clone();
- for (raw_source_node, target_node) in retraced_edges {
- if dirty_target_nodes.contains(&target_node) {
- continue;
+ // If the source is dirty, the target will be dirty.
+ assert!(!dirty_raw_nodes.contains_key(source));
+
+ // Retrace the source -> target edges to def-ids and then
+ // create an edge in the graph. Retracing may yield none if
+ // some of the data happens to have been removed; this ought
+ // to be impossible unless it is dirty, so we can unwrap.
+ let source_node = retraced.map(source).unwrap();
+ let target_node = retraced.map(target).unwrap();
+ let _task = tcx.dep_graph.in_task(target_node);
+ tcx.dep_graph.read(source_node);
+ if let DepNode::WorkProduct(ref wp) = *target {
+ clean_work_products.insert(wp.clone());
+ }
}
-
- let source_node = retraced.map(raw_source_node).unwrap();
-
- debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source_node, target_node);
-
- let _task = dep_graph.in_task(target_node);
- dep_graph.read(source_node);
}
// Add in work-products that are still clean, and delete those that are
// dirty.
- reconcile_work_products(tcx, work_products, &dirty_target_nodes);
+ reconcile_work_products(tcx, work_products, &clean_work_products);
- dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_source_nodes, &retraced);
+ dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_nodes, &retraced);
load_prev_metadata_hashes(tcx,
&retraced,
/// Computes which of the original set of def-ids are dirty. Stored in
/// a bit vector where the index is the DefPathIndex.
-fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- incremental_hashes_map: &IncrementalHashesMap,
- serialized_hashes: &[SerializedHash],
- retraced: &RetracedDefIdDirectory)
- -> DirtyNodes {
+fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ incremental_hashes_map: &IncrementalHashesMap,
+ serialized_hashes: &[SerializedHash],
+ retraced: &RetracedDefIdDirectory)
+ -> DirtyNodes {
let mut hcx = HashContext::new(tcx, incremental_hashes_map);
- let mut dirty_nodes = FxHashSet();
+ let mut dirty_nodes = FxHashMap();
for hash in serialized_hashes {
if let Some(dep_node) = retraced.map(&hash.dep_node) {
hash.dep_node);
}
- dirty_nodes.insert(hash.dep_node.clone());
+ dirty_nodes.insert(hash.dep_node.clone(), hash.dep_node.clone());
}
dirty_nodes
}
+fn transitive_dirty_nodes(edge_map: &FxHashMap<DepNode<DefPathIndex>, Vec<DepNode<DefPathIndex>>>,
+ mut dirty_nodes: DirtyNodes)
+ -> DirtyNodes
+{
+ let mut stack: Vec<(DepNode<DefPathIndex>, DepNode<DefPathIndex>)> = vec![];
+ stack.extend(dirty_nodes.iter().map(|(s, b)| (s.clone(), b.clone())));
+ while let Some((source, blame)) = stack.pop() {
+ // we know the source is dirty (because of the node `blame`)...
+ assert!(dirty_nodes.contains_key(&source));
+
+ // ...so we dirty all the targets (with the same blame)
+ if let Some(targets) = edge_map.get(&source) {
+ for target in targets {
+ if !dirty_nodes.contains_key(target) {
+ dirty_nodes.insert(target.clone(), blame.clone());
+ stack.push((target.clone(), blame.clone()));
+ }
+ }
+ }
+ }
+ dirty_nodes
+}
+
/// Go through the list of work-products produced in the previous run.
/// Delete any whose nodes have been found to be dirty or which are
/// otherwise no longer applicable.
fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
work_products: Vec<SerializedWorkProduct>,
- dirty_target_nodes: &FxHashSet<DepNode<DefId>>) {
+ clean_work_products: &FxHashSet<Arc<WorkProductId>>) {
debug!("reconcile_work_products({:?})", work_products);
for swp in work_products {
- if dirty_target_nodes.contains(&DepNode::WorkProduct(swp.id.clone())) {
+ if !clean_work_products.contains(&swp.id) {
debug!("reconcile_work_products: dep-node for {:?} is dirty", swp);
delete_dirty_work_product(tcx, swp);
} else {
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::dep_graph::{DepGraphQuery, DepNode};
-use rustc::hir::def_id::DefId;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::bitvec::BitVector;
-use rustc_data_structures::graph::{NodeIndex, Graph};
-
-use super::hash::*;
-use ich::Fingerprint;
-
-/// A data-structure that makes it easy to enumerate the hashable
-/// predecessors of any given dep-node.
-pub struct Predecessors<'query> {
- // - Keys: dep-nodes that may have work-products, output meta-data
- // nodes.
- // - Values: transitive predecessors of the key that are hashable
- // (e.g., HIR nodes, input meta-data nodes)
- pub inputs: FxHashMap<&'query DepNode<DefId>, Vec<&'query DepNode<DefId>>>,
-
- // - Keys: some hashable node
- // - Values: the hash thereof
- pub hashes: FxHashMap<&'query DepNode<DefId>, Fingerprint>,
-}
-
-impl<'q> Predecessors<'q> {
- pub fn new(query: &'q DepGraphQuery<DefId>, hcx: &mut HashContext) -> Self {
- let tcx = hcx.tcx;
-
- let collect_for_metadata = tcx.sess.opts.debugging_opts.incremental_cc ||
- tcx.sess.opts.debugging_opts.query_dep_graph;
-
- // Find nodes for which we want to know the full set of preds
- let node_count = query.graph.len_nodes();
-
- // Set up some data structures the cache predecessor search needs:
- let mut visit_counts: Vec<u32> = Vec::new();
- let mut node_cache: Vec<Option<Box<[u32]>>> = Vec::new();
- visit_counts.resize(node_count, 0);
- node_cache.resize(node_count, None);
- let mut dfs_workspace1 = DfsWorkspace::new(node_count);
- let mut dfs_workspace2 = DfsWorkspace::new(node_count);
-
- let inputs: FxHashMap<_, _> = query
- .graph
- .all_nodes()
- .iter()
- .enumerate()
- .filter(|&(_, node)| match node.data {
- DepNode::WorkProduct(_) => true,
- DepNode::MetaData(ref def_id) => collect_for_metadata && def_id.is_local(),
-
- // if -Z query-dep-graph is passed, save more extended data
- // to enable better unit testing
- DepNode::TypeckTables(_) |
- DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph,
-
- _ => false,
- })
- .map(|(node_index, node)| {
- find_roots(&query.graph,
- node_index as u32,
- &mut visit_counts,
- &mut node_cache[..],
- HashContext::is_hashable,
- &mut dfs_workspace1,
- Some(&mut dfs_workspace2));
-
- let inputs: Vec<_> = dfs_workspace1.output.nodes.iter().map(|&i| {
- query.graph.node_data(NodeIndex(i as usize))
- }).collect();
-
- (&node.data, inputs)
- })
- .collect();
-
- let mut hashes = FxHashMap();
- for input in inputs.values().flat_map(|v| v.iter().cloned()) {
- hashes.entry(input)
- .or_insert_with(|| hcx.hash(input).unwrap());
- }
-
- Predecessors {
- inputs: inputs,
- hashes: hashes,
- }
- }
-}
-
-const CACHING_THRESHOLD: u32 = 60;
-
-// Starting at `start_node`, this function finds this node's "roots", that is,
-// anything that is hashable, in the dep-graph. It uses a simple depth-first
-// search to achieve that. However, since some sub-graphs are traversed over
-// and over again, the function also some caching built into it: Each time it
-// visits a node it increases a counter for that node. If a node has been
-// visited more often than CACHING_THRESHOLD, the function will allocate a
-// cache entry in the `cache` array. This cache entry contains a flat list of
-// all roots reachable from the given node. The next time the node is visited,
-// the search can just add the contents of this array to the output instead of
-// recursing further.
-//
-// The function takes two `DfsWorkspace` arguments. These contains some data
-// structures that would be expensive to re-allocate all the time, so they are
-// allocated once up-front. There are two of them because building a cache entry
-// requires a recursive invocation of this function. Two are enough though,
-// since function never recurses more than once.
-fn find_roots<T, F>(graph: &Graph<T, ()>,
- start_node: u32,
- visit_counts: &mut [u32],
- cache: &mut [Option<Box<[u32]>>],
- is_root: F,
- workspace: &mut DfsWorkspace,
- mut sub_workspace: Option<&mut DfsWorkspace>)
- where F: Copy + Fn(&T) -> bool,
- T: ::std::fmt::Debug,
-{
- workspace.visited.clear();
- workspace.output.clear();
- workspace.stack.clear();
- workspace.stack.push(start_node);
-
- loop {
- let node = match workspace.stack.pop() {
- Some(node) => node,
- None => return,
- };
-
- if !workspace.visited.insert(node as usize) {
- continue
- }
-
- if is_root(graph.node_data(NodeIndex(node as usize))) {
- // If this is a root, just add it to the output.
- workspace.output.insert(node);
- } else {
- if let Some(ref cached) = cache[node as usize] {
- for &n in &cached[..] {
- workspace.output.insert(n);
- }
- // No need to recurse further from this node
- continue
- }
-
- visit_counts[node as usize] += 1;
-
- // If this node has been visited often enough to be cached ...
- if visit_counts[node as usize] > CACHING_THRESHOLD {
- // ... we are actually allowed to cache something, do so:
- if let Some(ref mut sub_workspace) = sub_workspace {
- // Note that the following recursive invocation does never
- // write to the cache (since we pass None as sub_workspace).
- // This is intentional: The graph we are working with
- // contains cycles and this prevent us from simply building
- // our caches recursively on-demand.
- // However, we can just do a regular, non-caching DFS to
- // yield the set of roots and cache that.
- find_roots(graph,
- node,
- visit_counts,
- cache,
- is_root,
- sub_workspace,
- None);
-
- for &n in &sub_workspace.output.nodes {
- workspace.output.insert(n);
- }
-
- cache[node as usize] = Some(sub_workspace.output
- .nodes
- .clone()
- .into_boxed_slice());
- // No need to recurse further from this node
- continue
- }
- }
-
- for pred in graph.predecessor_nodes(NodeIndex(node as usize)) {
- workspace.stack.push(pred.node_id() as u32);
- }
- }
- }
-}
-
-struct DfsWorkspace {
- stack: Vec<u32>,
- visited: BitVector,
- output: NodeIndexSet,
-}
-
-impl DfsWorkspace {
- fn new(total_node_count: usize) -> DfsWorkspace {
- DfsWorkspace {
- stack: Vec::new(),
- visited: BitVector::new(total_node_count),
- output: NodeIndexSet::new(total_node_count),
- }
- }
-}
-
-struct NodeIndexSet {
- bitset: BitVector,
- nodes: Vec<u32>,
-}
-
-impl NodeIndexSet {
- fn new(total_node_count: usize) -> NodeIndexSet {
- NodeIndexSet {
- bitset: BitVector::new(total_node_count),
- nodes: Vec::new(),
- }
- }
-
- #[inline]
- fn clear(&mut self) {
- self.bitset.clear();
- self.nodes.clear();
- }
-
- #[inline]
- fn insert(&mut self, node: u32) {
- if self.bitset.insert(node as usize) {
- self.nodes.push(node)
- }
- }
-}
-
-#[test]
-fn test_cached_dfs_acyclic() {
-
- // 0 1 2
- // | \ /
- // 3---+ |
- // | | |
- // | | |
- // 4 5 6
- // \ / \ / \
- // | | |
- // 7 8 9
-
- let mut g: Graph<bool, ()> = Graph::new();
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(true);
- g.add_node(true);
- g.add_node(true);
-
- g.add_edge(NodeIndex(3), NodeIndex(0), ());
- g.add_edge(NodeIndex(4), NodeIndex(3), ());
- g.add_edge(NodeIndex(7), NodeIndex(4), ());
- g.add_edge(NodeIndex(5), NodeIndex(3), ());
- g.add_edge(NodeIndex(7), NodeIndex(5), ());
- g.add_edge(NodeIndex(8), NodeIndex(5), ());
- g.add_edge(NodeIndex(8), NodeIndex(6), ());
- g.add_edge(NodeIndex(9), NodeIndex(6), ());
- g.add_edge(NodeIndex(6), NodeIndex(1), ());
- g.add_edge(NodeIndex(6), NodeIndex(2), ());
-
- let mut ws1 = DfsWorkspace::new(g.len_nodes());
- let mut ws2 = DfsWorkspace::new(g.len_nodes());
- let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect();
- let mut cache: Vec<Option<Box<[u32]>>> = g.all_nodes().iter().map(|_| None).collect();
-
- fn is_root(x: &bool) -> bool { *x }
-
- for _ in 0 .. CACHING_THRESHOLD + 1 {
- find_roots(&g, 5, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![7, 8]);
-
- find_roots(&g, 6, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9]);
-
- find_roots(&g, 0, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![7, 8]);
-
- find_roots(&g, 1, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9]);
-
- find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9]);
-
- find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![7, 8]);
-
- find_roots(&g, 4, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![7]);
- }
-}
-
-#[test]
-fn test_cached_dfs_cyclic() {
-
- // 0 1 <---- 2 3
- // ^ | ^ ^
- // | v | |
- // 4 ----> 5 ----> 6 ----> 7
- // ^ ^ ^ ^
- // | | | |
- // 8 9 10 11
-
-
- let mut g: Graph<bool, ()> = Graph::new();
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(false);
- g.add_node(true);
- g.add_node(true);
- g.add_node(true);
- g.add_node(true);
-
- g.add_edge(NodeIndex( 4), NodeIndex(0), ());
- g.add_edge(NodeIndex( 8), NodeIndex(4), ());
- g.add_edge(NodeIndex( 4), NodeIndex(5), ());
- g.add_edge(NodeIndex( 1), NodeIndex(5), ());
- g.add_edge(NodeIndex( 9), NodeIndex(5), ());
- g.add_edge(NodeIndex( 5), NodeIndex(6), ());
- g.add_edge(NodeIndex( 6), NodeIndex(2), ());
- g.add_edge(NodeIndex( 2), NodeIndex(1), ());
- g.add_edge(NodeIndex(10), NodeIndex(6), ());
- g.add_edge(NodeIndex( 6), NodeIndex(7), ());
- g.add_edge(NodeIndex(11), NodeIndex(7), ());
- g.add_edge(NodeIndex( 7), NodeIndex(3), ());
-
- let mut ws1 = DfsWorkspace::new(g.len_nodes());
- let mut ws2 = DfsWorkspace::new(g.len_nodes());
- let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect();
- let mut cache: Vec<Option<Box<[u32]>>> = g.all_nodes().iter().map(|_| None).collect();
-
- fn is_root(x: &bool) -> bool { *x }
-
- for _ in 0 .. CACHING_THRESHOLD + 1 {
- find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9, 10]);
-
- find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9, 10, 11]);
- }
-}
--- /dev/null
+Graph compression
+
+The graph compression algorithm is intended to reduce the size of the
+dependency graph so it can be saved, while preserving
+everything we care about. In particular, given a set of input/output
+nodes in the graph (which must be disjoint), we ensure that the set of
+input nodes that can reach a given output node does not change,
+although the intermediate nodes may change in various ways. In short,
+the output nodes are intended to be the ones whose existence we care
+about when we start up, because they have some associated data that we
+will try to re-use (and hence if they are dirty, we have to throw that
+data away). The other intermediate nodes don't really matter so much.
+
+### Overview
+
+The algorithm works as follows:
+
+1. Do a single walk of the graph to construct a DAG
+ - in this walk, we identify and unify all cycles, electing a representative "head" node
+ - this is done using the union-find implementation
+ - this code is found in the `classify` module
+2. The result from this walk is a `Dag`:
+ - the set of SCCs, represented by the union-find table
+ - a set of edges in the new DAG, represented by:
+ - a vector of parent nodes for each child node
+ - a vector of cross-edges
+ - once these are canonicalized, some of these edges may turn out to be cyclic edges
+ (i.e., an edge A -> A where A is the head of some SCC)
+3. We pass this `Dag` into the construct code, which then creates a
+ new graph. This graph has a smaller set of indices which includes
+ *at least* the inputs/outputs from the original graph, but may have
+ other nodes as well, if keeping them reduces the overall size of
+ the graph.
+ - This code is found in the `construct` module.
+
+### Some notes
+
+The input graph is assumed to have *read-by* edges. i.e., `A -> B`
+means that the task B reads data from A. But the DAG defined by
+classify is expressed in terms of *reads-from* edges, which are the
+inverse. So `A -> B` is the same as `B -rf-> A`. *reads-from* edges
+are more natural since we want to walk from the outputs to the inputs,
+effectively. When we construct the final graph, we reverse these edges
+back into the *read-by* edges common elsewhere.
+
+
+
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! First phase. Detect cycles and cross-edges.
+
+use super::*;
+
+#[cfg(test)]
+mod test;
+
+pub struct Classify<'a, 'g: 'a, N: 'g, I: 'a, O: 'a>
+ where N: Debug + Clone + 'g,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ r: &'a mut GraphReduce<'g, N, I, O>,
+ stack: Vec<NodeIndex>,
+ colors: Vec<Color>,
+ dag: Dag,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum Color {
+ // not yet visited
+ White,
+
+ // visiting; usize is index on stack
+ Grey(usize),
+
+ // finished visiting
+ Black,
+}
+
+impl<'a, 'g, N, I, O> Classify<'a, 'g, N, I, O>
+ where N: Debug + Clone + 'g,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ pub(super) fn new(r: &'a mut GraphReduce<'g, N, I, O>) -> Self {
+ Classify {
+ r: r,
+ colors: vec![Color::White; r.in_graph.len_nodes()],
+ stack: vec![],
+ dag: Dag {
+ parents: (0..r.in_graph.len_nodes()).map(|i| NodeIndex(i)).collect(),
+ cross_edges: vec![],
+ input_nodes: vec![],
+ output_nodes: vec![],
+ },
+ }
+ }
+
+ pub(super) fn walk(mut self) -> Dag {
+ for (index, node) in self.r.in_graph.all_nodes().iter().enumerate() {
+ if (self.r.is_output)(&node.data) {
+ let index = NodeIndex(index);
+ self.dag.output_nodes.push(index);
+ match self.colors[index.0] {
+ Color::White => self.open(index),
+ Color::Grey(_) => panic!("grey node but have not yet started a walk"),
+ Color::Black => (), // already visited, skip
+ }
+ }
+ }
+
+        // At this point we've identified all the cycles, and we've
+ // constructed a spanning tree over the original graph
+ // (encoded in `self.parents`) as well as a list of
+ // cross-edges that reflect additional edges from the DAG.
+ //
+ // If we converted each node to its `cycle-head` (a
+ // representative choice from each SCC, basically) and then
+ // take the union of `self.parents` and `self.cross_edges`
+ // (after canonicalization), that is basically our DAG.
+ //
+ // Note that both of those may well contain trivial `X -rf-> X`
+ // cycle edges after canonicalization, though. e.g., if you
+ // have a graph `{A -rf-> B, B -rf-> A}`, we will have unioned A and
+ // B, but A will also be B's parent (or vice versa), and hence
+ // when we canonicalize the parent edge it would become `A -rf->
+ // A` (or `B -rf-> B`).
+ self.dag
+ }
+
+ fn open(&mut self, node: NodeIndex) {
+ let index = self.stack.len();
+ self.stack.push(node);
+ self.colors[node.0] = Color::Grey(index);
+ for child in self.r.inputs(node) {
+ self.walk_edge(node, child);
+ }
+ self.stack.pop().unwrap();
+ self.colors[node.0] = Color::Black;
+
+ if (self.r.is_input)(&self.r.in_graph.node_data(node)) {
+ // base inputs should have no inputs
+ assert!(self.r.inputs(node).next().is_none());
+ debug!("input: `{:?}`", self.r.in_graph.node_data(node));
+ self.dag.input_nodes.push(node);
+ }
+ }
+
+ fn walk_edge(&mut self, parent: NodeIndex, child: NodeIndex) {
+ debug!("walk_edge: {:?} -rf-> {:?}, {:?}",
+ self.r.in_graph.node_data(parent),
+ self.r.in_graph.node_data(child),
+ self.colors[child.0]);
+
+ // Ignore self-edges, just in case they exist.
+ if child == parent {
+ return;
+ }
+
+ match self.colors[child.0] {
+ Color::White => {
+ // Not yet visited this node; start walking it.
+ assert_eq!(self.dag.parents[child.0], child);
+ self.dag.parents[child.0] = parent;
+ self.open(child);
+ }
+
+ Color::Grey(stack_index) => {
+ // Back-edge; unify everything on stack between here and `stack_index`
+ // since we are all participating in a cycle
+ assert!(self.stack[stack_index] == child);
+
+ for &n in &self.stack[stack_index..] {
+ debug!("cycle `{:?}` and `{:?}`",
+ self.r.in_graph.node_data(n),
+ self.r.in_graph.node_data(parent));
+ self.r.mark_cycle(n, parent);
+ }
+ }
+
+ Color::Black => {
+ // Cross-edge, record and ignore
+ self.dag.cross_edges.push((parent, child));
+ debug!("cross-edge `{:?} -rf-> {:?}`",
+ self.r.in_graph.node_data(parent),
+ self.r.in_graph.node_data(child));
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::*;
+
+#[test]
+fn detect_cycles() {
+ let (graph, nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> C1,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ C1 -> E,
+ };
+ let inputs = ["A", "B"];
+ let outputs = ["D", "E"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ assert!(!reduce.in_cycle(nodes("A"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("B"), nodes("C0")));
+ assert!(reduce.in_cycle(nodes("C0"), nodes("C1")));
+ assert!(!reduce.in_cycle(nodes("D"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("E"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("E"), nodes("A")));
+}
+
+/// Regr test for a bug where we forgot to pop nodes off of the stack
+/// as we were walking. In this case, because edges are pushed to the front
+/// of the list, we would visit OUT, then A, then IN, and then close IN (but
+/// forget to pop it). Then visit B, C, and then A, which would mark everything
+/// from A to C as a cycle. But since we failed to pop IN, the stack was
+/// `OUT, A, IN, B, C`, so that marked C and IN as being in a cycle.
+#[test]
+fn edge_order1() {
+ let (graph, nodes) = graph! {
+ A -> C,
+ C -> B,
+ B -> A,
+ IN -> B,
+ IN -> A,
+ A -> OUT,
+ };
+ let inputs = ["IN"];
+ let outputs = ["OUT"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ // A, B, and C are mutually in a cycle, but IN/OUT are not participating.
+ let names = ["A", "B", "C", "IN", "OUT"];
+ let cycle_names = ["A", "B", "C"];
+ for &i in &names {
+ for &j in names.iter().filter(|&&j| j != i) {
+ let in_cycle = cycle_names.contains(&i) && cycle_names.contains(&j);
+ assert_eq!(reduce.in_cycle(nodes(i), nodes(j)), in_cycle,
+ "cycle status for nodes {} and {} is incorrect",
+ i, j);
+ }
+ }
+}
+
+/// Same as `edge_order1` but in reverse order so as to detect a failure
+/// if we were to enqueue edges onto end of list instead.
+#[test]
+fn edge_order2() {
+ let (graph, nodes) = graph! {
+ A -> OUT,
+ IN -> A,
+ IN -> B,
+ B -> A,
+ C -> B,
+ A -> C,
+ };
+ let inputs = ["IN"];
+ let outputs = ["OUT"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ assert!(reduce.in_cycle(nodes("B"), nodes("C")));
+
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("A")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("B")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("C")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT")));
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Second phase. Construct new graph. The previous phase has
+//! converted the input graph into a DAG by detecting and unifying
+//! cycles. It provides us with the following (which is a
+//! representation of the DAG):
+//!
+//! - SCCs, in the form of a union-find repr that can convert each node to
+//!   its *cycle head* (an arbitrarily chosen representative from the cycle)
+//! - a vector of *leaf nodes*, just a convenience
+//! - a vector of *parents* for each node (in some cases, nodes have no parents,
+//! or their parent is another member of same cycle; in that case, the vector
+//! will be stored `v[i] == i`, after canonicalization)
+//! - a vector of *cross edges*, meaning additional edges between graph nodes beyond
+//! the parents.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use super::*;
+
+pub(super) fn construct_graph<'g, N, I, O>(r: &mut GraphReduce<'g, N, I, O>, dag: Dag)
+ -> Reduction<'g, N>
+ where N: Debug + Clone, I: Fn(&N) -> bool, O: Fn(&N) -> bool,
+{
+ let Dag { parents: old_parents, input_nodes, output_nodes, cross_edges } = dag;
+ let in_graph = r.in_graph;
+
+ debug!("construct_graph");
+
+ // Create a canonical list of edges; this includes both parent and
+ // cross-edges. We store this in `(target -> Vec<source>)` form.
+ // We call the first edge to any given target its "parent".
+ let mut edges = FxHashMap();
+ let old_parent_edges = old_parents.iter().cloned().zip((0..).map(NodeIndex));
+ for (source, target) in old_parent_edges.chain(cross_edges) {
+ debug!("original edge `{:?} -rf-> {:?}`",
+ in_graph.node_data(source),
+ in_graph.node_data(target));
+ let source = r.cycle_head(source);
+ let target = r.cycle_head(target);
+ if source != target {
+ let v = edges.entry(target).or_insert(vec![]);
+ if !v.contains(&source) {
+ debug!("edge `{:?} -rf-> {:?}` is edge #{} with that target",
+ in_graph.node_data(source),
+ in_graph.node_data(target),
+ v.len());
+ v.push(source);
+ }
+ }
+ }
+ let parent = |ni: NodeIndex| -> NodeIndex {
+ edges[&ni][0]
+ };
+
+ // `retain_map`: a map of those nodes that we will want to
+ // *retain* in the ultimate graph; the key is the node index in
+ // the old graph, the value is the node index in the new
+ // graph. These are nodes in the following categories:
+ //
+ // - inputs
+ // - work-products
+ // - targets of a cross-edge
+ //
+ // The first two categories hopefully make sense. We want the
+ // inputs so we can compare hashes later. We want the
+ // work-products so we can tell precisely when a given
+ // work-product is invalidated. But the last one isn't strictly
+ // needed; we keep cross-target edges so as to minimize the total
+ // graph size.
+ //
+ // Consider a graph like:
+ //
+ // WP0 -rf-> Y
+ // WP1 -rf-> Y
+ // Y -rf-> INPUT0
+ // Y -rf-> INPUT1
+ // Y -rf-> INPUT2
+ // Y -rf-> INPUT3
+ //
+ // Now if we were to remove Y, we would have a total of 8 edges: both WP0 and WP1
+ // depend on INPUT0...INPUT3. As it is, we have 6 edges.
+ //
+ // NB: The current rules are not optimal. For example, given this
+ // input graph:
+ //
+ // OUT0 -rf-> X
+ // OUT1 -rf-> X
+    //     X -rf-> INPUT0
+ //
+ // we will preserve X because it has two "consumers" (OUT0 and
+ // OUT1). We could as easily skip it, but we'd have to tally up
+ // the number of input nodes that it (transitively) reaches, and I
+ // was too lazy to do so. This is the unit test `suboptimal`.
+
+ let mut retain_map = FxHashMap();
+ let mut new_graph = Graph::new();
+
+ {
+ // Start by adding start-nodes and inputs.
+ let retained_nodes = output_nodes.iter().chain(&input_nodes).map(|&n| r.cycle_head(n));
+
+ // Next add in targets of cross-edges. Due to the canonicalization,
+        // some of these may be self-edges or may duplicate the parent
+ // edges, so ignore those.
+ let retained_nodes = retained_nodes.chain(
+ edges.iter()
+ .filter(|&(_, ref sources)| sources.len() > 1)
+ .map(|(&target, _)| target));
+
+ // Now create the new graph, adding in the entries from the map.
+ for n in retained_nodes {
+ retain_map.entry(n)
+ .or_insert_with(|| {
+ let data = in_graph.node_data(n);
+ debug!("retaining node `{:?}`", data);
+ new_graph.add_node(data)
+ });
+ }
+ }
+
+ // Given a cycle-head `ni`, converts it to the closest parent that has
+ // been retained in the output graph.
+ let retained_parent = |mut ni: NodeIndex| -> NodeIndex {
+ loop {
+ debug!("retained_parent({:?})", in_graph.node_data(ni));
+ match retain_map.get(&ni) {
+ Some(&v) => return v,
+ None => ni = parent(ni),
+ }
+ }
+ };
+
+ // Now add in the edges into the graph.
+ for (&target, sources) in &edges {
+ if let Some(&r_target) = retain_map.get(&target) {
+ debug!("adding edges that target `{:?}`", in_graph.node_data(target));
+ for &source in sources {
+ debug!("new edge `{:?} -rf-> {:?}`",
+ in_graph.node_data(source),
+ in_graph.node_data(target));
+ let r_source = retained_parent(source);
+
+ // NB. In the input graph, we have `a -> b` if b
+ // **reads from** a. But in the terminology of this
+ // code, we would describe that edge as `b -> a`,
+ // because we have edges *from* outputs *to* inputs.
+ // Therefore, when we create our new graph, we have to
+ // reverse the edge.
+ new_graph.add_edge(r_target, r_source, ());
+ }
+ } else {
+ assert_eq!(sources.len(), 1);
+ }
+ }
+
+ // One complication. In some cases, output nodes *may* participate in
+ // cycles. An example:
+ //
+ // [HIR0] [HIR1]
+ // | |
+ // v v
+ // TypeckClosureBody(X) -> ItemSignature(X::SomeClosureInX)
+ // | ^ | |
+ // | +-------------------------+ |
+ // | |
+ // v v
+ // Foo Bar
+ //
+ // In these cases, the output node may not wind up as the head
+ // of the cycle, in which case it would be absent from the
+ // final graph. We don't wish this to happen, therefore we go
+ // over the list of output nodes again and check for any that
+ // are not their own cycle-head. If we find such a node, we
+ // add it to the graph now with an edge from the cycle head.
+ // So the graph above could get transformed into this:
+ //
+ // [HIR0, HIR1]
+ // |
+ // v
+ // TypeckClosureBody(X) ItemSignature(X::SomeClosureInX)
+ // ^ | |
+ // +-------------------------+ |
+ // v
+ // [Foo, Bar]
+ //
+ // (Note that all the edges here are "read-by" edges, not
+ // "reads-from" edges.)
+ for &output_node in &output_nodes {
+ let head = r.cycle_head(output_node);
+ if output_node == head {
+ assert!(retain_map.contains_key(&output_node));
+ } else {
+ assert!(!retain_map.contains_key(&output_node));
+ let output_data = in_graph.node_data(output_node);
+ let new_node = new_graph.add_node(output_data);
+ let new_head_node = retain_map[&head];
+ new_graph.add_edge(new_head_node, new_node, ());
+ }
+ }
+
+ // Finally, prepare a list of the input node indices as found in
+ // the new graph. Note that since all input nodes are leaves in
+ // the graph, they should never participate in a cycle.
+ let input_nodes =
+ input_nodes.iter()
+ .map(|&n| {
+ assert_eq!(r.cycle_head(n), n, "input node participating in a cycle");
+ retain_map[&n]
+ })
+ .collect();
+
+ Reduction { graph: new_graph, input_nodes: input_nodes }
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc_data_structures::graph::NodeIndex;
+use rustc_data_structures::unify::UnifyKey;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct DagId {
+ index: u32,
+}
+
+impl DagId {
+ pub fn from_input_index(n: NodeIndex) -> Self {
+ DagId { index: n.0 as u32 }
+ }
+
+ pub fn as_input_index(&self) -> NodeIndex {
+ NodeIndex(self.index as usize)
+ }
+}
+
+impl UnifyKey for DagId {
+ type Value = ();
+
+ fn index(&self) -> u32 {
+ self.index
+ }
+
+ fn from_index(u: u32) -> Self {
+ DagId { index: u }
+ }
+
+ fn tag(_: Option<Self>) -> &'static str {
+ "DagId"
+ }
+}
--- /dev/null
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Graph compression. See `README.md`.
+
+use rustc_data_structures::graph::{Graph, NodeIndex};
+use rustc_data_structures::unify::UnificationTable;
+use std::fmt::Debug;
+
+#[cfg(test)]
+#[macro_use]
+mod test_macro;
+
+mod construct;
+
+mod classify;
+use self::classify::Classify;
+
+mod dag_id;
+use self::dag_id::DagId;
+
+#[cfg(test)]
+mod test;
+
+pub fn reduce_graph<N, I, O>(graph: &Graph<N, ()>,
+ is_input: I,
+ is_output: O) -> Reduction<N>
+ where N: Debug + Clone,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ GraphReduce::new(graph, is_input, is_output).compute()
+}
+
+pub struct Reduction<'q, N> where N: 'q + Debug + Clone {
+ pub graph: Graph<&'q N, ()>,
+ pub input_nodes: Vec<NodeIndex>,
+}
+
+struct GraphReduce<'q, N, I, O>
+ where N: 'q + Debug + Clone,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ in_graph: &'q Graph<N, ()>,
+ unify: UnificationTable<DagId>,
+ is_input: I,
+ is_output: O,
+}
+
+struct Dag {
+ // The "parent" of a node is the node which reached it during the
+ // initial DFS. To encode the case of "no parent" (i.e., for the
+ // roots of the walk), we make `parents[i] == i` to start, which
+ // turns out be convenient.
+ parents: Vec<NodeIndex>,
+
+ // Additional edges beyond the parents.
+ cross_edges: Vec<(NodeIndex, NodeIndex)>,
+
+ // Nodes which we found that are considered "outputs"
+ output_nodes: Vec<NodeIndex>,
+
+ // Nodes which we found that are considered "inputs"
+ input_nodes: Vec<NodeIndex>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+struct DagNode {
+ in_index: NodeIndex
+}
+
+impl<'q, N, I, O> GraphReduce<'q, N, I, O>
+ where N: Debug + Clone,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ fn new(in_graph: &'q Graph<N, ()>, is_input: I, is_output: O) -> Self {
+ let mut unify = UnificationTable::new();
+
+ // create a set of unification keys whose indices
+ // correspond to the indices from the input graph
+ for i in 0..in_graph.len_nodes() {
+ let k = unify.new_key(());
+ assert!(k == DagId::from_input_index(NodeIndex(i)));
+ }
+
+ GraphReduce { in_graph, unify, is_input, is_output }
+ }
+
+ fn compute(mut self) -> Reduction<'q, N> {
+ let dag = Classify::new(&mut self).walk();
+ construct::construct_graph(&mut self, dag)
+ }
+
+ fn inputs(&self, in_node: NodeIndex) -> impl Iterator<Item = NodeIndex> + 'q {
+ self.in_graph.predecessor_nodes(in_node)
+ }
+
+ fn mark_cycle(&mut self, in_node1: NodeIndex, in_node2: NodeIndex) {
+ let dag_id1 = DagId::from_input_index(in_node1);
+ let dag_id2 = DagId::from_input_index(in_node2);
+ self.unify.union(dag_id1, dag_id2);
+ }
+
+ /// Convert a dag-id into its cycle head representative. This will
+ /// be a no-op unless `in_node` participates in a cycle, in which
+ /// case a distinct node *may* be returned.
+ fn cycle_head(&mut self, in_node: NodeIndex) -> NodeIndex {
+ let i = DagId::from_input_index(in_node);
+ self.unify.find(i).as_input_index()
+ }
+
+ #[cfg(test)]
+ fn in_cycle(&mut self, ni1: NodeIndex, ni2: NodeIndex) -> bool {
+ self.cycle_head(ni1) == self.cycle_head(ni2)
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::*;
+
+fn reduce(graph: &Graph<&'static str, ()>,
+ inputs: &[&'static str],
+ outputs: &[&'static str],
+ expected: &[&'static str])
+{
+ let reduce = GraphReduce::new(&graph,
+ |n| inputs.contains(n),
+ |n| outputs.contains(n));
+ let result = reduce.compute();
+ let mut edges: Vec<String> =
+ result.graph
+ .all_edges()
+ .iter()
+ .map(|edge| format!("{} -> {}",
+ result.graph.node_data(edge.source()),
+ result.graph.node_data(edge.target())))
+ .collect();
+ edges.sort();
+ println!("{:#?}", edges);
+ assert_eq!(edges.len(), expected.len());
+ for (expected, actual) in expected.iter().zip(&edges) {
+ assert_eq!(expected, actual);
+ }
+}
+
+#[test]
+fn test1() {
+ // +---------------+
+ // | |
+ // | +--------|------+
+ // | | v v
+ // [A] -> [C0] -> [C1] [D]
+ // [ ] <- [ ] -> [E]
+ // ^
+ // [B] -------------+
+ let (graph, _nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> C1,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ C1 -> E,
+ };
+
+ // [A] -> [C1] -> [D]
+ // [B] -> [ ] -> [E]
+ reduce(&graph, &["A", "B"], &["D", "E"], &[
+ "A -> C1",
+ "B -> C1",
+ "C1 -> D",
+ "C1 -> E",
+ ]);
+}
+
+#[test]
+fn test2() {
+ // +---------------+
+ // | |
+ // | +--------|------+
+ // | | v v
+ // [A] -> [C0] -> [C1] [D] -> [E]
+ // [ ] <- [ ]
+ // ^
+ // [B] -------------+
+ let (graph, _nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> C1,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ D -> E,
+ };
+
+ // [A] -> [D] -> [E]
+ // [B] -> [ ]
+ reduce(&graph, &["A", "B"], &["D", "E"], &[
+ "A -> D",
+ "B -> D",
+ "D -> E",
+ ]);
+}
+
+#[test]
+fn test2b() {
+ // Variant on test2 in which [B] is not
+ // considered an input.
+ let (graph, _nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> C1,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ D -> E,
+ };
+
+ // [A] -> [D] -> [E]
+ reduce(&graph, &["A"], &["D", "E"], &[
+ "A -> D",
+ "D -> E",
+ ]);
+}
+
+#[test]
+fn test3() {
+
+ // Edges going *downwards*, so 0, 1 and 2 are inputs,
+ // while 7, 8, and 9 are outputs.
+ //
+ // 0 1 2
+ // | \ /
+ // 3---+ |
+ // | | |
+ // | | |
+ // 4 5 6
+ // \ / \ / \
+ // | | |
+ // 7 8 9
+ //
+ // Here the end result removes node 4, instead encoding an edge
+ // from n3 -> n7, but keeps nodes 5 and 6, as they are common
+ // inputs to nodes 8/9.
+
+ let (graph, _nodes) = graph! {
+ n0 -> n3,
+ n3 -> n4,
+ n3 -> n5,
+ n4 -> n7,
+ n5 -> n7,
+ n5 -> n8,
+ n1 -> n6,
+ n2 -> n6,
+ n6 -> n8,
+ n6 -> n9,
+ };
+
+ reduce(&graph, &["n0", "n1", "n2"], &["n7", "n8", "n9"], &[
+ "n0 -> n3",
+ "n1 -> n6",
+ "n2 -> n6",
+ "n3 -> n5",
+ "n3 -> n7",
+ "n5 -> n7",
+ "n5 -> n8",
+ "n6 -> n8",
+ "n6 -> n9"
+ ]);
+}
+
+#[test]
+fn test_cached_dfs_cyclic() {
+
+ // 0 1 <---- 2 3
+ // ^ | ^ ^
+ // | v | |
+ // 4 ----> 5 ----> 6 ----> 7
+ // ^ ^ ^ ^
+ // | | | |
+ // 8 9 10 11
+
+ let (graph, _nodes) = graph! {
+ // edges from above diagram, in columns, top-to-bottom:
+ n4 -> n0,
+ n8 -> n4,
+ n4 -> n5,
+ n1 -> n5,
+ n9 -> n5,
+ n2 -> n1,
+ n5 -> n6,
+ n6 -> n2,
+ n10 -> n6,
+ n6 -> n7,
+ n7 -> n3,
+ n11 -> n7,
+ };
+
+ // 0 1 2 3
+ // ^ ^ / ^
+ // | |/ |
+ // 4 ----> 5 --------------+
+ // ^ ^ \ |
+ // | | \ |
+ // 8 9 10 11
+
+ reduce(&graph, &["n8", "n9", "n10", "n11"], &["n0", "n1", "n2", "n3"], &[
+ "n10 -> n5",
+ "n11 -> n3",
+ "n4 -> n0",
+ "n4 -> n5",
+ "n5 -> n1",
+ "n5 -> n2",
+ "n5 -> n3",
+ "n8 -> n4",
+ "n9 -> n5"
+ ]);
+}
+
+/// Demonstrates the case where we don't reduce as much as we could.
+#[test]
+fn suboptimal() {
+ let (graph, _nodes) = graph! {
+ INPUT0 -> X,
+ X -> OUTPUT0,
+ X -> OUTPUT1,
+ };
+
+ reduce(&graph, &["INPUT0"], &["OUTPUT0", "OUTPUT1"], &[
+ "INPUT0 -> X",
+ "X -> OUTPUT0",
+ "X -> OUTPUT1"
+ ]);
+}
+
+#[test]
+fn test_cycle_output() {
+ // +---------------+
+ // | |
+ // | +--------|------+
+ // | | v v
+ // [A] -> [C0] <-> [C1] <- [D]
+ // +----> [E]
+ // ^
+ // [B] ---------------------+
+ let (graph, _nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> E,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ C1 -> E,
+ D -> C1,
+ };
+
+ // [A] -> [C0] --> [D]
+ // +----> [E]
+ // ^
+ // [B] -------------+
+ reduce(&graph, &["A", "B"], &["D", "E"], &[
+ "A -> C0",
+ "B -> E",
+ "C0 -> D",
+ "C0 -> E",
+ ]);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! graph {
+ ($( $source:ident -> $target:ident, )*) => {
+ {
+ use $crate::rustc_data_structures::graph::{Graph, NodeIndex};
+ use $crate::rustc_data_structures::fx::FxHashMap;
+
+ let mut graph = Graph::new();
+ let mut nodes: FxHashMap<&'static str, NodeIndex> = FxHashMap();
+
+ for &name in &[ $(stringify!($source), stringify!($target)),* ] {
+ let name: &'static str = name;
+ nodes.entry(name)
+ .or_insert_with(|| graph.add_node(name));
+ }
+
+ $(
+ {
+ let source = nodes[&stringify!($source)];
+ let target = nodes[&stringify!($target)];
+ graph.add_edge(source, target, ());
+ }
+ )*
+
+ let f = move |name: &'static str| -> NodeIndex { nodes[&name] };
+
+ (graph, f)
+ }
+ }
+}
+
+macro_rules! set {
+ ($( $value:expr ),*) => {
+ {
+ use $crate::rustc_data_structures::fx::FxHashSet;
+ let mut set = FxHashSet();
+ $(set.insert($value);)*
+ set
+ }
+ }
+}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::dep_graph::{DepGraphQuery, DepNode};
+use rustc::hir::def_id::DefId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::Graph;
+
+use super::hash::*;
+use ich::Fingerprint;
+
+mod compress;
+
+/// A data-structure that makes it easy to enumerate the hashable
+/// predecessors of any given dep-node.
+pub struct Predecessors<'query> {
+ // A reduced version of the input graph that contains fewer nodes.
+ // This is intended to keep all of the base inputs (i.e., HIR
+ // nodes) and all of the "work-products" we may care about
+ // later. Other nodes may be retained if it keeps the overall size
+ // of the graph down.
+ pub reduced_graph: Graph<&'query DepNode<DefId>, ()>,
+
+ // For the inputs (hir/foreign-metadata), we include hashes.
+ pub hashes: FxHashMap<&'query DepNode<DefId>, Fingerprint>,
+}
+
+impl<'q> Predecessors<'q> {
+ pub fn new(query: &'q DepGraphQuery<DefId>, hcx: &mut HashContext) -> Self {
+ let tcx = hcx.tcx;
+
+ let collect_for_metadata = tcx.sess.opts.debugging_opts.incremental_cc ||
+ tcx.sess.opts.debugging_opts.query_dep_graph;
+
+ // Find the set of "start nodes". These are nodes that we will
+ // possibly query later.
+ let is_output = |node: &DepNode<DefId>| -> bool {
+ match *node {
+ DepNode::WorkProduct(_) => true,
+ DepNode::MetaData(ref def_id) => collect_for_metadata && def_id.is_local(),
+
+ // if -Z query-dep-graph is passed, save more extended data
+ // to enable better unit testing
+ DepNode::TypeckTables(_) |
+ DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph,
+
+ _ => false,
+ }
+ };
+
+ // Reduce the graph to the most important nodes.
+ let compress::Reduction { graph, input_nodes } =
+ compress::reduce_graph(&query.graph, HashContext::is_hashable, is_output);
+
+ let mut hashes = FxHashMap();
+ for input_index in input_nodes {
+ let input = *graph.node_data(input_index);
+ debug!("computing hash for input node `{:?}`", input);
+ hashes.entry(input)
+ .or_insert_with(|| hcx.hash(input).unwrap());
+ }
+
+ Predecessors {
+ reduced_graph: graph,
+ hashes: hashes,
+ }
+ }
+}
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::{NodeIndex, INCOMING};
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
use std::hash::Hash;
// Create a flat list of (Input, WorkProduct) edges for
// serialization.
- let mut edges = vec![];
- for (&target, sources) in &preds.inputs {
+ let mut edges = FxHashMap();
+ for edge in preds.reduced_graph.all_edges() {
+ let source = *preds.reduced_graph.node_data(edge.source());
+ let target = *preds.reduced_graph.node_data(edge.target());
match *target {
DepNode::MetaData(ref def_id) => {
// Metadata *targets* are always local metadata nodes. We have
}
_ => (),
}
+ debug!("serialize edge: {:?} -> {:?}", source, target);
+ let source = builder.map(source);
let target = builder.map(target);
- for &source in sources {
- let source = builder.map(source);
- edges.push((source, target.clone()));
- }
+ edges.entry(source).or_insert(vec![]).push(target);
}
if tcx.sess.opts.debugging_opts.incremental_dump_hash {
}
// Create the serialized dep-graph.
+ let edges = edges.into_iter()
+ .map(|(k, v)| SerializedEdgeSet { source: k, targets: v })
+ .collect();
let graph = SerializedDepGraph {
edges: edges,
hashes: preds.hashes
let mut def_id_hashes = FxHashMap();
- for (&target, sources) in &preds.inputs {
- let def_id = match *target {
- DepNode::MetaData(def_id) => {
- assert!(def_id.is_local());
- def_id
- }
+ for (index, target) in preds.reduced_graph.all_nodes().iter().enumerate() {
+ let index = NodeIndex(index);
+ let def_id = match *target.data {
+ DepNode::MetaData(def_id) if def_id.is_local() => def_id,
_ => continue,
};
// is the det. hash of the def-path. This is convenient
// because we can sort this to get a stable ordering across
// compilations, even if the def-ids themselves have changed.
- let mut hashes: Vec<(DepNode<u64>, Fingerprint)> = sources.iter()
- .map(|dep_node| {
- let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))).unwrap();
- let hash = preds.hashes[dep_node];
- (hash_dep_node, hash)
- })
- .collect();
+ let mut hashes: Vec<(DepNode<u64>, Fingerprint)> =
+ preds.reduced_graph
+ .depth_traverse(index, INCOMING)
+ .map(|index| preds.reduced_graph.node_data(index))
+ .filter(|dep_node| HashContext::is_hashable(dep_node))
+ .map(|dep_node| {
+ let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id)))
+ .unwrap();
+ let hash = preds.hashes[dep_node];
+ (hash_dep_node, hash)
+ })
+ .collect();
hashes.sort();
let mut state = IchHasher::new();
if tcx.sess.opts.debugging_opts.incremental_dump_hash {
println!("metadata hash for {:?} is {}", def_id, hash);
- for dep_node in sources {
- println!("metadata hash for {:?} depends on {:?} with hash {}",
- def_id, dep_node, preds.hashes[dep_node]);
+ for pred_index in preds.reduced_graph.depth_traverse(index, INCOMING) {
+ let dep_node = preds.reduced_graph.node_data(pred_index);
+ if HashContext::is_hashable(&dep_node) {
+ println!("metadata hash for {:?} depends on {:?} with hash {}",
+ def_id, dep_node, preds.hashes[dep_node]);
+ }
}
}
id: LintId::of(LIFETIME_UNDERSCORE),
reference: "issue #36892 <https://github.com/rust-lang/rust/issues/36892>",
},
+ FutureIncompatibleInfo {
+ id: LintId::of(RESOLVE_TRAIT_ON_DEFAULTED_UNIT),
+ reference: "issue #39216 <https://github.com/rust-lang/rust/issues/39216>",
+ },
FutureIncompatibleInfo {
id: LintId::of(SAFE_EXTERN_STATICS),
reference: "issue #36247 <https://github.com/rust-lang/rust/issues/35112>",
consider using a `*const libc::c_char`")
}
- ty::TyTuple(_) => {
+ ty::TyTuple(..) => {
FfiUnsafe("found Rust tuple type in foreign module; \
consider using a struct instead")
}
let t = cx.tables.expr_ty(&expr);
let warned = match t.sty {
- ty::TyTuple(ref tys) if tys.is_empty() => return,
+ ty::TyTuple(ref tys, _) if tys.is_empty() => return,
ty::TyNever => return,
ty::TyBool => return,
ty::TyAdt(def, _) => {
GateIssue::Language,
"is feature gated");
}
+ if lib.kind == cstore::NativeStaticNobundle && !sess.features.borrow().static_nobundle {
+ feature_gate::emit_feature_err(&sess.parse_sess,
+ "static_nobundle",
+ span.unwrap(),
+ GateIssue::Language,
+ "kind=\"static-nobundle\" is feature gated");
+ }
cstore.add_used_library(lib);
}
use proc_macro::TokenStream;
use proc_macro::__internal::Registry;
use rustc_back::dynamic_lib::DynamicLibrary;
- use syntax_ext::deriving::custom::CustomDerive;
+ use syntax_ext::deriving::custom::ProcMacroDerive;
use syntax_ext::proc_macro_impl::AttrProcMacro;
let path = match dylib {
expand: fn(TokenStream) -> TokenStream,
attributes: &[&'static str]) {
let attrs = attributes.iter().cloned().map(Symbol::intern).collect();
- let derive = SyntaxExtension::CustomDerive(
- Box::new(CustomDerive::new(expand, attrs))
+ let derive = SyntaxExtension::ProcMacroDerive(
+ Box::new(ProcMacroDerive::new(expand, attrs))
);
self.0.push((Symbol::intern(trait_name), Rc::new(derive)));
}
for id in self.get_foreign_items_of_kind(cstore::NativeStatic) {
self.cstore.add_statically_included_foreign_item(id);
}
+ for id in self.get_foreign_items_of_kind(cstore::NativeStaticNobundle) {
+ self.cstore.add_statically_included_foreign_item(id);
+ }
}
fn register_dllimport_foreign_items(&mut self) {
}).and_then(|a| a.value_str()).map(Symbol::as_str);
let kind = match kind.as_ref().map(|s| &s[..]) {
Some("static") => cstore::NativeStatic,
+ Some("static-nobundle") => cstore::NativeStaticNobundle,
Some("dylib") => cstore::NativeUnknown,
Some("framework") => cstore::NativeFramework,
Some(k) => {
use syntax_pos;
pub use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind, LinkagePreference};
-pub use rustc::middle::cstore::{NativeStatic, NativeFramework, NativeUnknown};
+pub use rustc::middle::cstore::NativeLibraryKind::*;
pub use rustc::middle::cstore::{CrateSource, LinkMeta, LibSource};
// A map from external crate numbers (as decoded from some crate file) to
let source_info = self.source_info(span);
let bool_ty = self.hir.bool_ty();
if self.hir.check_overflow() && op.is_checkable() && ty.is_integral() {
- let result_tup = self.hir.tcx().intern_tup(&[ty, bool_ty]);
+ let result_tup = self.hir.tcx().intern_tup(&[ty, bool_ty], false);
let result_value = self.temp(result_tup);
self.cfg.push_assign(block, source_info,
let mir = tcx.alloc_mir(mir);
let def_id = tcx.hir.local_def_id(src.item_id());
- assert!(tcx.mir_map.borrow_mut().insert(def_id, mir).is_none());
+ tcx.mir_map.borrow_mut().insert(def_id, mir);
});
let body = self.tcx.hir.body(body_id);
})
}
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
return match tys.get(field.index()) {
Some(&ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
"an `extern crate` loading macros must be at the crate root");
} else if !self.use_extern_macros && !used &&
self.session.cstore.dep_kind(module.def_id().unwrap().krate).macros_only() {
- let msg = "custom derive crates and `#[no_link]` crates have no effect without \
+ let msg = "proc macro crates and `#[no_link]` crates have no effect without \
`#[macro_use]`";
self.session.span_warn(item.span, msg);
used = true; // Avoid the normal unused extern crate warning
}
result
}
+
+ fn resolve_builtin_macro(&mut self, tname: Name) -> Result<Rc<SyntaxExtension>, Determinacy> {
+ match self.builtin_macros.get(&tname).cloned() {
+ Some(binding) => Ok(binding.get_macro(self)),
+ None => Err(Determinacy::Undetermined),
+ }
+ }
+
+ fn resolve_derive_macro(&mut self, scope: Mark, path: &ast::Path, force: bool)
+ -> Result<Rc<SyntaxExtension>, Determinacy> {
+ let ast::Path { span, .. } = *path;
+ match self.resolve_macro(scope, path, false) {
+ Ok(ext) => match *ext {
+ SyntaxExtension::BuiltinDerive(..) |
+ SyntaxExtension::ProcMacroDerive(..) => Ok(ext),
+ _ => Err(Determinacy::Determined),
+ },
+ Err(Determinacy::Undetermined) if force => {
+ let msg = format!("cannot find derive macro `{}` in this scope", path);
+ let mut err = self.session.struct_span_err(span, &msg);
+ err.emit();
+ Err(Determinacy::Determined)
+ },
+ Err(err) => Err(err),
+ }
+ }
}
impl<'a> Resolver<'a> {
let names = resolutions.iter().filter_map(|(&(ref i, _), resolution)| {
if *i == ident { return None; } // Never suggest the same name
match *resolution.borrow() {
- NameResolution { binding: Some(_), .. } => Some(&i.name),
+ NameResolution { binding: Some(name_binding), .. } => {
+ match name_binding.kind {
+ NameBindingKind::Import { binding, .. } => {
+ match binding.kind {
+ // Never suggest the name that has binding error
+ // i.e. the name that cannot be previously resolved
+ NameBindingKind::Def(Def::Err) => return None,
+ _ => Some(&i.name),
+ }
+ },
+ _ => Some(&i.name),
+ }
+ },
NameResolution { single_imports: SingleImports::None, .. } => None,
_ => Some(&i.name),
}
}.lower(self.tcx));
}
}
- ty::TyTuple(_) => {}
+ ty::TyTuple(..) => {}
_ => span_bug!(ex.span,
"Expected struct or tuple type, found {:?}",
ty),
-> Option<TypeRefData> {
self.lookup_ref_id(trait_ref.ref_id).and_then(|def_id| {
let span = trait_ref.path.span;
+ if generated_code(span) {
+ return None;
+ }
let sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span));
filter!(self.span_utils, sub_span, span, None);
Some(TypeRefData {
assert!(!sig.variadic && extra_args.is_empty());
match sig.inputs().last().unwrap().sty {
- ty::TyTuple(ref tupled_arguments) => {
+ ty::TyTuple(ref tupled_arguments, _) => {
inputs = &sig.inputs()[0..sig.inputs().len() - 1];
&tupled_arguments[..]
}
monomorphize::field_ty(cx.tcx(), substs, f)
}).collect::<Vec<_>>()
},
- ty::TyTuple(fields) => fields.to_vec(),
+ ty::TyTuple(fields, _) => fields.to_vec(),
ty::TyClosure(def_id, substs) => {
if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
substs.upvar_tys(def_id, cx.tcx()).collect()
use rustc::hir::def_id::CrateNum;
use rustc::hir::svh::Svh;
use rustc_back::tempdir::TempDir;
+use rustc_back::PanicStrategy;
use rustc_incremental::IncrementalHashesMap;
use std::ascii;
for lib in sess.cstore.used_libraries() {
match lib.kind {
NativeLibraryKind::NativeStatic => {}
+ NativeLibraryKind::NativeStaticNobundle |
NativeLibraryKind::NativeFramework |
NativeLibraryKind::NativeUnknown => continue,
}
for lib in all_native_libs.iter().filter(|l| relevant_lib(sess, l)) {
let name = match lib.kind {
+ NativeLibraryKind::NativeStaticNobundle |
NativeLibraryKind::NativeUnknown => "library",
NativeLibraryKind::NativeFramework => "framework",
// These are included, no need to print them
cmd.arg(root.join(obj));
}
+ if sess.target.target.options.is_like_emscripten &&
+ sess.panic_strategy() == PanicStrategy::Abort {
+ cmd.args(&["-s", "DISABLE_EXCEPTION_CATCHING=1"]);
+ }
+
{
let mut linker = trans.linker_info.to_linker(&mut cmd, &sess);
link_args(&mut *linker, sess, crate_type, tmpdir,
// on other dylibs (e.g. other native deps).
add_local_native_libraries(cmd, sess);
add_upstream_rust_crates(cmd, sess, crate_type, tmpdir);
- add_upstream_native_libraries(cmd, sess);
+ add_upstream_native_libraries(cmd, sess, crate_type);
// # Telling the linker what we're doing
match lib.kind {
NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()),
NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()),
+ NativeLibraryKind::NativeStaticNobundle => cmd.link_staticlib(&lib.name.as_str()),
NativeLibraryKind::NativeStatic => bug!(),
}
}
// generic function calls a native function, then the generic function must
// be instantiated in the target crate, meaning that the native symbol must
// also be resolved in the target crate.
-fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session) {
+fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session, crate_type: config::CrateType) {
// Be sure to use a topological sorting of crates because there may be
// interdependencies between native libraries. When passing -nodefaultlibs,
// for example, almost all native libraries depend on libc, so we have to
// This passes RequireStatic, but the actual requirement doesn't matter,
// we're just getting an ordering of crate numbers, we're not worried about
// the paths.
+ let formats = sess.dependency_formats.borrow();
+ let data = formats.get(&crate_type).unwrap();
+
let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic);
for (cnum, _) in crates {
for lib in sess.cstore.native_libraries(cnum) {
match lib.kind {
NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()),
NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()),
-
+ NativeLibraryKind::NativeStaticNobundle => {
+ // Link "static-nobundle" native libs only if the crate they originate from
+ // is being linked statically to the current crate. If it's linked dynamically
+ // or is an rlib already included via some other dylib crate, the symbols from
+ // native libs will have already been included in that dylib.
+ if data[cnum.as_usize() - 1] == Linkage::Static {
+ cmd.link_staticlib(&lib.name.as_str())
+ }
+ },
// ignore statically included native libraries here as we've
// already included them when we included the rust library
// previously
writeln!(f, "LIBRARY")?;
writeln!(f, "EXPORTS")?;
for symbol in self.info.exports[&crate_type].iter() {
+ debug!(" _{}", symbol);
writeln!(f, " {}", symbol)?;
}
Ok(())
}
};
let sig = tcx.erase_late_bound_regions_and_normalize(sig);
- let tuple_input_ty = tcx.intern_tup(sig.inputs());
+ let tuple_input_ty = tcx.intern_tup(sig.inputs(), false);
let sig = tcx.mk_fn_sig(
[bare_fn_ty_maybe_ref, tuple_input_ty].iter().cloned(),
sig.output(),
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
- ty::TyTuple(args) => {
+ ty::TyTuple(args, _) => {
for arg in args {
let arg = glue::get_drop_glue_type(scx, arg);
if scx.type_needs_drop(arg) {
}
}))
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
if tys.len() != 2 {
return None;
}
// return type
signature_metadata.push(match signature.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
+ ty::TyTuple(ref tys, _) if tys.is_empty() => ptr::null_mut(),
_ => type_metadata(cx, signature.output(), span)
});
ty::TyFloat(_) => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
- ty::TyTuple(ref elements) if elements.is_empty() => {
+ ty::TyTuple(ref elements, _) if elements.is_empty() => {
MetadataCreationResult::new(basic_type_metadata(cx, t), false)
}
ty::TyArray(typ, len) => {
usage_site_span).finalize(cx)
}
},
- ty::TyTuple(ref elements) => {
+ ty::TyTuple(ref elements, _) => {
prepare_tuple_metadata(cx,
t,
&elements[..],
let (name, encoding) = match t.sty {
ty::TyNever => ("!", DW_ATE_unsigned),
- ty::TyTuple(ref elements) if elements.is_empty() =>
+ ty::TyTuple(ref elements, _) if elements.is_empty() =>
("()", DW_ATE_unsigned),
ty::TyBool => ("bool", DW_ATE_boolean),
ty::TyChar => ("char", DW_ATE_unsigned_char),
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(match sig.output().sty {
- ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
+ ty::TyTuple(ref tys, _) if tys.is_empty() => ptr::null_mut(),
_ => type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)
});
}
if abi == Abi::RustCall && !sig.inputs().is_empty() {
- if let ty::TyTuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
+ if let ty::TyTuple(args, _) = sig.inputs()[sig.inputs().len() - 1].sty {
for &argument_type in args {
signature.push(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP));
}
push_item_name(cx, def.did, qualified, output);
push_type_params(cx, substs, output);
},
- ty::TyTuple(component_types) => {
+ ty::TyTuple(component_types, _) => {
output.push('(');
for &component_type in component_types {
push_debuginfo_type_name(cx, component_type, true, output);
cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
}
- ty::TyTuple(ref args) => {
+ ty::TyTuple(ref args, _) => {
for (i, arg) in args.iter().enumerate() {
let llfld_a = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
let tuple = self.trans_operand(bcx, operand);
let arg_types = match tuple.ty.sty {
- ty::TyTuple(ref tys) => tys,
+ ty::TyTuple(ref tys, _) => tys,
_ => span_bug!(self.mir.span,
"bad final argument to \"rust-call\" fn {:?}", tuple.ty)
};
let rhs = self.const_operand(rhs, span)?;
let ty = lhs.ty;
let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
- let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool]);
+ let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool], false);
let (lhs, rhs) = (lhs.llval, rhs.llval);
assert!(!ty.is_fp());
// individual LLVM function arguments.
let tupled_arg_tys = match arg_ty.sty {
- ty::TyTuple(ref tys) => tys,
+ ty::TyTuple(ref tys, _) => tys,
_ => bug!("spread argument isn't a tuple?!")
};
lhs.immediate(), rhs.immediate(),
lhs.ty);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
- let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]);
+ let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
let operand = OperandRef {
val: result,
ty: operand_ty
self.push_def_path(adt_def.did, output);
self.push_type_params(substs, iter::empty(), output);
},
- ty::TyTuple(component_types) => {
+ ty::TyTuple(component_types, _) => {
output.push('(');
for &component_type in component_types {
self.push_type_name(component_type, output);
Type::array(&llty, size)
}
- ty::TyTuple(ref tys) if tys.is_empty() => {
+ ty::TyTuple(ref tys, _) if tys.is_empty() => {
Type::nil(cx)
}
let sig = cx.tcx().erase_late_bound_regions_and_normalize(&f.sig);
FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to()
}
- ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx),
+ ty::TyTuple(ref tys, _) if tys.is_empty() => Type::nil(cx),
ty::TyTuple(..) => {
adt::type_of(cx, t)
}
span: output_span
};
- (self.tcx().mk_ty(ty::TyTuple(inputs)), output_binding)
+ (self.tcx().mk_ty(ty::TyTuple(inputs, false)), output_binding)
}
/// Instantiates the path for the given trait reference, assuming that it's
tcx.types.never
},
hir::TyTup(ref fields) => {
- tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(&t)))
+ tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(&t)), false)
}
hir::TyBareFn(ref bf) => {
require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
let mut expected_len = elements.len();
if ddpos.is_some() {
// Require known type only when `..` is present
- if let ty::TyTuple(ref tys) =
+ if let ty::TyTuple(ref tys, _) =
self.structurally_resolved_type(pat.span, expected).sty {
expected_len = tys.len();
}
// from all tuple elements isn't trivial.
TypeVariableOrigin::TypeInference(pat.span)));
let element_tys = tcx.mk_type_list(element_tys_iter);
- let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys));
+ let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys, false));
self.demand_eqtype(pat.span, expected, pat_ty);
for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
self.check_pat(elem, &element_tys[i]);
let t_cast = self.cast_ty;
let t_expr = self.expr_ty;
if t_cast.is_numeric() && t_expr.is_numeric() {
- fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS,
- self.expr.id,
- self.span,
- format!("trivial numeric cast: `{}` as `{}`. Cast can be \
- replaced by coercion, this might require type \
- ascription or a temporary variable",
- fcx.ty_to_string(t_expr),
- fcx.ty_to_string(t_cast)));
+ fcx.tables.borrow_mut().lints.add_lint(
+ lint::builtin::TRIVIAL_NUMERIC_CASTS,
+ self.expr.id,
+ self.span,
+ format!("trivial numeric cast: `{}` as `{}`. Cast can be \
+ replaced by coercion, this might require type \
+ ascription or a temporary variable",
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)));
} else {
- fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_CASTS,
- self.expr.id,
- self.span,
- format!("trivial cast: `{}` as `{}`. Cast can be \
- replaced by coercion, this might require type \
- ascription or a temporary variable",
- fcx.ty_to_string(t_expr),
- fcx.ty_to_string(t_cast)));
+ fcx.tables.borrow_mut().lints.add_lint(
+ lint::builtin::TRIVIAL_CASTS,
+ self.expr.id,
+ self.span,
+ format!("trivial cast: `{}` as `{}`. Cast can be \
+ replaced by coercion, this might require type \
+ ascription or a temporary variable",
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)));
}
}
// Tuple up the arguments and insert the resulting function type into
// the `closures` table.
fn_ty.sig.0 = self.tcx.mk_fn_sig(
- iter::once(self.tcx.intern_tup(fn_ty.sig.skip_binder().inputs())),
+ iter::once(self.tcx.intern_tup(fn_ty.sig.skip_binder().inputs(), false)),
fn_ty.sig.skip_binder().output(),
fn_ty.sig.variadic()
);
arg_param_ty);
let input_tys = match arg_param_ty.sty {
- ty::TyTuple(tys) => tys.into_iter(),
+ ty::TyTuple(tys, _) => tys.into_iter(),
_ => {
return None;
}
Ok(())
}
- ty::TyTuple(tys) => {
+ ty::TyTuple(tys, _) => {
for ty in tys {
iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
}
"cxchg" | "cxchgweak" => (1, vec![tcx.mk_mut_ptr(param(ccx, 0)),
param(ccx, 0),
param(ccx, 0)],
- tcx.intern_tup(&[param(ccx, 0), tcx.types.bool])),
+ tcx.intern_tup(&[param(ccx, 0), tcx.types.bool], false)),
"load" => (1, vec![tcx.mk_imm_ptr(param(ccx, 0))],
param(ccx, 0)),
"store" => (1, vec![tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)],
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" =>
(1, vec![param(ccx, 0), param(ccx, 0)],
- tcx.intern_tup(&[param(ccx, 0), tcx.types.bool])),
+ tcx.intern_tup(&[param(ccx, 0), tcx.types.bool], false)),
"unchecked_div" | "unchecked_rem" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),
match *expected {
Void => match t.sty {
- ty::TyTuple(ref v) if v.is_empty() => {},
+ ty::TyTuple(ref v, _) if v.is_empty() => {},
_ => simple_error(&format!("`{}`", t), "()"),
},
// (The width we pass to LLVM doesn't concern the type checker.)
}
Aggregate(_flatten, ref expected_contents) => {
match t.sty {
- ty::TyTuple(contents) => {
+ ty::TyTuple(contents, _) => {
if contents.len() != expected_contents.len() {
simple_error(&format!("tuple with length {}", contents.len()),
&format!("tuple with length {}", expected_contents.len()));
self_ty, call_expr.id)?;
if let Some(import_id) = pick.import_id {
- self.tcx.used_trait_imports.borrow_mut().insert(import_id);
+ let import_def_id = self.tcx.hir.local_def_id(import_id);
+ debug!("used_trait_import: {:?}", import_def_id);
+ self.used_trait_imports.borrow_mut().insert(import_def_id);
}
self.tcx.check_stability(pick.item.def_id, call_expr.id, span);
self_ty, expr_id)?;
if let Some(import_id) = pick.import_id {
- self.tcx.used_trait_imports.borrow_mut().insert(import_id);
+ let import_def_id = self.tcx.hir.local_def_id(import_id);
+ debug!("used_trait_import: {:?}", import_def_id);
+ self.used_trait_imports.borrow_mut().insert(import_def_id);
}
let def = pick.item.def();
inherent_candidates: Vec<Candidate<'tcx>>,
extension_candidates: Vec<Candidate<'tcx>>,
impl_dups: FxHashSet<DefId>,
- import_id: Option<ast::NodeId>,
/// Collects near misses when the candidate functions are missing a `self` keyword and is only
/// used for error reporting
inherent_candidates: Vec::new(),
extension_candidates: Vec::new(),
impl_dups: FxHashSet(),
- import_id: None,
steps: Rc::new(steps),
opt_simplified_steps: opt_simplified_steps,
static_candidates: Vec::new(),
xform_self_ty: xform_self_ty,
item: item,
kind: InherentImplCandidate(impl_substs, obligations),
- import_id: self.import_id,
+ import_id: None,
});
}
}
xform_self_ty: xform_self_ty,
item: item,
kind: ObjectCandidate,
- import_id: this.import_id,
+ import_id: None,
});
});
}
xform_self_ty: xform_self_ty,
item: item,
kind: WhereClauseCandidate(poly_trait_ref),
- import_id: this.import_id,
+ import_id: None,
});
});
}
for trait_candidate in applicable_traits {
let trait_did = trait_candidate.def_id;
if duplicates.insert(trait_did) {
- self.import_id = trait_candidate.import_id;
- let result = self.assemble_extension_candidates_for_trait(trait_did);
- self.import_id = None;
+ let import_id = trait_candidate.import_id;
+ let result = self.assemble_extension_candidates_for_trait(import_id, trait_did);
result?;
}
}
let mut duplicates = FxHashSet();
for trait_info in suggest::all_traits(self.ccx) {
if duplicates.insert(trait_info.def_id) {
- self.assemble_extension_candidates_for_trait(trait_info.def_id)?;
+ self.assemble_extension_candidates_for_trait(None, trait_info.def_id)?;
}
}
Ok(())
}
fn assemble_extension_candidates_for_trait(&mut self,
+ import_id: Option<ast::NodeId>,
trait_def_id: DefId)
-> Result<(), MethodError<'tcx>> {
debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})",
continue;
}
- self.assemble_extension_candidates_for_trait_impls(trait_def_id, item.clone());
+ self.assemble_extension_candidates_for_trait_impls(import_id, trait_def_id,
+ item.clone());
- self.assemble_closure_candidates(trait_def_id, item.clone())?;
+ self.assemble_closure_candidates(import_id, trait_def_id, item.clone())?;
- self.assemble_projection_candidates(trait_def_id, item.clone());
+ self.assemble_projection_candidates(import_id, trait_def_id, item.clone());
- self.assemble_where_clause_candidates(trait_def_id, item.clone());
+ self.assemble_where_clause_candidates(import_id, trait_def_id, item.clone());
}
Ok(())
}
fn assemble_extension_candidates_for_trait_impls(&mut self,
+ import_id: Option<ast::NodeId>,
trait_def_id: DefId,
item: ty::AssociatedItem) {
let trait_def = self.tcx.lookup_trait_def(trait_def_id);
xform_self_ty: xform_self_ty,
item: item.clone(),
kind: ExtensionImplCandidate(impl_def_id, impl_substs, obligations),
- import_id: self.import_id,
+ import_id: import_id,
});
});
}
}
fn assemble_closure_candidates(&mut self,
+ import_id: Option<ast::NodeId>,
trait_def_id: DefId,
item: ty::AssociatedItem)
-> Result<(), MethodError<'tcx>> {
xform_self_ty: xform_self_ty,
item: item.clone(),
kind: TraitCandidate,
- import_id: self.import_id,
+ import_id: import_id,
});
}
}
fn assemble_projection_candidates(&mut self,
+ import_id: Option<ast::NodeId>,
trait_def_id: DefId,
item: ty::AssociatedItem) {
debug!("assemble_projection_candidates(\
xform_self_ty: xform_self_ty,
item: item.clone(),
kind: TraitCandidate,
- import_id: self.import_id,
+ import_id: import_id,
});
}
}
}
fn assemble_where_clause_candidates(&mut self,
+ import_id: Option<ast::NodeId>,
trait_def_id: DefId,
item: ty::AssociatedItem) {
debug!("assemble_where_clause_candidates(trait_def_id={:?})",
xform_self_ty: xform_self_ty,
item: item.clone(),
kind: WhereClauseCandidate(poly_bound),
- import_id: self.import_id,
+ import_id: import_id,
});
}
}
use TypeAndSubsts;
use lint;
use util::common::{ErrorReported, indenter};
-use util::nodemap::{DefIdMap, FxHashMap, FxHashSet, NodeMap};
+use util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, NodeMap};
use std::cell::{Cell, RefCell};
use std::cmp;
// Obligations which will have to be checked at the end of
// type-checking, after all functions have been inferred.
deferred_obligations: RefCell<Vec<traits::DeferredObligation<'tcx>>>,
+
+ // a set of trait import def-ids that we use during method
+ // resolution; during writeback, this is written into
+ // `tcx.used_trait_imports` for this item def-id
+ used_trait_imports: RefCell<FxHashSet<DefId>>,
}
impl<'a, 'gcx, 'tcx> Deref for Inherited<'a, 'gcx, 'tcx> {
deferred_cast_checks: RefCell::new(Vec::new()),
anon_types: RefCell::new(DefIdMap()),
deferred_obligations: RefCell::new(Vec::new()),
+ used_trait_imports: RefCell::new(DefIdSet()),
}
}
if self.diverges.get() == Diverges::Always {
self.diverges.set(Diverges::WarnedAlways);
- self.tcx.sess.add_lint(lint::builtin::UNREACHABLE_CODE,
- id, span,
- format!("unreachable {}", kind));
+ self.tables.borrow_mut().lints.add_lint(
+ lint::builtin::UNREACHABLE_CODE,
+ id, span,
+ format!("unreachable {}", kind));
}
}
}
/// Apply "fallbacks" to some types
- /// ! gets replaced with (), unconstrained ints with i32, and unconstrained floats with f64.
+ /// unconstrained types get replaced with ! or () (depending on whether
+ /// feature(never_type) is enabled), unconstrained ints with i32, and
+ /// unconstrained floats with f64.
fn default_type_parameters(&self) {
use rustc::ty::error::UnconstrainedNumeric::Neither;
use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
let err_inputs = match tuple_arguments {
DontTupleArguments => err_inputs,
- TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])],
+ TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..], false)],
};
self.check_argument_types(sp, &err_inputs[..], &[], args_no_rcvr,
let formal_tys = if tuple_arguments == TupleArguments {
let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
match tuple_type.sty {
- ty::TyTuple(arg_types) if arg_types.len() != args.len() => {
+ ty::TyTuple(arg_types, _) if arg_types.len() != args.len() => {
parameter_count_error(tcx.sess, sp_args, arg_types.len(), args.len(),
"E0057", false, def_span);
expected_arg_tys = &[];
self.err_args(args.len())
}
- ty::TyTuple(arg_types) => {
+ ty::TyTuple(arg_types, _) => {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
- ty::TyTuple(ref tys) => &tys,
+ ty::TyTuple(ref tys, _) => &tys,
_ => &[]
},
None => &[]
}
})
}
- ty::TyTuple(ref v) => {
+ ty::TyTuple(ref v, _) => {
tuple_like = true;
v.get(idx.node).cloned()
}
hir::ExprTup(ref elts) => {
let flds = expected.only_has_type(self).and_then(|ty| {
match ty.sty {
- ty::TyTuple(ref flds) => Some(&flds[..]),
+ ty::TyTuple(ref flds, _) => Some(&flds[..]),
_ => None
}
});
};
t
});
- let tuple = tcx.mk_tup(elt_ts_iter);
+ let tuple = tcx.mk_tup(elt_ts_iter, false);
if tuple.references_error() {
tcx.types.err
} else {
},
base_t);
// Try to give some advice about indexing tuples.
- if let ty::TyTuple(_) = base_t.sty {
+ if let ty::TyTuple(..) = base_t.sty {
let mut needs_note = true;
// If the index is an integer, we can show the actual
// fixed expression:
use rustc::ty::adjustment;
use rustc::ty::fold::{TypeFolder,TypeFoldable};
use rustc::infer::{InferCtxt, FixupError};
-use rustc::util::nodemap::DefIdMap;
+use rustc::util::nodemap::{DefIdMap, DefIdSet};
use std::cell::Cell;
+use std::mem;
use syntax::ast;
use syntax_pos::Span;
wbcx.visit_deferred_obligations(item_id);
wbcx.visit_type_nodes();
wbcx.visit_cast_types();
+ wbcx.visit_lints();
let tables = self.tcx.alloc_tables(wbcx.tables);
self.tcx.tables.borrow_mut().insert(item_def_id, tables);
+
+ let used_trait_imports = mem::replace(&mut *self.used_trait_imports.borrow_mut(),
+ DefIdSet());
+ debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports);
+ self.tcx.used_trait_imports.borrow_mut().insert(item_def_id, used_trait_imports);
}
}
self.fcx.tables.borrow().cast_kinds.iter().map(|(&key, &value)| (key, value)));
}
+ fn visit_lints(&mut self) {
+ if self.fcx.writeback_errors.get() {
+ return
+ }
+
+ self.fcx.tables.borrow_mut().lints.transfer(&mut self.tables.lints);
+ }
+
fn visit_anon_types(&self) {
if self.fcx.writeback_errors.get() {
return
use rustc::hir;
use rustc::hir::itemlikevisit::ItemLikeVisitor;
+use rustc::util::nodemap::DefIdSet;
-struct UnusedTraitImportVisitor<'a, 'tcx: 'a> {
+struct CheckVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ used_trait_imports: DefIdSet,
}
-impl<'a, 'tcx> UnusedTraitImportVisitor<'a, 'tcx> {
+impl<'a, 'tcx> CheckVisitor<'a, 'tcx> {
fn check_import(&self, id: ast::NodeId, span: Span) {
if !self.tcx.maybe_unused_trait_imports.contains(&id) {
return;
}
- if self.tcx.used_trait_imports.borrow().contains(&id) {
+
+ let import_def_id = self.tcx.hir.local_def_id(id);
+ if self.used_trait_imports.contains(&import_def_id) {
return;
}
}
}
-impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for UnusedTraitImportVisitor<'a, 'tcx> {
+impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &hir::Item) {
if item.vis == hir::Public || item.span == DUMMY_SP {
return;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _task = tcx.dep_graph.in_task(DepNode::UnusedTraitCheck);
- let mut visitor = UnusedTraitImportVisitor { tcx: tcx };
+
+ let mut used_trait_imports = DefIdSet();
+ for &body_id in tcx.hir.krate().bodies.keys() {
+ let item_id = tcx.hir.body_owner(body_id);
+ let item_def_id = tcx.hir.local_def_id(item_id);
+
+ // this will have been written by the main typeck pass
+ if let Some(imports) = tcx.used_trait_imports.borrow().get(&item_def_id) {
+ debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
+ used_trait_imports.extend(imports);
+ } else {
+ debug!("GatherVisitor: item_def_id={:?} with no imports", item_def_id);
+ }
+ }
+
+ let mut visitor = CheckVisitor { tcx, used_trait_imports };
tcx.hir.krate().visit_all_item_likes(&mut visitor);
}
// done by the orphan and overlap modules. Then we build up various
// mappings. That mapping code resides here.
+use dep_graph::DepTrackingMap;
use hir::def_id::DefId;
-use rustc::ty::{self, TyCtxt, TypeFoldable};
+use rustc::ty::{self, maps, TyCtxt, TypeFoldable};
use rustc::ty::{Ty, TyBool, TyChar, TyError};
use rustc::ty::{TyParam, TyRawPtr};
use rustc::ty::{TyRef, TyAdt, TyDynamic, TyNever, TyTuple};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::{Item, ItemImpl};
use rustc::hir;
+use std::cell::RefMut;
mod builtin;
mod orphan;
mod overlap;
mod unsafety;
-struct CoherenceChecker<'a, 'tcx: 'a> {
+struct CoherenceCollect<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ inherent_impls: RefMut<'a, DepTrackingMap<maps::InherentImpls<'tcx>>>,
}
-impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceChecker<'a, 'tcx> {
+impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceCollect<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
if let ItemImpl(..) = item.node {
self.check_implementation(item)
}
}
-impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
+impl<'a, 'tcx> CoherenceCollect<'a, 'tcx> {
+ fn check(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
+ let inherent_impls = tcx.inherent_impls.borrow_mut();
+ let mut this = &mut CoherenceCollect { tcx, inherent_impls };
+
+ // Check implementations and traits. This populates the tables
+ // containing the inherent methods and extension methods. It also
+ // builds up the trait inheritance table.
+ tcx.visit_all_item_likes_in_krate(DepNode::CoherenceCheckImpl, this);
+ }
+
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option<DefId> {
match ty.sty {
}
}
- fn check(&mut self) {
- // Check implementations and traits. This populates the tables
- // containing the inherent methods and extension methods. It also
- // builds up the trait inheritance table.
- self.tcx.visit_all_item_likes_in_krate(DepNode::CoherenceCheckImpl, self);
- }
-
- fn check_implementation(&self, item: &Item) {
+ fn check_implementation(&mut self, item: &Item) {
let tcx = self.tcx;
let impl_did = tcx.hir.local_def_id(item.id);
let self_type = tcx.item_type(impl_did);
}
}
- fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
- self.tcx.inherent_impls.borrow_mut().push(base_def_id, impl_def_id);
+ fn add_inherent_impl(&mut self, base_def_id: DefId, impl_def_id: DefId) {
+ // Subtle: it'd be better to collect these into a local map
+ // and then write the vector only once all items are known,
+ // but that leads to degenerate dep-graphs. The problem is
+ // that the write of that big vector winds up having reads
+ // from *all* impls in the krate, since we've lost the
+ // precision basically. This would be ok in the firewall
+ // model so once we've made progress towards that we can modify
+ // the strategy here. In the meantime, using `push` is ok
+ // because we are doing this as a pre-pass before anyone
+ // actually reads from `inherent_impls` -- and we know this is
+ // true because we hold the refcell lock.
+ self.inherent_impls.push(base_def_id, impl_def_id);
}
fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'tcx>, impl_def_id: DefId) {
}
pub fn check_coherence(ccx: &CrateCtxt) {
+ CoherenceCollect::check(ccx.tcx);
+
let _task = ccx.tcx.dep_graph.in_task(DepNode::Coherence);
- CoherenceChecker { tcx: ccx.tcx }.check();
unsafety::check(ccx.tcx);
orphan::check(ccx.tcx);
overlap::check(ccx.tcx);
ccx.tcx.item_types.borrow_mut().insert(ty_f.did, tt);
let def_id = ccx.tcx.hir.local_def_id(field.id);
- ccx.tcx.item_types.borrow_mut().insert(def_id, tt);
+ assert_eq!(def_id, ty_f.did);
ccx.tcx.generics.borrow_mut().insert(def_id, struct_generics);
ccx.tcx.predicates.borrow_mut().insert(def_id, struct_predicates.clone());
}
let const_def_id = ccx.tcx.hir.local_def_id(trait_item.id);
generics_of_def_id(ccx, const_def_id);
let ty = ccx.icx(&trait_predicates).to_ty(&ty);
- tcx.item_types.borrow_mut().insert(const_def_id, ty);
- convert_associated_const(ccx, TraitContainer(trait_def_id),
- trait_item.id, ty);
+ convert_associated_const(ccx,
+ TraitContainer(trait_def_id),
+ trait_item.id,
+ ty);
}
hir::TraitItemKind::Type(_, ref opt_ty) => {
let const_def_id = ccx.tcx.hir.local_def_id(impl_item.id);
generics_of_def_id(ccx, const_def_id);
let ty = ccx.icx(&impl_predicates).to_ty(&ty);
- tcx.item_types.borrow_mut().insert(const_def_id, ty);
- convert_associated_const(ccx, ImplContainer(impl_def_id),
- impl_item.id, ty);
+ convert_associated_const(ccx,
+ ImplContainer(impl_def_id),
+ impl_item.id,
+ ty);
}
hir::ImplItemKind::Type(ref ty) => {
items);
trait_predicates.predicates.extend(assoc_predicates);
- let prev_predicates = tcx.predicates.borrow_mut().insert(def_id, trait_predicates);
- assert!(prev_predicates.is_none());
-
+ tcx.predicates.borrow_mut().insert(def_id, trait_predicates);
return;
fn predicates_for_associated_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
};
let predicates = ty_generic_predicates(ccx, generics, None, vec![], false);
- let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id,
- predicates.clone());
- assert!(prev_predicates.is_none());
+ ccx.tcx.predicates.borrow_mut().insert(def_id, predicates.clone());
predicates
}
};
let predicates = ty_generic_predicates(ccx, generics, None, vec![], false);
- let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id, predicates);
- assert!(prev_predicates.is_none());
+ ccx.tcx.predicates.borrow_mut().insert(def_id, predicates);
}
// Is it marked with ?Sized
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(conservative_impl_trait)]
+#![feature(field_init_shorthand)]
#![feature(loop_break_value)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
self.add_constraints_from_mt(generics, mt, variance);
}
- ty::TyTuple(subtys) => {
+ ty::TyTuple(subtys, _) => {
for &subty in subtys {
self.add_constraints_from_ty(generics, subty, variance);
}
item_variances);
}
- let newly_added = tcx.item_variance_map
- .borrow_mut()
- .insert(item_def_id, Rc::new(item_variances))
- .is_none();
- assert!(newly_added);
+ tcx.item_variance_map
+ .borrow_mut()
+ .insert(item_def_id, Rc::new(item_variances));
}
}
// parameters".
if self.num_inferred() == inferreds_on_entry {
let item_def_id = self.tcx.hir.local_def_id(item_id);
- let newly_added = self.tcx
+ self.tcx
.item_variance_map
.borrow_mut()
- .insert(item_def_id, self.empty_variances.clone())
- .is_none();
- assert!(newly_added);
+ .insert(item_def_id, self.empty_variances.clone());
}
}
Some(did) if cx.tcx.lang_items.fn_trait_kind(did).is_some() => {
assert_eq!(types.len(), 1);
let inputs = match types[0].sty {
- ty::TyTuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(),
+ ty::TyTuple(ref tys, _) => tys.iter().map(|t| t.clean(cx)).collect(),
_ => {
return PathParameters::AngleBracketed {
lifetimes: lifetimes,
let output = None;
// FIXME(#20299) return type comes from a projection now
// match types[1].sty {
- // ty::TyTuple(ref v) if v.is_empty() => None, // -> ()
+ // ty::TyTuple(ref v, _) if v.is_empty() => None, // -> ()
// _ => Some(types[1].clean(cx))
// };
PathParameters::Parenthesized {
// collect any late bound regions
let mut late_bounds = vec![];
for ty_s in self.input_types().skip(1) {
- if let ty::TyTuple(ts) = ty_s.sty {
+ if let ty::TyTuple(ts, _) = ty_s.sty {
for &ty_s in ts {
if let ty::TyRef(ref reg, _) = ty_s.sty {
if let &ty::Region::ReLateBound(..) = *reg {
Never
}
}
- ty::TyTuple(ref t) => Tuple(t.clean(cx)),
+ ty::TyTuple(ref t, _) => Tuple(t.clean(cx)),
ty::TyProjection(ref data) => data.clean(cx),
nightly_options::check_nightly_options(&matches, &opts());
if matches.opt_present("h") || matches.opt_present("help") {
- usage(&args[0]);
+ usage("rustdoc");
return 0;
} else if matches.opt_present("version") {
rustc_driver::version("rustdoc", &matches);
prog
}
+// FIXME(aburka): use a real parser to deal with multiline attributes
fn partition_source(s: &str) -> (String, String) {
use std_unicode::str::UnicodeStr;
for line in s.lines() {
let trimline = line.trim();
let header = trimline.is_whitespace() ||
- trimline.starts_with("#![feature");
+ trimline.starts_with("#![");
if !header || after_header {
after_header = true;
after.push_str(line);
#[unstable(feature = "fused", issue = "35602")]
impl FusedIterator for EscapeDefault {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for EscapeDefault {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("EscapeDefault { .. }")
/// attacks such as HashDoS.
///
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
-/// `HashMap::default`, `HashMap::with_hasher`, and
-/// `HashMap::with_capacity_and_hasher` methods. Many alternative algorithms
-/// are available on crates.io, such as the `fnv` crate.
+/// [`HashMap::default`], [`HashMap::with_hasher`], and
+/// [`HashMap::with_capacity_and_hasher`] methods. Many alternative algorithms
+/// are available on crates.io, such as the [`fnv`] crate.
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
/// [`Cell`]: ../../std/cell/struct.Cell.html
+/// [`HashMap::default`]: #method.default
+/// [`HashMap::with_hasher`]: #method.with_hasher
+/// [`HashMap::with_capacity_and_hasher`]: #method.with_capacity_and_hasher
+/// [`fnv`]: https://crates.io/crates/fnv
///
/// ```
/// use std::collections::HashMap;
///
/// # Panics
///
- /// Panics if the new allocation size overflows `usize`.
+ /// Panics if the new allocation size overflows [`usize`].
+ ///
+ /// [`usize`]: ../../std/primitive.usize.html
///
/// # Examples
///
/// Inserts a key-value pair into the map.
///
- /// If the map did not have this key present, `None` is returned.
+ /// If the map did not have this key present, [`None`] is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical. See the [module-level
/// documentation] for more.
///
+ /// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [module-level documentation]: index.html#insert-and-complex-keys
///
/// # Examples
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V: Debug> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V: Debug> fmt::Debug for Keys<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: Debug, V: Debug> fmt::Debug for Values<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for IterMut<'a, K, V>
where K: fmt::Debug,
V: fmt::Debug,
#[unstable(feature = "fused", issue = "35602")]
impl<K, V> FusedIterator for IntoIter<K, V> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<K: Debug, V: Debug> fmt::Debug for IntoIter<K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for ValuesMut<'a, K, V>
where K: fmt::Debug,
V: fmt::Debug,
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Drain<'a, K, V> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K, V> fmt::Debug for Drain<'a, K, V>
where K: fmt::Debug,
V: fmt::Debug,
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for RandomState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("RandomState { .. }")
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K> FusedIterator for Iter<'a, K> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: fmt::Debug> fmt::Debug for Iter<'a, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
#[unstable(feature = "fused", issue = "35602")]
impl<K> FusedIterator for IntoIter<K> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<K: fmt::Debug> fmt::Debug for IntoIter<K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let entries_iter = self.iter.inner.iter().map(|(k, _)| k);
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K> FusedIterator for Drain<'a, K> {}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, K: fmt::Debug> fmt::Debug for Drain<'a, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let entries_iter = self.iter.inner.iter().map(|(k, _)| k);
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T, S> fmt::Debug for Intersection<'a, T, S>
where T: fmt::Debug + Eq + Hash,
S: BuildHasher,
{
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T, S> fmt::Debug for Difference<'a, T, S>
where T: fmt::Debug + Eq + Hash,
S: BuildHasher,
{
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T, S> fmt::Debug for SymmetricDifference<'a, T, S>
where T: fmt::Debug + Eq + Hash,
S: BuildHasher,
{
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T, S> fmt::Debug for Union<'a, T, S>
where T: fmt::Debug + Eq + Hash,
S: BuildHasher,
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Vars {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Vars { .. }")
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for VarsOs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("VarsOs { .. }")
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a> fmt::Debug for SplitPaths<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("SplitPaths { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Args { .. }")
fn next_back(&mut self) -> Option<OsString> { self.inner.next_back() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ArgsOs {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ArgsOs { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Metadata {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Metadata")
done_first: bool,
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: fmt::Debug, U: fmt::Debug> fmt::Debug for Chain<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Chain")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Stdin {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Stdin { .. }")
fn consume(&mut self, n: usize) { self.inner.consume(n) }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a> fmt::Debug for StdinLock<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("StdinLock { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Stdout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Stdout { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a> fmt::Debug for StdoutLock<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("StdoutLock { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Stderr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Stderr { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a> fmt::Debug for StderrLock<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("StderrLock { .. }")
fn consume(&mut self, _n: usize) {}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Empty {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Empty { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Repeat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Repeat { .. }")
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Sink {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Sink { .. }")
#![feature(oom)]
#![feature(optin_builtin_traits)]
#![feature(panic_unwind)]
+#![feature(peek)]
#![feature(placement_in_syntax)]
#![feature(prelude_import)]
#![feature(pub_restricted)]
fn next(&mut self) -> Option<SocketAddr> { self.0.next() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[unstable(feature = "lookup_host", reason = "unsure about the returned \
+ iterator and returning socket \
+ addresses",
+ issue = "27705")]
impl fmt::Debug for LookupHost {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("LookupHost { .. }")
self.0.write_timeout()
}
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(peek)]
+ /// use std::net::TcpStream;
+ ///
+ /// let stream = TcpStream::connect("127.0.0.1:8000")
+ /// .expect("couldn't bind to address");
+ /// let mut buf = [0; 10];
+ /// let len = stream.peek(&mut buf).expect("peek failed");
+ /// ```
+ #[unstable(feature = "peek", issue = "38980")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
/// Sets the value of the `TCP_NODELAY` option on this socket.
///
/// If set, this option disables the Nagle algorithm. This means that
Err(e) => panic!("unexpected error {}", e),
}
}
+
+ #[test]
+ fn peek() {
+ each_ip(&mut |addr| {
+ let (txdone, rxdone) = channel();
+
+ let srv = t!(TcpListener::bind(&addr));
+ let _t = thread::spawn(move|| {
+ let mut cl = t!(srv.accept()).0;
+ cl.write(&[1,3,3,7]).unwrap();
+ t!(rxdone.recv());
+ });
+
+ let mut c = t!(TcpStream::connect(&addr));
+ let mut b = [0; 10];
+ for _ in 1..3 {
+ let len = c.peek(&mut b).unwrap();
+ assert_eq!(len, 4);
+ }
+ let len = c.read(&mut b).unwrap();
+ assert_eq!(len, 4);
+
+ t!(c.set_nonblocking(true));
+ match c.peek(&mut b) {
+ Ok(_) => panic!("expected error"),
+ Err(ref e) if e.kind() == ErrorKind::WouldBlock => {}
+ Err(e) => panic!("unexpected error {}", e),
+ }
+ t!(txdone.send(()));
+ })
+ }
}
self.0.recv_from(buf)
}
+ /// Receives data from the socket, without removing it from the queue.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call.
+ ///
+ /// On success, returns the number of bytes peeked and the address from
+ /// whence the data came.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(peek)]
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// let mut buf = [0; 10];
+ /// let (number_of_bytes, src_addr) = socket.peek_from(&mut buf)
+ /// .expect("Didn't receive data");
+ /// ```
+ #[unstable(feature = "peek", issue = "38980")]
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.0.peek_from(buf)
+ }
+
/// Sends data on the socket to the given address. On success, returns the
/// number of bytes written.
///
self.0.recv(buf)
}
+    /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+ ///
+ /// # Errors
+ ///
+ /// This method will fail if the socket is not connected. The `connect` method
+ /// will connect this socket to a remote address.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(peek)]
+ /// use std::net::UdpSocket;
+ ///
+ /// let socket = UdpSocket::bind("127.0.0.1:34254").expect("couldn't bind to address");
+ /// socket.connect("127.0.0.1:8080").expect("connect function failed");
+ /// let mut buf = [0; 10];
+ /// match socket.peek(&mut buf) {
+ /// Ok(received) => println!("received {} bytes", received),
+ /// Err(e) => println!("peek function failed: {:?}", e),
+ /// }
+ /// ```
+ #[unstable(feature = "peek", issue = "38980")]
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.peek(buf)
+ }
+
/// Moves this UDP socket into or out of nonblocking mode.
///
/// On Unix this corresponds to calling fcntl, and on Windows this
assert_eq!(b"hello world", &buf[..]);
}
+ #[test]
+ fn connect_send_peek_recv() {
+ each_ip(&mut |addr, _| {
+ let socket = t!(UdpSocket::bind(&addr));
+ t!(socket.connect(addr));
+
+ t!(socket.send(b"hello world"));
+
+ for _ in 1..3 {
+ let mut buf = [0; 11];
+ let size = t!(socket.peek(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ }
+
+ let mut buf = [0; 11];
+ let size = t!(socket.recv(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ })
+ }
+
+ #[test]
+ fn peek_from() {
+ each_ip(&mut |addr, _| {
+ let socket = t!(UdpSocket::bind(&addr));
+ t!(socket.send_to(b"hello world", &addr));
+
+ for _ in 1..3 {
+ let mut buf = [0; 11];
+ let (size, _) = t!(socket.peek_from(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ }
+
+ let mut buf = [0; 11];
+ let (size, _) = t!(socket.recv_from(&mut buf));
+ assert_eq!(b"hello world", &buf[..]);
+ assert_eq!(size, 11);
+ })
+ }
+
#[test]
fn ttl() {
let ttl = 100;
#[doc(hidden)] __variant2,
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for c_void {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("c_void")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: fmt::Debug> fmt::Debug for AssertUnwindSafe<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("AssertUnwindSafe")
fn into_inner(self) -> imp::Process { self.handle }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Child {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Child")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStdin {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ChildStdin { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStdout {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ChildStdout { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStderr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ChildStderr { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Stdio {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Stdio { .. }")
///
/// # Examples
///
+/// Due to this function’s behavior regarding destructors, a conventional way
+/// to use the function is to extract the actual computation to another
+/// function and compute the exit code from its return value:
+///
/// ```
-/// use std::process;
+/// use std::io::{self, Write};
+///
+/// fn run_app() -> Result<(), ()> {
+/// // Application logic here
+/// Ok(())
+/// }
///
-/// process::exit(0);
+/// fn main() {
+/// ::std::process::exit(match run_app() {
+/// Ok(_) => 0,
+/// Err(err) => {
+/// writeln!(io::stderr(), "error: {:?}", err).unwrap();
+/// 1
+/// }
+/// });
+/// }
/// ```
///
/// Due to [platform-specific behavior], the exit code for this example will be
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BarrierWaitResult(bool);
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Barrier {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Barrier { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for BarrierWaitResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BarrierWaitResult")
/// consumes no CPU time while waiting for an event to occur. Condition
/// variables are typically associated with a boolean predicate (a condition)
/// and a mutex. The predicate is always verified inside of the mutex before
-/// determining that thread must block.
+/// determining that a thread must block.
///
/// Functions in this module will block the current **thread** of execution and
/// are bindings to system-provided condition variables where possible. Note
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Condvar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Condvar { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MutexGuard")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Once {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Once { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T: fmt::Debug> fmt::Debug for RwLockReadGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("RwLockReadGuard")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<'a, T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("RwLockWriteGuard")
/// an error indicating why the exec (or another part of the setup of the
/// `Command`) failed.
///
+ /// `exec` not returning has the same implications as calling
+ /// [`process::exit`] – no destructors on the current stack or any other
+ /// thread’s stack will be run. Therefore, it is recommended to only call
+ /// `exec` at a point where it is fine to not run any destructors. Note,
+    /// that the underlying `execvp` call independently guarantees that all memory is
+ /// freed and all file descriptors with the `CLOEXEC` option (set by default
+ /// on all file descriptors opened by the standard library) are closed.
+ ///
/// This function, unlike `spawn`, will **not** `fork` the process to create
/// a new child. Like spawn, however, the default behavior for the stdio
/// descriptors will be to inherited from the current process.
///
+ /// [`process::exit`]: ../../../process/fn.exit.html
+ ///
/// # Notes
///
/// The process may be in a "broken state" if this function returns in
use ffi::CStr;
use io;
-use libc::{self, c_int, size_t, sockaddr, socklen_t, EAI_SYSTEM};
+use libc::{self, c_int, c_void, size_t, sockaddr, socklen_t, EAI_SYSTEM, MSG_PEEK};
+use mem;
use net::{SocketAddr, Shutdown};
use str;
use sys::fd::FileDesc;
use sys_common::{AsInner, FromInner, IntoInner};
-use sys_common::net::{getsockopt, setsockopt};
+use sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr};
use time::Duration;
pub use sys::{cvt, cvt_r};
self.0.duplicate().map(Socket)
}
+ fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::recv(self.0.raw(),
+ buf.as_mut_ptr() as *mut c_void,
+ buf.len(),
+ flags)
+ })?;
+ Ok(ret as usize)
+ }
+
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
- self.0.read(buf)
+ self.recv_with_flags(buf, 0)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, MSG_PEEK)
+ }
+
+ fn recv_from_with_flags(&self, buf: &mut [u8], flags: c_int)
+ -> io::Result<(usize, SocketAddr)> {
+ let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() };
+ let mut addrlen = mem::size_of_val(&storage) as libc::socklen_t;
+
+ let n = cvt(unsafe {
+ libc::recvfrom(self.0.raw(),
+ buf.as_mut_ptr() as *mut c_void,
+ buf.len(),
+ flags,
+ &mut storage as *mut _ as *mut _,
+ &mut addrlen)
+ })?;
+ Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, 0)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, MSG_PEEK)
}
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
pub const FSCTL_SET_REPARSE_POINT: DWORD = 0x900a4;
pub const SYMBOLIC_LINK_FLAG_DIRECTORY: DWORD = 0x1;
+pub const SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE: DWORD = 0x2;
// Note that these are not actually HANDLEs, just values to pass to GetStdHandle
pub const STD_INPUT_HANDLE: DWORD = -10i32 as DWORD;
pub const IP_DROP_MEMBERSHIP: c_int = 13;
pub const IPV6_ADD_MEMBERSHIP: c_int = 12;
pub const IPV6_DROP_MEMBERSHIP: c_int = 13;
+pub const MSG_PEEK: c_int = 0x2;
#[repr(C)]
pub struct ip_mreq {
let src = to_u16s(src)?;
let dst = to_u16s(dst)?;
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
- cvt(unsafe {
- c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as c::BOOL
- })?;
+ // Formerly, symlink creation required the SeCreateSymbolicLink privilege. For the Windows 10
+ // Creators Update, Microsoft loosened this to allow unprivileged symlink creation if the
+ // computer is in Developer Mode, but SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE must be
+ // added to dwFlags to opt into this behaviour.
+ let result = cvt(unsafe {
+ c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(),
+ flags | c::SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE) as c::BOOL
+ });
+ if let Err(err) = result {
+ if err.raw_os_error() == Some(c::ERROR_INVALID_PARAMETER as i32) {
+ // Older Windows objects to SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE,
+ // so if we encounter ERROR_INVALID_PARAMETER, retry without that flag.
+ cvt(unsafe {
+ c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as c::BOOL
+ })?;
+ } else {
+ return Err(err);
+ }
+ }
Ok(())
}
Ok(socket)
}
- pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
let len = cmp::min(buf.len(), i32::max_value() as usize) as i32;
unsafe {
- match c::recv(self.0, buf.as_mut_ptr() as *mut c_void, len, 0) {
+ match c::recv(self.0, buf.as_mut_ptr() as *mut c_void, len, flags) {
-1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0),
-1 => Err(last_error()),
n => Ok(n as usize)
}
}
+ pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, 0)
+ }
+
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.recv_with_flags(buf, c::MSG_PEEK)
+ }
+
+ fn recv_from_with_flags(&self, buf: &mut [u8], flags: c_int)
+ -> io::Result<(usize, SocketAddr)> {
+ let mut storage: c::SOCKADDR_STORAGE_LH = unsafe { mem::zeroed() };
+ let mut addrlen = mem::size_of_val(&storage) as c::socklen_t;
+ let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
+
+ // On unix when a socket is shut down all further reads return 0, so we
+ // do the same on windows to map a shut down socket to returning EOF.
+ unsafe {
+ match c::recvfrom(self.0,
+ buf.as_mut_ptr() as *mut c_void,
+ len,
+ flags,
+ &mut storage as *mut _ as *mut _,
+ &mut addrlen) {
+ -1 if c::WSAGetLastError() == c::WSAESHUTDOWN => {
+ Ok((0, net::sockaddr_to_addr(&storage, addrlen as usize)?))
+ },
+ -1 => Err(last_error()),
+ n => Ok((n as usize, net::sockaddr_to_addr(&storage, addrlen as usize)?)),
+ }
+ }
+ }
+
+ pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, 0)
+ }
+
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.recv_from_with_flags(buf, c::MSG_PEEK)
+ }
+
pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
let mut me = self;
(&mut me).read_to_end(buf)
}
}
-fn sockaddr_to_addr(storage: &c::sockaddr_storage,
+pub fn sockaddr_to_addr(storage: &c::sockaddr_storage,
len: usize) -> io::Result<SocketAddr> {
match storage.ss_family as c_int {
c::AF_INET => {
self.inner.timeout(c::SO_SNDTIMEO)
}
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
- let mut storage: c::sockaddr_storage = unsafe { mem::zeroed() };
- let mut addrlen = mem::size_of_val(&storage) as c::socklen_t;
- let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
+ self.inner.recv_from(buf)
+ }
- let n = cvt(unsafe {
- c::recvfrom(*self.inner.as_inner(),
- buf.as_mut_ptr() as *mut c_void,
- len, 0,
- &mut storage as *mut _ as *mut _, &mut addrlen)
- })?;
- Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
+ pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ self.inner.peek_from(buf)
}
pub fn send_to(&self, buf: &[u8], dst: &SocketAddr) -> io::Result<usize> {
self.inner.read(buf)
}
+ pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+ self.inner.peek(buf)
+ }
+
pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
let len = cmp::min(buf.len(), <wrlen_t>::max_value() as usize) as wrlen_t;
let ret = cvt(unsafe {
init: fn() -> T,
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: 'static> fmt::Debug for LocalKey<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("LocalKey { .. }")
marker: marker::PhantomData<Cell<T>>,
}
- #[stable(feature = "std_debug", since = "1.15.0")]
impl<T> fmt::Debug for Key<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Key { .. }")
}
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[unstable(feature = "thread_id", issue = "21507")]
impl fmt::Debug for ThreadId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ThreadId { .. }")
fn into_inner(self) -> imp::Thread { self.0.native.unwrap() }
}
-#[stable(feature = "std_debug", since = "1.15.0")]
+#[stable(feature = "std_debug", since = "1.16.0")]
impl<T> fmt::Debug for JoinHandle<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("JoinHandle { .. }")
}
}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
impl fmt::Display for CaseMappingIter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
}
}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
+#[stable(feature = "char_struct_display", since = "1.16.0")]
impl fmt::Display for ToLowercase {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
-#[stable(feature = "char_struct_display", since = "1.17.0")]
+#[stable(feature = "char_struct_display", since = "1.16.0")]
impl fmt::Display for ToUppercase {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
pub use self::SyntaxExtension::{MultiDecorator, MultiModifier, NormalTT, IdentTT};
-use ast::{self, Attribute, Name, PatKind};
+use ast::{self, Attribute, Name, PatKind, MetaItem};
use attr::HasAttrs;
use codemap::{self, CodeMap, ExpnInfo, Spanned, respan};
use syntax_pos::{Span, ExpnId, NO_EXPANSION};
}
}
+pub type BuiltinDeriveFn =
+ for<'cx> fn(&'cx mut ExtCtxt, Span, &MetaItem, &Annotatable, &mut FnMut(Annotatable));
+
/// An enum representing the different kinds of syntax extensions.
pub enum SyntaxExtension {
/// A syntax extension that is attached to an item and creates new items
///
IdentTT(Box<IdentMacroExpander>, Option<Span>, bool),
- CustomDerive(Box<MultiItemModifier>),
+ /// An attribute-like procedural macro. TokenStream -> TokenStream.
+ /// The input is the annotated item.
+ /// Allows generating code to implement a Trait for a given struct
+ /// or enum item.
+ ProcMacroDerive(Box<MultiItemModifier>),
+
+ /// An attribute-like procedural macro that derives a builtin trait.
+ BuiltinDerive(BuiltinDeriveFn),
}
pub type NamedSyntaxExtension = (Name, SyntaxExtension);
fn find_attr_invoc(&mut self, attrs: &mut Vec<Attribute>) -> Option<Attribute>;
fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, force: bool)
-> Result<Rc<SyntaxExtension>, Determinacy>;
+ fn resolve_builtin_macro(&mut self, tname: Name) -> Result<Rc<SyntaxExtension>, Determinacy>;
+ fn resolve_derive_macro(&mut self, scope: Mark, path: &ast::Path, force: bool)
+ -> Result<Rc<SyntaxExtension>, Determinacy>;
}
#[derive(Copy, Clone, Debug)]
-> Result<Rc<SyntaxExtension>, Determinacy> {
Err(Determinacy::Determined)
}
+ fn resolve_builtin_macro(&mut self, _tname: Name) -> Result<Rc<SyntaxExtension>, Determinacy> {
+ Err(Determinacy::Determined)
+ }
+ fn resolve_derive_macro(&mut self, _scope: Mark, _path: &ast::Path, _force: bool)
+ -> Result<Rc<SyntaxExtension>, Determinacy> {
+ Err(Determinacy::Determined)
+ }
}
#[derive(Clone)]
--- /dev/null
+// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ast::Name;
+use attr;
+use ast::{self, NestedMetaItem}; use ext::base::{ExtCtxt, SyntaxExtension};
+use codemap;
+use ext::build::AstBuilder;
+use feature_gate;
+use symbol::Symbol;
+use syntax_pos::Span;
+
+pub fn derive_attr_trait<'a>(cx: &mut ExtCtxt, attr: &'a ast::Attribute)
+ -> Option<&'a NestedMetaItem> {
+ if attr.name() != "derive" {
+ return None;
+ }
+ if attr.value_str().is_some() {
+ cx.span_err(attr.span, "unexpected value in `derive`");
+ return None;
+ }
+
+ let traits = attr.meta_item_list().unwrap_or(&[]);
+
+ if traits.is_empty() {
+ cx.span_warn(attr.span, "empty trait list in `derive`");
+ return None;
+ }
+
+ return traits.get(0);
+}
+
+pub fn verify_derive_attrs(cx: &mut ExtCtxt, attrs: &[ast::Attribute]) {
+ for attr in attrs {
+ if attr.name() != "derive" {
+ continue;
+ }
+
+ if attr.value_str().is_some() {
+ cx.span_err(attr.span, "unexpected value in `derive`");
+ }
+
+ let traits = attr.meta_item_list().unwrap_or(&[]).to_owned();
+
+ if traits.is_empty() {
+ cx.span_warn(attr.span, "empty trait list in `derive`");
+ attr::mark_used(&attr);
+ continue;
+ }
+ for titem in traits {
+ if titem.word().is_none() {
+ cx.span_err(titem.span, "malformed `derive` entry");
+ }
+ }
+ }
+}
+
+#[derive(PartialEq, Debug, Clone, Copy)]
+pub enum DeriveType {
+ Legacy,
+ ProcMacro,
+ Builtin
+}
+
+impl DeriveType {
+ // Classify a derive trait name by resolving the macro.
+ pub fn classify(cx: &mut ExtCtxt, tname: Name) -> DeriveType {
+ let legacy_derive_name = Symbol::intern(&format!("derive_{}", tname));
+
+ if let Ok(_) = cx.resolver.resolve_builtin_macro(legacy_derive_name) {
+ return DeriveType::Legacy;
+ }
+
+ match cx.resolver.resolve_builtin_macro(tname) {
+ Ok(ext) => match *ext {
+ SyntaxExtension::BuiltinDerive(..) => DeriveType::Builtin,
+ _ => DeriveType::ProcMacro,
+ },
+ Err(_) => DeriveType::ProcMacro,
+ }
+ }
+}
+
+pub fn get_derive_attr(cx: &mut ExtCtxt, attrs: &mut Vec<ast::Attribute>,
+ derive_type: DeriveType) -> Option<ast::Attribute> {
+ for i in 0..attrs.len() {
+ if attrs[i].name() != "derive" {
+ continue;
+ }
+
+ if attrs[i].value_str().is_some() {
+ continue;
+ }
+
+ let mut traits = attrs[i].meta_item_list().unwrap_or(&[]).to_owned();
+
+ // First, weed out malformed #[derive]
+ traits.retain(|titem| titem.word().is_some());
+
+ let mut titem = None;
+
+ // See if we can find a matching trait.
+ for j in 0..traits.len() {
+ let tname = match traits[j].name() {
+ Some(tname) => tname,
+ _ => continue,
+ };
+
+ if DeriveType::classify(cx, tname) == derive_type {
+ titem = Some(traits.remove(j));
+ break;
+ }
+ }
+
+ // If we find a trait, remove the trait from the attribute.
+ if let Some(titem) = titem {
+ if traits.len() == 0 {
+ attrs.remove(i);
+ } else {
+ let derive = Symbol::intern("derive");
+ let mitem = cx.meta_list(titem.span, derive, traits);
+ attrs[i] = cx.attribute(titem.span, mitem);
+ }
+ let derive = Symbol::intern("derive");
+ let mitem = cx.meta_list(titem.span, derive, vec![titem]);
+ return Some(cx.attribute(mitem.span, mitem));
+ }
+ }
+ return None;
+}
+
+fn allow_unstable(cx: &mut ExtCtxt, span: Span, attr_name: &str) -> Span {
+ Span {
+ expn_id: cx.codemap().record_expansion(codemap::ExpnInfo {
+ call_site: span,
+ callee: codemap::NameAndSpan {
+ format: codemap::MacroAttribute(Symbol::intern(attr_name)),
+ span: Some(span),
+ allow_internal_unstable: true,
+ },
+ }),
+ ..span
+ }
+}
+
+pub fn add_derived_markers(cx: &mut ExtCtxt, attrs: &mut Vec<ast::Attribute>) {
+ if attrs.is_empty() {
+ return;
+ }
+
+ let titems = attrs.iter().filter(|a| {
+ a.name() == "derive"
+ }).flat_map(|a| {
+ a.meta_item_list().unwrap_or(&[]).iter()
+ }).filter_map(|titem| {
+ titem.name()
+ }).collect::<Vec<_>>();
+
+ let span = attrs[0].span;
+
+ if !attrs.iter().any(|a| a.name() == "structural_match") &&
+ titems.iter().any(|t| *t == "PartialEq") && titems.iter().any(|t| *t == "Eq") {
+ let structural_match = Symbol::intern("structural_match");
+ let span = allow_unstable(cx, span, "derive(PartialEq, Eq)");
+ let meta = cx.meta_word(span, structural_match);
+ attrs.push(cx.attribute(span, meta));
+ }
+
+ if !attrs.iter().any(|a| a.name() == "rustc_copy_clone_marker") &&
+ titems.iter().any(|t| *t == "Copy") && titems.iter().any(|t| *t == "Clone") {
+ let structural_match = Symbol::intern("rustc_copy_clone_marker");
+ let span = allow_unstable(cx, span, "derive(Copy, Clone)");
+ let meta = cx.meta_word(span, structural_match);
+ attrs.push(cx.attribute(span, meta));
+ }
+}
+
+pub fn find_derive_attr(cx: &mut ExtCtxt, attrs: &mut Vec<ast::Attribute>)
+ -> Option<ast::Attribute> {
+ verify_derive_attrs(cx, attrs);
+ get_derive_attr(cx, attrs, DeriveType::Legacy).and_then(|a| {
+ let titem = derive_attr_trait(cx, &a);
+ titem.and_then(|titem| {
+ let tword = titem.word().unwrap();
+ let tname = tword.name();
+ if !cx.ecfg.enable_custom_derive() {
+ feature_gate::emit_feature_err(
+ &cx.parse_sess,
+ "custom_derive",
+ titem.span,
+ feature_gate::GateIssue::Language,
+ feature_gate::EXPLAIN_CUSTOM_DERIVE
+ );
+ None
+ } else {
+ let name = Symbol::intern(&format!("derive_{}", tname));
+ if !cx.resolver.is_whitelisted_legacy_custom_derive(name) {
+ cx.span_warn(titem.span,
+ feature_gate::EXPLAIN_DEPR_CUSTOM_DERIVE);
+ }
+ let mitem = cx.meta_word(titem.span, name);
+ Some(cx.attribute(mitem.span, mitem))
+ }
+ })
+ }).or_else(|| {
+ get_derive_attr(cx, attrs, DeriveType::ProcMacro)
+ }).or_else(|| {
+ add_derived_markers(cx, attrs);
+ get_derive_attr(cx, attrs, DeriveType::Builtin)
+ })
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use ast::{Block, Ident, Mac_, PatKind};
+use ast::{self, Block, Ident, Mac_, PatKind};
use ast::{Name, MacStmtStyle, StmtKind, ItemKind};
-use ast;
-use ext::hygiene::Mark;
-use ext::placeholders::{placeholder, PlaceholderExpander};
use attr::{self, HasAttrs};
use codemap::{ExpnInfo, NameAndSpan, MacroBang, MacroAttribute};
-use syntax_pos::{self, Span, ExpnId};
use config::{is_test_or_bench, StripUnconfigured};
use ext::base::*;
+use ext::derive::{find_derive_attr, derive_attr_trait};
+use ext::hygiene::Mark;
+use ext::placeholders::{placeholder, PlaceholderExpander};
use feature_gate::{self, Features};
use fold;
use fold::*;
-use parse::{ParseSess, DirectoryOwnership, PResult, filemap_to_tts};
use parse::parser::Parser;
use parse::token;
+use parse::{ParseSess, DirectoryOwnership, PResult, filemap_to_tts};
use print::pprust;
use ptr::P;
use std_inject;
+use symbol::Symbol;
use symbol::keywords;
+use syntax_pos::{self, Span, ExpnId};
use tokenstream::{TokenTree, TokenStream};
use util::small_vector::SmallVector;
use visit::Visitor;
attr: ast::Attribute,
item: Annotatable,
},
+ Derive {
+ attr: ast::Attribute,
+ item: Annotatable,
+ },
}
impl Invocation {
match self.kind {
InvocationKind::Bang { span, .. } => span,
InvocationKind::Attr { ref attr, .. } => attr.span,
+ InvocationKind::Derive { ref attr, .. } => attr.span,
}
}
}
let path = ast::Path::from_ident(attr.span, ident);
self.cx.resolver.resolve_macro(scope, &path, force)
}
+ InvocationKind::Derive { ref attr, .. } => {
+ let titem = derive_attr_trait(self.cx, &attr).unwrap();
+ let tname = titem.name().expect("Expected derive macro name");
+ let ident = Ident::with_empty_ctxt(tname);
+ let path = ast::Path::from_ident(attr.span, ident);
+ self.cx.resolver.resolve_derive_macro(scope, &path, force)
+ }
};
let ext = match resolution {
Ok(ext) => Some(ext),
match invoc.kind {
InvocationKind::Bang { .. } => self.expand_bang_invoc(invoc, ext),
InvocationKind::Attr { .. } => self.expand_attr_invoc(invoc, ext),
+ InvocationKind::Derive { .. } => self.expand_derive_invoc(invoc, ext),
}
}
let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks);
self.parse_expansion(tok_result, kind, name, attr.span)
}
- SyntaxExtension::CustomDerive(_) => {
+ SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => {
self.cx.span_err(attr.span, &format!("`{}` is a derive mode", name));
kind.dummy(attr.span)
}
return kind.dummy(span);
}
- SyntaxExtension::CustomDerive(..) => {
+ SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => {
self.cx.span_err(path.span, &format!("`{}` is a derive mode", extname));
return kind.dummy(span);
}
})
}
+ /// Expand a derive invocation. Returns the result of expansion.
+ fn expand_derive_invoc(&mut self, invoc: Invocation, ext: Rc<SyntaxExtension>) -> Expansion {
+ let Invocation { expansion_kind: kind, .. } = invoc;
+ let (attr, item) = match invoc.kind {
+ InvocationKind::Derive { attr, item } => (attr, item),
+ _ => unreachable!(),
+ };
+
+ attr::mark_used(&attr);
+ let titem = derive_attr_trait(self.cx, &attr).unwrap();
+ let tname = ast::Ident::with_empty_ctxt(titem.name().unwrap());
+ let name = Symbol::intern(&format!("derive({})", tname));
+ let mitem = &attr.value;
+
+ self.cx.bt_push(ExpnInfo {
+ call_site: attr.span,
+ callee: NameAndSpan {
+ format: MacroAttribute(attr.name()),
+ span: Some(attr.span),
+ allow_internal_unstable: false,
+ }
+ });
+
+ match *ext {
+ SyntaxExtension::ProcMacroDerive(ref ext) => {
+ let span = Span {
+ expn_id: self.cx.codemap().record_expansion(ExpnInfo {
+ call_site: mitem.span,
+ callee: NameAndSpan {
+ format: MacroAttribute(Symbol::intern(&format!("derive({})", tname))),
+ span: None,
+ allow_internal_unstable: false,
+ },
+ }),
+ ..mitem.span
+ };
+ return kind.expect_from_annotatables(ext.expand(self.cx, span, &mitem, item));
+ }
+ SyntaxExtension::BuiltinDerive(func) => {
+ let span = Span {
+ expn_id: self.cx.codemap().record_expansion(ExpnInfo {
+ call_site: titem.span,
+ callee: NameAndSpan {
+ format: MacroAttribute(name),
+ span: None,
+ allow_internal_unstable: true,
+ },
+ }),
+ ..titem.span
+ };
+ let mut items = Vec::new();
+ func(self.cx, span, &mitem, &item, &mut |a| {
+ items.push(a)
+ });
+ items.insert(0, item);
+ return kind.expect_from_annotatables(items);
+ }
+ _ => {
+ let msg = &format!("macro `{}` may not be used for derive attributes", name);
+ self.cx.span_err(attr.span, &msg);
+ kind.dummy(attr.span)
+ }
+ }
+ }
+
fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, name: Name, span: Span)
-> Expansion {
let mut parser = self.cx.new_parser_from_tts(&toks.trees().cloned().collect::<Vec<_>>());
fn collect_attr(&mut self, attr: ast::Attribute, item: Annotatable, kind: ExpansionKind)
-> Expansion {
- self.collect(kind, InvocationKind::Attr { attr: attr, item: item })
+ let invoc_kind = if attr.name() == "derive" {
+ if kind == ExpansionKind::TraitItems || kind == ExpansionKind::ImplItems {
+            self.cx.span_err(attr.span, "`derive` can only be applied to items");
+ return kind.expect_from_annotatables(::std::iter::once(item));
+ }
+ InvocationKind::Derive { attr: attr, item: item }
+ } else {
+ InvocationKind::Attr { attr: attr, item: item }
+ };
+
+ self.collect(kind, invoc_kind)
}
// If `item` is an attr invocation, remove and return the macro attribute.
fn classify_item<T: HasAttrs>(&mut self, mut item: T) -> (T, Option<ast::Attribute>) {
let mut attr = None;
+
item = item.map_attrs(|mut attrs| {
- attr = self.cx.resolver.find_attr_invoc(&mut attrs);
+ attr = self.cx.resolver.find_attr_invoc(&mut attrs).or_else(|| {
+ find_derive_attr(self.cx, &mut attrs)
+ });
+
attrs
});
+
(item, attr)
}
(active, abi_unadjusted, "1.16.0", None),
// Macros 1.1
- (active, proc_macro, "1.16.0", Some(35900)),
+ (active, proc_macro, "1.16.0", Some(38356)),
// Allows attributes on struct literal fields.
(active, struct_field_attributes, "1.16.0", Some(38814)),
+ // Allows #[link(kind="static-nobundle"...]
+ (active, static_nobundle, "1.16.0", Some(37403)),
+
// `extern "msp430-interrupt" fn()`
(active, abi_msp430_interrupt, "1.16.0", Some(38487)),
);
pub mod ext {
pub mod base;
pub mod build;
+ pub mod derive;
pub mod expand;
pub mod placeholders;
pub mod hygiene;
}
// Assemble the span.
+ // FIXME(#39450) This is bogus if part of the path is macro generated.
let span = mk_sp(lo, self.prev_span.hi);
// Assemble the result.
fn visit_mac(&mut self, _mac: &Mac) {}
}
-pub struct CustomDerive {
+pub struct ProcMacroDerive {
inner: fn(TokenStream) -> TokenStream,
attrs: Vec<ast::Name>,
}
-impl CustomDerive {
- pub fn new(inner: fn(TokenStream) -> TokenStream, attrs: Vec<ast::Name>) -> CustomDerive {
- CustomDerive { inner: inner, attrs: attrs }
+impl ProcMacroDerive {
+ pub fn new(inner: fn(TokenStream) -> TokenStream, attrs: Vec<ast::Name>) -> ProcMacroDerive {
+ ProcMacroDerive { inner: inner, attrs: attrs }
}
}
-impl MultiItemModifier for CustomDerive {
+impl MultiItemModifier for ProcMacroDerive {
fn expand(&self,
ecx: &mut ExtCtxt,
span: Span,
Annotatable::Item(item) => item,
Annotatable::ImplItem(_) |
Annotatable::TraitItem(_) => {
- ecx.span_err(span, "custom derive attributes may only be \
+ ecx.span_err(span, "proc-macro derives may only be \
applied to struct/enum items");
return Vec::new()
}
ItemKind::Struct(..) |
ItemKind::Enum(..) => {},
_ => {
- ecx.span_err(span, "custom derive attributes may only be \
+ ecx.span_err(span, "proc-macro derives may only be \
applied to struct/enum items");
return Vec::new()
}
let stream = match res {
Ok(stream) => stream,
Err(e) => {
- let msg = "custom derive attribute panicked";
+ let msg = "proc-macro derive panicked";
let mut err = ecx.struct_span_fatal(span, msg);
if let Some(s) = e.downcast_ref::<String>() {
err.help(&format!("message: {}", s));
Ok(new_items) => new_items,
Err(_) => {
// FIXME: handle this better
- let msg = "custom derive produced unparseable tokens";
+ let msg = "proc-macro derive produced unparseable tokens";
ecx.struct_span_fatal(span, msg).emit();
panic!(FatalError);
}
use deriving;
use deriving::generic::*;
use deriving::generic::ty::*;
+use deriving::warn_if_deprecated;
use syntax::ast;
use syntax::ast::{Expr, MetaItem, Mutability};
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable)) {
+ warn_if_deprecated(cx, span, "Decodable");
expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize")
}
use deriving;
use deriving::generic::*;
use deriving::generic::ty::*;
+use deriving::warn_if_deprecated;
use syntax::ast::{Expr, ExprKind, MetaItem, Mutability};
use syntax::ext::base::{Annotatable, ExtCtxt};
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable)) {
+ warn_if_deprecated(cx, span, "Encodable");
expand_deriving_encodable_imp(cx, span, mitem, item, push, "serialize")
}
//! The compiler code necessary to implement the `#[derive]` extensions.
-use syntax::ast::{self, MetaItem};
-use syntax::attr::HasAttrs;
+use std::rc::Rc;
+use syntax::ast;
use syntax::codemap;
-use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension};
+use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension, Resolver};
use syntax::ext::build::AstBuilder;
-use syntax::feature_gate;
use syntax::ptr::P;
use syntax::symbol::Symbol;
use syntax_pos::Span;
}
}
-pub fn expand_derive(cx: &mut ExtCtxt,
- span: Span,
- mitem: &MetaItem,
- annotatable: Annotatable)
- -> Vec<Annotatable> {
- debug!("expand_derive: span = {:?}", span);
- debug!("expand_derive: mitem = {:?}", mitem);
- debug!("expand_derive: annotatable input = {:?}", annotatable);
- let mut item = match annotatable {
- Annotatable::Item(item) => item,
- other => {
- cx.span_err(span, "`derive` can only be applied to items");
- return vec![other]
- }
- };
-
- let derive = Symbol::intern("derive");
- let mut derive_attrs = Vec::new();
- item = item.map_attrs(|attrs| {
- let partition = attrs.into_iter().partition(|attr| attr.name() == derive);
- derive_attrs = partition.0;
- partition.1
- });
-
- // Expand `#[derive]`s after other attribute macro invocations.
- if cx.resolver.find_attr_invoc(&mut item.attrs.clone()).is_some() {
- return vec![Annotatable::Item(item.map_attrs(|mut attrs| {
- attrs.push(cx.attribute(span, mitem.clone()));
- attrs.extend(derive_attrs);
- attrs
- }))];
- }
-
- let get_traits = |mitem: &MetaItem, cx: &ExtCtxt| {
- if mitem.value_str().is_some() {
- cx.span_err(mitem.span, "unexpected value in `derive`");
- }
-
- let traits = mitem.meta_item_list().unwrap_or(&[]).to_owned();
- if traits.is_empty() {
- cx.span_warn(mitem.span, "empty trait list in `derive`");
- }
- traits
- };
-
- let mut traits = get_traits(mitem, cx);
- for derive_attr in derive_attrs {
- traits.extend(get_traits(&derive_attr.value, cx));
- }
-
- // First, weed out malformed #[derive]
- traits.retain(|titem| {
- if titem.word().is_none() {
- cx.span_err(titem.span, "malformed `derive` entry");
- false
- } else {
- true
- }
- });
-
- // Next, check for old-style #[derive(Foo)]
- //
- // These all get expanded to `#[derive_Foo]` and will get expanded first. If
- // we actually add any attributes here then we return to get those expanded
- // and then eventually we'll come back to finish off the other derive modes.
- let mut new_attributes = Vec::new();
- traits.retain(|titem| {
- let tword = titem.word().unwrap();
- let tname = tword.name();
-
- if is_builtin_trait(tname) || {
- let derive_mode = ast::Path::from_ident(titem.span, ast::Ident::with_empty_ctxt(tname));
- cx.resolver.resolve_macro(cx.current_expansion.mark, &derive_mode, false).map(|ext| {
- if let SyntaxExtension::CustomDerive(_) = *ext { true } else { false }
- }).unwrap_or(false)
- } {
- return true;
- }
-
- if !cx.ecfg.enable_custom_derive() {
- feature_gate::emit_feature_err(&cx.parse_sess,
- "custom_derive",
- titem.span,
- feature_gate::GateIssue::Language,
- feature_gate::EXPLAIN_CUSTOM_DERIVE);
- } else {
- let name = Symbol::intern(&format!("derive_{}", tname));
- if !cx.resolver.is_whitelisted_legacy_custom_derive(name) {
- cx.span_warn(titem.span, feature_gate::EXPLAIN_DEPR_CUSTOM_DERIVE);
- }
- let mitem = cx.meta_word(titem.span, name);
- new_attributes.push(cx.attribute(mitem.span, mitem));
- }
- false
- });
- if new_attributes.len() > 0 {
- item = item.map(|mut i| {
- i.attrs.extend(new_attributes);
- if traits.len() > 0 {
- let list = cx.meta_list(mitem.span, derive, traits);
- i.attrs.push(cx.attribute(mitem.span, list));
- }
- i
- });
- return vec![Annotatable::Item(item)]
- }
-
- // Now check for macros-1.1 style custom #[derive].
- //
- // Expand each of them in order given, but *before* we expand any built-in
- // derive modes. The logic here is to:
- //
- // 1. Collect the remaining `#[derive]` annotations into a list. If
- // there are any left, attach a `#[derive]` attribute to the item
- // that we're currently expanding with the remaining derive modes.
- // 2. Manufacture a `#[derive(Foo)]` attribute to pass to the expander.
- // 3. Expand the current item we're expanding, getting back a list of
- // items that replace it.
- // 4. Extend the returned list with the current list of items we've
- // collected so far.
- // 5. Return everything!
- //
- // If custom derive extensions end up threading through the `#[derive]`
- // attribute, we'll get called again later on to continue expanding
- // those modes.
- let macros_11_derive = traits.iter()
- .cloned()
- .enumerate()
- .filter(|&(_, ref name)| !is_builtin_trait(name.name().unwrap()))
- .next();
- if let Some((i, titem)) = macros_11_derive {
- let tname = ast::Ident::with_empty_ctxt(titem.name().unwrap());
- let path = ast::Path::from_ident(titem.span, tname);
- let ext = cx.resolver.resolve_macro(cx.current_expansion.mark, &path, false).unwrap();
-
- traits.remove(i);
- if traits.len() > 0 {
- item = item.map(|mut i| {
- let list = cx.meta_list(mitem.span, derive, traits);
- i.attrs.push(cx.attribute(mitem.span, list));
- i
- });
- }
- let titem = cx.meta_list_item_word(titem.span, titem.name().unwrap());
- let mitem = cx.meta_list(titem.span, derive, vec![titem]);
- let item = Annotatable::Item(item);
-
- let span = Span {
- expn_id: cx.codemap().record_expansion(codemap::ExpnInfo {
- call_site: mitem.span,
- callee: codemap::NameAndSpan {
- format: codemap::MacroAttribute(Symbol::intern(&format!("derive({})", tname))),
- span: None,
- allow_internal_unstable: false,
- },
- }),
- ..mitem.span
- };
-
- if let SyntaxExtension::CustomDerive(ref ext) = *ext {
- return ext.expand(cx, span, &mitem, item);
- } else {
- unreachable!()
- }
- }
-
- // Ok, at this point we know that there are no old-style `#[derive_Foo]` nor
- // any macros-1.1 style `#[derive(Foo)]`. Expand all built-in traits here.
-
- // RFC #1445. `#[derive(PartialEq, Eq)]` adds a (trusted)
- // `#[structural_match]` attribute.
- let (partial_eq, eq) = (Symbol::intern("PartialEq"), Symbol::intern("Eq"));
- if traits.iter().any(|t| t.name() == Some(partial_eq)) &&
- traits.iter().any(|t| t.name() == Some(eq)) {
- let structural_match = Symbol::intern("structural_match");
- let span = allow_unstable(cx, span, "derive(PartialEq, Eq)");
- let meta = cx.meta_word(span, structural_match);
- item = item.map(|mut i| {
- i.attrs.push(cx.attribute(span, meta));
- i
- });
- }
-
- // RFC #1521. `Clone` can assume that `Copy` types' clone implementation is
- // the same as the copy implementation.
- //
- // Add a marker attribute here picked up during #[derive(Clone)]
- let (copy, clone) = (Symbol::intern("Copy"), Symbol::intern("Clone"));
- if traits.iter().any(|t| t.name() == Some(clone)) &&
- traits.iter().any(|t| t.name() == Some(copy)) {
- let marker = Symbol::intern("rustc_copy_clone_marker");
- let span = allow_unstable(cx, span, "derive(Copy, Clone)");
- let meta = cx.meta_word(span, marker);
- item = item.map(|mut i| {
- i.attrs.push(cx.attribute(span, meta));
- i
- });
- }
-
- let mut items = Vec::new();
- for titem in traits.iter() {
- let tname = titem.word().unwrap().name();
- let name = Symbol::intern(&format!("derive({})", tname));
- let mitem = cx.meta_word(titem.span, name);
-
- let span = Span {
- expn_id: cx.codemap().record_expansion(codemap::ExpnInfo {
- call_site: titem.span,
- callee: codemap::NameAndSpan {
- format: codemap::MacroAttribute(name),
- span: None,
- allow_internal_unstable: true,
- },
- }),
- ..titem.span
- };
-
- let my_item = Annotatable::Item(item);
- expand_builtin(&tname.as_str(), cx, span, &mitem, &my_item, &mut |a| {
- items.push(a);
- });
- item = my_item.expect_item();
- }
-
- items.insert(0, Annotatable::Item(item));
- return items
-}
-
macro_rules! derive_traits {
($( $name:expr => $func:path, )+) => {
pub fn is_builtin_trait(name: ast::Name) -> bool {
}
}
- fn expand_builtin(name: &str,
- ecx: &mut ExtCtxt,
- span: Span,
- mitem: &MetaItem,
- item: &Annotatable,
- push: &mut FnMut(Annotatable)) {
- match name {
- $(
- $name => {
- warn_if_deprecated(ecx, span, $name);
- $func(ecx, span, mitem, item, push);
- }
- )*
- _ => panic!("not a builtin derive mode: {}", name),
- }
+ pub fn register_builtin_derives(resolver: &mut Resolver) {
+ $(
+ resolver.add_ext(
+ ast::Ident::with_empty_ctxt(Symbol::intern($name)),
+ Rc::new(SyntaxExtension::BuiltinDerive($func))
+ );
+ )*
}
}
}
#![feature(staged_api)]
extern crate fmt_macros;
-#[macro_use]
extern crate log;
#[macro_use]
extern crate syntax;
use std::rc::Rc;
use syntax::ast;
-use syntax::ext::base::{MacroExpanderFn, NormalTT, MultiModifier, NamedSyntaxExtension};
+use syntax::ext::base::{MacroExpanderFn, NormalTT, NamedSyntaxExtension};
use syntax::symbol::Symbol;
pub fn register_builtins(resolver: &mut syntax::ext::base::Resolver,
user_exts: Vec<NamedSyntaxExtension>,
enable_quotes: bool) {
+ deriving::register_builtin_derives(resolver);
+
let mut register = |name, ext| {
resolver.add_ext(ast::Ident::with_empty_ctxt(name), Rc::new(ext));
};
register(Symbol::intern("format_args"),
NormalTT(Box::new(format::expand_format_args), None, true));
- register(Symbol::intern("derive"), MultiModifier(Box::new(deriving::expand_derive)));
-
for (name, ext) in user_exts {
register(name, ext);
}
use deriving;
-struct CustomDerive {
+struct ProcMacroDerive {
trait_name: ast::Name,
function_name: Ident,
span: Span,
}
struct CollectProcMacros<'a> {
- derives: Vec<CustomDerive>,
+ derives: Vec<ProcMacroDerive>,
attr_macros: Vec<AttrProcMacro>,
in_root: bool,
handler: &'a errors::Handler,
};
if self.in_root && item.vis == ast::Visibility::Public {
- self.derives.push(CustomDerive {
+ self.derives.push(ProcMacroDerive {
span: item.span,
trait_name: trait_name,
function_name: item.ident,
// }
// }
fn mk_registrar(cx: &mut ExtCtxt,
- custom_derives: &[CustomDerive],
+ custom_derives: &[ProcMacroDerive],
custom_attrs: &[AttrProcMacro]) -> P<ast::Item> {
let eid = cx.codemap().record_expansion(ExpnInfo {
call_site: DUMMY_SP,
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
#[derive(
A
)]
-//~^^ ERROR: custom derive produced unparseable tokens
+//~^^ ERROR: proc-macro derive produced unparseable tokens
struct A;
fn main() {}
extern crate derive_panic;
#[derive(A)]
-//~^ ERROR: custom derive attribute panicked
+//~^ ERROR: proc-macro derive panicked
//~| HELP: message: nope!
struct Foo;
#![feature(rustc_attrs)]
extern crate derive_a;
-//~^ WARN custom derive crates and `#[no_link]` crates have no effect without `#[macro_use]`
+//~^ WARN proc macro crates and `#[no_link]` crates have no effect without `#[macro_use]`
#[rustc_error]
fn main() {} //~ ERROR compilation successful
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+#![allow(unreachable_code)]
+#![deny(resolve_trait_on_defaulted_unit)]
+
+trait Deserialize: Sized {
+ fn deserialize() -> Result<Self, String>;
+}
+
+impl Deserialize for () {
+ fn deserialize() -> Result<(), String> {
+ Ok(())
+ }
+}
+
+fn doit() -> Result<(), String> {
+ let _ = match Deserialize::deserialize() {
+ //~^ ERROR code relies on type
+ //~| WARNING previously accepted
+ Ok(x) => x,
+ Err(e) => return Err(e),
+ };
+ Ok(())
+}
+
+trait ImplementedForUnitButNotNever {}
+
+impl ImplementedForUnitButNotNever for () {}
+
+fn foo<T: ImplementedForUnitButNotNever>(_t: T) {}
+
+fn smeg() {
+ let _x = return;
+ foo(_x);
+ //~^ ERROR code relies on type
+ //~| WARNING previously accepted
+}
+
+fn main() {
+ let _ = doit();
+}
+
// ignore-tidy-linelength
#[derive(Eqr)]
-//~^ ERROR `#[derive]` for custom traits is not stable enough for use. It is deprecated and will be removed in v1.15 (see issue #29644)
+//~^ ERROR cannot find derive macro `Eqr` in this scope
struct Foo;
pub fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#[derive(FromPrimitive)] //~ERROR `#[derive]` for custom traits is not stable
+#[derive(FromPrimitive)] //~ERROR cannot find derive macro `FromPrimitive` in this scope
enum Foo {}
fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[link(name="foo", kind="static-nobundle")]
+//~^ ERROR: kind="static-nobundle" is feature gated
+extern {}
//~^ WARN `Foo` is ambiguous
//~| WARN hard error in a future release
//~| NOTE see issue #38260
+ //~| NOTE #[warn(legacy_imports)] on by default
}
}
// except according to those terms.
#![deny(warnings)] //~ NOTE: lint level defined here
-use std::thread; //~ ERROR: unused import
+use std::thread;
+//~^ ERROR: unused import
+//~| NOTE: #[deny(unused_imports)] implied by #[deny(warnings)]
fn main() {}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![deny(bad_style)]
-//~^ NOTE lint level defined here
-#![allow(dead_code)]
-
-fn CamelCase() {} //~ ERROR function `CamelCase` should have a snake case name
-
-#[allow(bad_style)]
-mod test {
- fn CamelCase() {}
-
- #[forbid(bad_style)]
- //~^ NOTE lint level defined here
- //~^^ NOTE lint level defined here
- mod bad {
- fn CamelCase() {} //~ ERROR function `CamelCase` should have a snake case name
-
- static bad: isize = 1; //~ ERROR static variable `bad` should have an upper case name
- }
-
- mod warn {
- #![warn(bad_style)]
- //~^ NOTE lint level defined here
- //~| NOTE lint level defined here
-
- fn CamelCase() {} //~ WARN function `CamelCase` should have a snake case name
-
- struct snake_case; //~ WARN type `snake_case` should have a camel case name
- }
-}
-
-fn main() {}
// compile-flags: -F unused_features
// aux-build:lint_output_format.rs
-#![feature(foo)] //~ ERROR unused or unknown feature
+#![feature(foo)]
+//~^ ERROR unused or unknown feature
+//~| NOTE requested on the command line with `-F unused-features`
#![feature(test_feature)]
extern crate lint_output_format;
use lint_output_format::{foo, bar};
-//~^ WARNING use of deprecated item: text,
+//~^ WARNING use of deprecated item: text
+//~| NOTE #[warn(deprecated)] on by default
fn main() {
- let _x = foo(); //~ WARNING #[warn(deprecated)] on by default
+ let _x = foo();
+ //~^ WARNING use of deprecated item: text
+ //~| NOTE #[warn(deprecated)] on by default
let _y = bar();
}
foo!(0); // Check that we report errors at macro definition, not expansion.
let _: cfg!(foo) = (); //~ ERROR non-type macro in type position
- derive!(); //~ ERROR `derive` can only be used in attributes
+ derive!(); //~ ERROR macro undefined: 'derive!'
}
#![feature(asm)]
#![feature(trace_macros, concat_idents)]
-#[derive(Default, //~ ERROR
- Zero)] //~ ERROR
-enum CantDeriveThose {}
+#[derive(Zero)] //~ ERROR
+struct CantDeriveThis;
+
+#[derive(Default)] //~ ERROR
+enum OrDeriveThis {}
fn main() {
doesnt_exist!(); //~ ERROR
#[no_link]
extern crate empty_struct;
-//~^ WARN custom derive crates and `#[no_link]` crates have no effect without `#[macro_use]`
+//~^ WARN proc macro crates and `#[no_link]` crates have no effect without `#[macro_use]`
fn main() {
empty_struct::XEmpty1; //~ ERROR cannot find value `XEmpty1` in module `empty_struct`
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:derive-foo.rs
+// ignore-stage1
+// pp-exact
+// Testing that both the inner item and next outer item are
+// preserved, and that the first outer item parsed in main is not
+// accidentally carried over to each inner function
+
+#[macro_use]
+extern crate derive_foo;
+
+#[derive(Foo)]
+struct X;
+
+#[derive(Foo)]
+#[Bar]
+struct Y;
+
+#[derive(Foo)]
+struct WithRef {
+ x: X,
+ #[Bar]
+ y: Y,
+}
+
+#[derive(Foo)]
+enum Enum {
+
+ #[Bar]
+ Asdf,
+ Qwerty,
+}
+
+fn main() { }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// pp-exact
-// Testing that both the inner item and next outer item are
-// preserved, and that the first outer item parsed in main is not
-// accidentally carried over to each inner function
-
-#![feature(custom_attribute)]
-#![feature(custom_derive)]
-
-#[derive(Serialize, Deserialize)]
-struct X;
-
-#[derive(Serialize, Deserialize)]
-struct WithRef<'a, T: 'a> {
- #[serde(skip_deserializing)]
- t: Option<&'a T>,
- #[serde(serialize_with = "ser_x", deserialize_with = "de_x")]
- x: X,
-}
-
-#[derive(Serialize, Deserialize)]
-enum EnumWith<T> {
- Unit,
- Newtype(
- #[serde(serialize_with = "ser_x", deserialize_with = "de_x")]
- X),
- Tuple(T,
- #[serde(serialize_with = "ser_x", deserialize_with = "de_x")]
- X),
- Struct {
- t: T,
- #[serde(serialize_with = "ser_x", deserialize_with = "de_x")]
- x: X,
- },
-}
-
-#[derive(Serialize, Deserialize)]
-struct Tuple<T>(T,
- #[serde(serialize_with = "ser_x", deserialize_with = "de_x")]
- X);
-
-fn main() { }
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![crate_type = "proc-macro"]
+
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+
+#[proc_macro_derive(Foo, attributes(Bar))]
+pub fn derive(input: TokenStream) -> TokenStream {
+ "".parse().unwrap()
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(cfg_target_has_atomic, no_core, intrinsics, lang_items)]
+#![feature(cfg_target_has_atomic, no_core, intrinsics, lang_items, i128_type)]
#![crate_type="rlib"]
#![no_core]
pub unsafe fn atomic_i64(x: *mut i64) {
atomic_xadd(x, 1);
}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_u128(x: *mut u128) {
+ atomic_xadd(x, 1);
+}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_i128(x: *mut i128) {
+ atomic_xadd(x, 1);
+}
#[cfg(target_has_atomic = "ptr")]
pub unsafe fn atomic_usize(x: *mut usize) {
atomic_xadd(x, 1);
--- /dev/null
+-include ../tools.mk
+
+# aaa is a native static library
+# bbb is a rlib
+# ccc is a dylib
+# ddd is an executable
+
+all: $(call NATIVE_STATICLIB,aaa)
+ $(RUSTC) bbb.rs --crate-type=rlib
+
+ # Check that bbb does NOT contain the definition of `native_func`
+ nm $(TMPDIR)/libbbb.rlib | (! grep "T _*native_func")
+ nm $(TMPDIR)/libbbb.rlib | grep "U _*native_func"
+
+ # Check that aaa gets linked (either as `-l aaa` or `aaa.lib`) when building ccc.
+ $(RUSTC) ccc.rs -C prefer-dynamic --crate-type=dylib -Z print-link-args | grep -e "-l[\" ]*aaa" -e "aaa.lib"
+
+ # Check that aaa does NOT get linked when building ddd.
+ $(RUSTC) ddd.rs -Z print-link-args | (! grep -e "-l[\" ]*aaa" -e "aaa.lib")
+
+ $(call RUN,ddd)
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+void native_func() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "rlib"]
+#![feature(static_nobundle)]
+
+#[link(name = "aaa", kind = "static-nobundle")]
+extern {
+ pub fn native_func();
+}
+
+pub fn wrapped_func() {
+ unsafe {
+ native_func();
+ }
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "dylib"]
+
+extern crate bbb;
+
+pub fn do_work() {
+ unsafe { bbb::native_func(); }
+ bbb::wrapped_func();
+}
+
+pub fn do_work_generic<T>() {
+ unsafe { bbb::native_func(); }
+ bbb::wrapped_func();
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate ccc;
+
+fn main() {
+ ccc::do_work();
+ ccc::do_work_generic::<i16>();
+ ccc::do_work_generic::<i32>();
+}
// Parser test for #37765
-fn with_parens<T: ToString>(arg: T) -> String { //~WARN dead_code
- return (<T as ToString>::to_string(&arg)); //~WARN unused_parens
+fn with_parens<T: ToString>(arg: T) -> String { //~WARN function is never used: `with_parens`
+ return (<T as ToString>::to_string(&arg)); //~WARN unnecessary parentheses around `return` value
}
-fn no_parens<T: ToString>(arg: T) -> String { //~WARN dead_code
+fn no_parens<T: ToString>(arg: T) -> String { //~WARN function is never used: `no_parens`
return <T as ToString>::to_string(&arg);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--test
+
+// This file tests the source-partitioning behavior of rustdoc.
+// Each test contains some code that should be put into the generated
+// `fn main` and some attributes should be left outside (except the first
+// one, which has no attributes).
+// If the #![recursion_limit] attribute is incorrectly left inside,
+// then the tests will fail because the macro recurses 128 times.
+
+/// ```
+/// assert_eq!(1 + 1, 2);
+/// ```
+pub fn simple() {}
+
+/// ```
+/// #![recursion_limit = "1024"]
+/// macro_rules! recurse {
+/// (()) => {};
+/// (() $($rest:tt)*) => { recurse!($($rest)*); }
+/// }
+/// recurse!(() () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ());
+/// assert_eq!(1 + 1, 2);
+/// ```
+pub fn non_feature_attr() {}
+
+/// ```
+/// #![feature(core_intrinsics)]
+/// assert_eq!(1 + 1, 2);
+/// ```
+pub fn feature_attr() {}
+
+/// ```
+/// #![feature(core_intrinsics)]
+/// #![recursion_limit = "1024"]
+/// macro_rules! recurse {
+/// (()) => {};
+/// (() $($rest:tt)*) => { recurse!($($rest)*); }
+/// }
+/// recurse!(() () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ());
+/// assert_eq!(1 + 1, 2);
+/// ```
+pub fn both_attrs() {}
+
+/// ```
+/// #![recursion_limit = "1024"]
+/// #![feature(core_intrinsics)]
+/// macro_rules! recurse {
+/// (()) => {};
+/// (() $($rest:tt)*) => { recurse!($($rest)*); }
+/// }
+/// recurse!(() () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ()
+/// () () () () () () () ());
+/// assert_eq!(1 + 1, 2);
+/// ```
+pub fn both_attrs_reverse() {}
+
-error[E0276]: impl has stricter requirements than trait, #[deny(extra_requirement_in_impl)] on by default
+error[E0276]: impl has stricter requirements than trait
--> $DIR/proj-outlives-region.rs:22:5
|
17 | fn foo() where T: 'a;
22 | fn foo() where U: 'a { } //~ ERROR E0276
| ^^^^^^^^^^^^^^^^^^^^^^^^ impl has extra requirement `U: 'a`
|
+ = note: #[deny(extra_requirement_in_impl)] on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #37166 <https://github.com/rust-lang/rust/issues/37166>
-error[E0276]: impl has stricter requirements than trait, #[deny(extra_requirement_in_impl)] on by default
+error[E0276]: impl has stricter requirements than trait
--> $DIR/region-unrelated.rs:22:5
|
17 | fn foo() where T: 'a;
22 | fn foo() where V: 'a { }
| ^^^^^^^^^^^^^^^^^^^^^^^^ impl has extra requirement `V: 'a`
|
+ = note: #[deny(extra_requirement_in_impl)] on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #37166 <https://github.com/rust-lang/rust/issues/37166>
-error: custom derive attribute panicked
+error: proc-macro derive panicked
--> $DIR/issue-36935.rs:17:15
|
17 | #[derive(Foo, Bar)]
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use Foo;
+
+use Foo1;
+
+fn main() {}
--- /dev/null
+error[E0432]: unresolved import `Foo`
+ --> $DIR/issue-38054-do-not-show-unresolved-names.rs:11:5
+ |
+11 | use Foo;
+ | ^^^ no `Foo` in the root
+
+error[E0432]: unresolved import `Foo1`
+ --> $DIR/issue-38054-do-not-show-unresolved-names.rs:13:5
+ |
+13 | use Foo1;
+ | ^^^^ no `Foo1` in the root
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -A bad-style
+
+fn main() {
+ let _InappropriateCamelCasing = true;
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -D bad-style
+
+fn main() {
+ let _InappropriateCamelCasing = true;
+}
--- /dev/null
+error: variable `_InappropriateCamelCasing` should have a snake case name such as `_inappropriate_camel_casing`
+ --> $DIR/command-line-lint-group-deny.rs:14:9
+ |
+14 | let _InappropriateCamelCasing = true;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-D non-snake-case` implied by `-D bad-style`
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -F bad-style
+
+fn main() {
+ let _InappropriateCamelCasing = true;
+}
--- /dev/null
+error: variable `_InappropriateCamelCasing` should have a snake case name such as `_inappropriate_camel_casing`
+ --> $DIR/command-line-lint-group-forbid.rs:14:9
+ |
+14 | let _InappropriateCamelCasing = true;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-F non-snake-case` implied by `-F bad-style`
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -W bad-style
+
+fn main() {
+ let _InappropriateCamelCasing = true;
+}
--- /dev/null
+warning: variable `_InappropriateCamelCasing` should have a snake case name such as `_inappropriate_camel_casing`
+ --> $DIR/command-line-lint-group-warn.rs:14:9
+ |
+14 | let _InappropriateCamelCasing = true;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-W non-snake-case` implied by `-W bad-style`
+
--- /dev/null
+// Copyright 2014-2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(bad_style)]
+#![allow(dead_code)]
+
+fn CamelCase() {}
+
+#[allow(bad_style)]
+mod test {
+ fn CamelCase() {}
+
+ #[forbid(bad_style)]
+ mod bad {
+ fn CamelCase() {}
+
+ static bad: isize = 1;
+ }
+
+ mod warn {
+ #![warn(bad_style)]
+
+ fn CamelCase() {}
+
+ struct snake_case;
+ }
+}
+
+fn main() {}
--- /dev/null
+error: function `CamelCase` should have a snake case name such as `camel_case`
+ --> $DIR/lint-group-style.rs:14:1
+ |
+14 | fn CamelCase() {}
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: #[deny(non_snake_case)] implied by #[deny(bad_style)]
+note: lint level defined here
+ --> $DIR/lint-group-style.rs:11:9
+ |
+11 | #![deny(bad_style)]
+ | ^^^^^^^^^
+
+error: function `CamelCase` should have a snake case name such as `camel_case`
+ --> $DIR/lint-group-style.rs:22:9
+ |
+22 | fn CamelCase() {}
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: #[forbid(non_snake_case)] implied by #[forbid(bad_style)]
+note: lint level defined here
+ --> $DIR/lint-group-style.rs:20:14
+ |
+20 | #[forbid(bad_style)]
+ | ^^^^^^^^^
+
+error: static variable `bad` should have an upper case name such as `BAD`
+ --> $DIR/lint-group-style.rs:24:9
+ |
+24 | static bad: isize = 1;
+ | ^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: #[forbid(non_upper_case_globals)] implied by #[forbid(bad_style)]
+note: lint level defined here
+ --> $DIR/lint-group-style.rs:20:14
+ |
+20 | #[forbid(bad_style)]
+ | ^^^^^^^^^
+
+warning: function `CamelCase` should have a snake case name such as `camel_case`
+ --> $DIR/lint-group-style.rs:30:9
+ |
+30 | fn CamelCase() {}
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: #[warn(non_snake_case)] implied by #[warn(bad_style)]
+note: lint level defined here
+ --> $DIR/lint-group-style.rs:28:17
+ |
+28 | #![warn(bad_style)]
+ | ^^^^^^^^^
+
+warning: type `snake_case` should have a camel case name such as `SnakeCase`
+ --> $DIR/lint-group-style.rs:32:9
+ |
+32 | struct snake_case;
+ | ^^^^^^^^^^^^^^^^^^
+ |
+ = note: #[warn(non_camel_case_types)] implied by #[warn(bad_style)]
+note: lint level defined here
+ --> $DIR/lint-group-style.rs:28:17
+ |
+28 | #![warn(bad_style)]
+ | ^^^^^^^^^
+
+error: aborting due to 3 previous errors
+
19 | let theTwo = 2;
| ^^^^^^
|
+ = note: #[deny(non_snake_case)] implied by #[deny(warnings)]
note: lint level defined here
--> $DIR/issue-24690.rs:16:9
|
|
20 | let theOtherTwo = 2;
| ^^^^^^^^^^^
+ |
+ = note: #[deny(non_snake_case)] implied by #[deny(warnings)]
error: unused variable: `theOtherTwo`
--> $DIR/issue-24690.rs:20:9
20 | let theOtherTwo = 2;
| ^^^^^^^^^^^
|
+ = note: #[deny(unused_variables)] implied by #[deny(warnings)]
note: lint level defined here
--> $DIR/issue-24690.rs:16:9
|
-warning: unused imports: `Eq`, `Ord`, `PartialEq`, `PartialOrd`, #[warn(unused_imports)] on by default
+warning: unused imports: `Eq`, `Ord`, `PartialEq`, `PartialOrd`
--> $DIR/multispan-import-lint.rs:11:16
|
11 | use std::cmp::{Eq, Ord, min, PartialEq, PartialOrd};
| ^^ ^^^ ^^^^^^^^^ ^^^^^^^^^^
+ |
+ = note: #[warn(unused_imports)] on by default