-Subproject commit c416fb60b11ecfd2a1ba0fb8567c9a92590b5d28
+Subproject commit 8326a3683a9045d825e4fdc4021af340ee3b3755
valopt arm-linux-androideabi-ndk "" "arm-linux-androideabi NDK standalone path"
valopt armv7-linux-androideabi-ndk "" "armv7-linux-androideabi NDK standalone path"
valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path"
+valopt x86_64-linux-android-ndk "" "x86_64-linux-android NDK standalone path"
valopt nacl-cross-path "" "NaCl SDK path (Pepper Canary is recommended). Must be absolute!"
valopt musl-root "/usr/local" "MUSL root installation directory (deprecated)"
valopt musl-root-x86_64 "" "x86_64-unknown-linux-musl install directory"
putvar CFG_ARM_LINUX_ANDROIDEABI_NDK
putvar CFG_ARMV7_LINUX_ANDROIDEABI_NDK
putvar CFG_I686_LINUX_ANDROID_NDK
+putvar CFG_X86_64_LINUX_ANDROID_NDK
putvar CFG_NACL_CROSS_PATH
putvar CFG_MANDIR
putvar CFG_DOCDIR
-Subproject commit 016cbc514cf44a2bd3fe806e8afa6b9c50287373
+Subproject commit 6ecff95fdc3ee7ceed2b9b0cc1a3a64876860bce
if !up_to_date(src_file, dst_file) {
let mut cmd = Command::new(&compiler_path);
build.run(cmd.env("RUSTC_BOOTSTRAP", "1")
+ .arg("--cfg").arg(format!("stage{}", compiler.stage))
.arg("--target").arg(target)
.arg("--emit=obj")
.arg("--out-dir").arg(dst_dir)
.or_insert(Target::default());
target.ndk = Some(parse_configure_path(value));
}
+ "CFG_X86_64_LINUX_ANDROID_NDK" if value.len() > 0 => {
+ let target = "x86_64-linux-android".to_string();
+ let target = self.target_config.entry(target)
+ .or_insert(Target::default());
+ target.ndk = Some(parse_configure_path(value));
+ }
"CFG_LOCAL_RUST_ROOT" if value.len() > 0 => {
let path = parse_configure_path(value);
self.rustc = Some(push_exe_path(path.clone(), &["bin", "rustc"]));
chmod +x /usr/local/bin/sccache
ENV TARGETS=arm-linux-androideabi
+ENV TARGETS=$TARGETS,armv7-linux-androideabi
ENV TARGETS=$TARGETS,i686-linux-android
ENV TARGETS=$TARGETS,aarch64-linux-android
-ENV TARGETS=$TARGETS,armv7-linux-androideabi
+ENV TARGETS=$TARGETS,x86_64-linux-android
ENV RUST_CONFIGURE_ARGS \
--target=$TARGETS \
--arm-linux-androideabi-ndk=/android/ndk-arm-9 \
--armv7-linux-androideabi-ndk=/android/ndk-arm-9 \
--i686-linux-android-ndk=/android/ndk-x86-9 \
- --aarch64-linux-android-ndk=/android/ndk-aarch64
+ --aarch64-linux-android-ndk=/android/ndk-arm64-21 \
+ --x86_64-linux-android-ndk=/android/ndk-x86_64-21
ENV SCRIPT python2.7 ../x.py dist --target $TARGETS
bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
--platform=android-21 \
--toolchain=aarch64-linux-android-4.9 \
- --install-dir=/android/ndk-aarch64 \
+ --install-dir=/android/ndk-arm64-21 \
--ndk-dir=/android/android-ndk-r11c \
--arch=arm64
bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
--install-dir=/android/ndk-x86-9 \
--ndk-dir=/android/android-ndk-r11c \
--arch=x86
+bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
+ --platform=android-21 \
+ --toolchain=x86_64-4.9 \
+ --install-dir=/android/ndk-x86_64-21 \
+ --ndk-dir=/android/android-ndk-r11c \
+ --arch=x86_64
rm -rf ./android-ndk-r11c-linux-x86_64.zip ./android-ndk-r11c
### Type parameter bounds
```antlr
+bound-list := bound | bound '+' bound-list '+' ?
bound := ty_bound | lt_bound
lt_bound := lifetime
-ty_bound := [?] [ for<lt_param_defs> ] simple_path
-bound-list := bound | bound '+' bound-list '+' ?
+ty_bound := ty_bound_noparen | (ty_bound_noparen)
+ty_bound_noparen := [?] [ for<lt_param_defs> ] simple_path
```
### Self types
- [proc_macro](language-features/proc-macro.md)
- [quote](language-features/quote.md)
- [relaxed_adts](language-features/relaxed-adts.md)
+ - [repr_align](language-features/repr-align.md)
- [repr_simd](language-features/repr-simd.md)
- [rustc_attrs](language-features/rustc-attrs.md)
- [rustc_diagnostic_macros](language-features/rustc-diagnostic-macros.md)
--- /dev/null
+# `repr_align`
+
+The tracking issue for this feature is: [#33626]
+
+[#33626]: https://github.com/rust-lang/rust/issues/33626
+
+------------------------
+
+
+
+
GDB_PYTHON_MODULE_DIRECTORY="$RUSTC_SYSROOT/lib/rustlib/etc"
# Run GDB with the additional arguments that load the pretty printers
-PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" gdb \
+# Set the environment variable `RUST_GDB` to overwrite the call to a
+# different/specific command (defaults to `gdb`).
+RUST_GDB="${RUST_GDB:-gdb}"
+PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" ${RUST_GDB} \
-d "$GDB_PYTHON_MODULE_DIRECTORY" \
-iex "add-auto-load-safe-path $GDB_PYTHON_MODULE_DIRECTORY" \
"$@"
issue = "27700")]
use core::{isize, usize};
-#[cfg(not(test))]
use core::intrinsics::{min_align_of_val, size_of_val};
#[allow(improper_ctypes)]
}
}
-#[cfg(not(test))]
-#[lang = "box_free"]
+#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+pub(crate) unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
let size = size_of_val(&*ptr);
let align = min_align_of_val(&*ptr);
// We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
#![feature(needs_allocator)]
#![feature(optin_builtin_traits)]
#![feature(placement_in_syntax)]
+#![cfg_attr(stage0, feature(pub_restricted))]
#![feature(shared)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
use core::ptr::{self, Shared};
use core::convert::From;
-use heap::deallocate;
+use heap::{allocate, deallocate, box_free};
use raw_vec::RawVec;
struct RcBox<T: ?Sized> {
value: T,
}
-
/// A single-threaded reference-counting pointer.
///
/// See the [module-level documentation](./index.html) for more details.
}
}
+impl<T> Rc<[T]> {
+ /// Constructs a new `Rc<[T]>` from a `Box<[T]>`.
+ #[doc(hidden)]
+ #[unstable(feature = "rustc_private",
+ reason = "for internal use in rustc",
+ issue = "0")]
+ pub fn __from_array(value: Box<[T]>) -> Rc<[T]> {
+ unsafe {
+ let ptr: *mut RcBox<[T]> =
+ mem::transmute([mem::align_of::<RcBox<[T; 1]>>(), value.len()]);
+ // FIXME(custom-DST): creating this invalid &[T] is dubiously defined,
+ // we should have a better way of getting the size/align
+ // of a DST from its unsized part.
+ let ptr = allocate(size_of_val(&*ptr), align_of_val(&*ptr));
+ let ptr: *mut RcBox<[T]> = mem::transmute([ptr as usize, value.len()]);
+
+ // Initialize the new RcBox.
+ ptr::write(&mut (*ptr).strong, Cell::new(1));
+ ptr::write(&mut (*ptr).weak, Cell::new(1));
+ ptr::copy_nonoverlapping(
+ value.as_ptr(),
+ &mut (*ptr).value as *mut [T] as *mut T,
+ value.len());
+
+ // Free the original allocation without freeing its (moved) contents.
+ box_free(Box::into_raw(value));
+
+ Rc { ptr: Shared::new(ptr as *const _) }
+ }
+ }
+}
+
impl<T: ?Sized> Rc<T> {
/// Creates a new [`Weak`][weak] pointer to this value.
///
/// An owning iterator over the elements of a `BinaryHeap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BinaryHeap`]
+/// This `struct` is created by the [`into_iter`] method on [`BinaryHeap`][`BinaryHeap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.BinaryHeap.html#method.into_iter
/// An owning iterator over the entries of a `BTreeMap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
+/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`][`BTreeMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.BTreeMap.html#method.into_iter
/// An owning iterator over the items of a `BTreeSet`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`]
+/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`][`BTreeSet`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
/// An owning iterator over the elements of a `LinkedList`.
///
-/// This `struct` is created by the [`into_iter`] method on [`LinkedList`]
+/// This `struct` is created by the [`into_iter`] method on [`LinkedList`][`LinkedList`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.LinkedList.html#method.into_iter
/// An owning iterator over the elements of a `VecDeque`.
///
-/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`][`VecDeque`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.VecDeque.html#method.into_iter
#![stable(feature = "rust1", since = "1.0.0")]
+use cell::UnsafeCell;
use cmp;
use hash::Hash;
use hash::Hasher;
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<'a, T: Send + ?Sized> Send for &'a mut T {}
}
+
+/// Compiler-internal trait used to determine whether a type contains
+/// any `UnsafeCell` internally, but not through an indirection.
+/// This affects, for example, whether a `static` of that type is
+/// placed in read-only static memory or writable static memory.
+#[cfg_attr(not(stage0), lang = "freeze")]
+unsafe trait Freeze {}
+
+unsafe impl Freeze for .. {}
+
+impl<T: ?Sized> !Freeze for UnsafeCell<T> {}
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<'a, T: ?Sized> Freeze for &'a T {}
+unsafe impl<'a, T: ?Sized> Freeze for &'a mut T {}
-Subproject commit 05a2d197356ef253dfd985166576619ac9b6947f
+Subproject commit c34a802d1eb037b44c5252078c7270b5472e0f65
// predicates for an item wind up in `ItemSignature`).
AssociatedItems(D),
ItemSignature(D),
+ IsForeignItem(D),
TypeParamPredicates((D, D)),
SizedConstraint(D),
AdtDestructor(D),
TransCrateItem,
AssociatedItems,
ItemSignature,
+ IsForeignItem,
AssociatedItemDefIds,
InherentImpls,
TypeckTables,
TransInlinedItem(ref d) => op(d).map(TransInlinedItem),
AssociatedItems(ref d) => op(d).map(AssociatedItems),
ItemSignature(ref d) => op(d).map(ItemSignature),
+ IsForeignItem(ref d) => op(d).map(IsForeignItem),
TypeParamPredicates((ref item, ref param)) => {
Some(TypeParamPredicates((try_opt!(op(item)), try_opt!(op(param)))))
}
E0489, // type/lifetime parameter not in scope here
E0490, // a value of type `..` is borrowed for too long
E0495, // cannot infer an appropriate lifetime due to conflicting requirements
- E0566 // conflicting representation hints
+ E0566, // conflicting representation hints
+ E0587, // conflicting packed and align representation hints
}
};
let mut conflicting_reprs = 0;
+ let mut found_packed = false;
+ let mut found_align = false;
+
for word in words {
let name = match word.name() {
("attribute should be applied to struct or union",
"a struct or union")
} else {
+ found_packed = true;
continue
}
}
continue
}
}
+ "align" => {
+ found_align = true;
+ if target != Target::Struct {
+ ("attribute should be applied to struct",
+ "a struct")
+ } else {
+ continue
+ }
+ }
"i8" | "u8" | "i16" | "u16" |
"i32" | "u32" | "i64" | "u64" |
"isize" | "usize" => {
span_warn!(self.sess, attr.span, E0566,
"conflicting representation hints");
}
+ if found_align && found_packed {
+ struct_span_err!(self.sess, attr.span, E0587,
+ "conflicting packed and align representation hints").emit();
+ }
}
fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
ty::ReEmpty |
ty::ReErased => {
// replace all free regions with 'erased
- self.tcx().mk_region(ty::ReErased)
+ self.tcx().types.re_erased
}
}
}
} else {
// otherwise, we don't know what the free region is,
// so we must conservatively say the LUB is static:
- self.tcx.mk_region(ReStatic)
+ self.tcx.types.re_static
}
}
if a == b {
a
} else {
- self.tcx.mk_region(ReStatic)
+ self.tcx.types.re_static
}
}
}
fn construct_var_data(&self) -> Vec<VarValue<'tcx>> {
(0..self.num_vars() as usize)
- .map(|_| Value(self.tcx.mk_region(ty::ReEmpty)))
+ .map(|_| Value(self.tcx.types.re_empty))
.collect()
}
-> &'tcx ty::Region {
match values[rid.index as usize] {
Value(r) => r,
- ErrorValue => tcx.mk_region(ReStatic), // Previously reported error.
+ ErrorValue => tcx.types.re_static, // Previously reported error.
}
}
fn visibility(&self, def: DefId) -> ty::Visibility;
fn visible_parent_map<'a>(&'a self) -> ::std::cell::Ref<'a, DefIdMap<DefId>>;
fn item_generics_cloned(&self, def: DefId) -> ty::Generics;
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>;
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]>;
fn fn_arg_names(&self, did: DefId) -> Vec<ast::Name>;
// trait info
fn implementations_of_trait(&self, filter: Option<DefId>) -> Vec<DefId>;
// impl info
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity;
fn impl_parent(&self, impl_def_id: DefId) -> Option<DefId>;
// trait/impl-item info
}
fn item_generics_cloned(&self, def: DefId) -> ty::Generics
{ bug!("item_generics_cloned") }
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute> { bug!("item_attrs") }
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]> { bug!("item_attrs") }
fn fn_arg_names(&self, did: DefId) -> Vec<ast::Name> { bug!("fn_arg_names") }
// trait info
fn implementations_of_trait(&self, filter: Option<DefId>) -> Vec<DefId> { vec![] }
// impl info
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity { bug!("impl_polarity") }
fn impl_parent(&self, def: DefId) -> Option<DefId> { bug!("impl_parent") }
// trait/impl-item info
hir::ExprMatch(ref discr, ref arms, _) => {
let discr_cmt = return_if_err!(self.mc.cat_expr(&discr));
- let r = self.tcx().mk_region(ty::ReEmpty);
+ let r = self.tcx().types.re_empty;
self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant);
// treatment of the discriminant is handled while walking the arms.
pub fn extract(attrs: &[ast::Attribute]) -> Option<Symbol> {
for attribute in attrs {
- match attribute.value_str() {
- Some(value) if attribute.check_name("lang") => return Some(value),
- _ => {}
+ if attribute.check_name("lang") {
+ if let Some(value) = attribute.value_str() {
+ return Some(value)
+ }
}
}
UnsizeTraitLangItem, "unsize", unsize_trait;
CopyTraitLangItem, "copy", copy_trait;
SyncTraitLangItem, "sync", sync_trait;
+ FreezeTraitLangItem, "freeze", freeze_trait;
DropTraitLangItem, "drop", drop_trait;
// we can promote to a constant, otherwise equal to enclosing temp
// lifetime.
let (re, old_re) = if promotable {
- (self.tcx().mk_region(ty::ReStatic),
- self.tcx().mk_region(ty::ReStatic))
+ (self.tcx().types.re_static,
+ self.tcx().types.re_static)
} else {
self.temporary_scope(id)
};
use hir::map as hir_map;
use hir::def::Def;
use hir::def_id::{DefId, CrateNum};
+use std::rc::Rc;
use ty::{self, TyCtxt};
use ty::maps::Providers;
use middle::privacy;
}
}
-pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> NodeSet {
+pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Rc<NodeSet> {
ty::queries::reachable_set::get(tcx, DUMMY_SP, LOCAL_CRATE)
}
-fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> NodeSet {
+fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> Rc<NodeSet> {
debug_assert!(crate_num == LOCAL_CRATE);
let access_levels = &ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE);
reachable_context.propagate();
// Return the set of reachable symbols.
- reachable_context.reachable_symbols
+ Rc::new(reachable_context.reachable_symbols)
}
pub fn provide(providers: &mut Providers) {
// Otherwise, we have something of the form
// `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
Some(t_a) => {
- let r_static = selcx.tcx().mk_region(ty::ReStatic);
+ let r_static = selcx.tcx().types.re_static;
register_region_obligation(t_a, r_static,
obligation.cause.clone(),
region_obligations);
mod select;
mod specialize;
mod structural_impls;
+pub mod trans;
mod util;
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
// the method may have some early-bound lifetimes, add
// regions for those
let substs = Substs::for_item(tcx, def_id,
- |_, _| tcx.mk_region(ty::ReErased),
+ |_, _| tcx.types.re_erased,
|def, _| trait_ref.substs().type_for_def(def));
// the trait type may have higher-ranked lifetimes in it;
debug!("Retaining candidate #{}/{}: {:?}",
i, candidates.len(), candidates[i]);
i += 1;
+
+ // If there are *STILL* multiple candidates, give up
+ // and report ambiguity.
+ if i > 1 {
+ debug!("multiple matches, ambig");
+ return Ok(None);
+ }
}
}
}
- // If there are *STILL* multiple candidates, give up and
- // report ambiguity.
- if candidates.len() > 1 {
- debug!("multiple matches, ambig");
- return Ok(None);
- }
-
// If there are *NO* candidates, then there are no impls --
// that we know of, anyway. Note that in the case where there
// are unbound type variables within the obligation, it might
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This file contains various trait resolution methods used by trans.
+// They all assume regions can be erased and monomorphic types. It
+// seems likely that they should eventually be merged into more
+// general routines.
+
+use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
+use hir::def_id::DefId;
+use infer::TransNormalize;
+use std::cell::RefCell;
+use std::marker::PhantomData;
+use syntax::ast;
+use syntax_pos::Span;
+use traits::{FulfillmentContext, Obligation, ObligationCause, Reveal, SelectionContext, Vtable};
+use ty::{self, Ty, TyCtxt};
+use ty::subst::{Subst, Substs};
+use ty::fold::{TypeFoldable, TypeFolder};
+use util::common::MemoizationMap;
+
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ /// Attempts to resolve an obligation to a vtable.. The result is
+ /// a shallow vtable resolution -- meaning that we do not
+ /// (necessarily) resolve all nested obligations on the impl. Note
+ /// that type check should guarantee to us that all nested
+ /// obligations *could be* resolved if we wanted to.
+ pub fn trans_fulfill_obligation(self,
+ span: Span,
+ trait_ref: ty::PolyTraitRef<'tcx>)
+ -> Vtable<'tcx, ()>
+ {
+ // Remove any references to regions; this helps improve caching.
+ let trait_ref = self.erase_regions(&trait_ref);
+
+ self.trans_trait_caches.trait_cache.memoize(trait_ref, || {
+ debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
+ trait_ref, trait_ref.def_id());
+
+ // Do the initial selection for the obligation. This yields the
+ // shallow result we are looking for -- that is, what specific impl.
+ self.infer_ctxt((), Reveal::All).enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+
+ let obligation_cause = ObligationCause::misc(span,
+ ast::DUMMY_NODE_ID);
+ let obligation = Obligation::new(obligation_cause,
+ trait_ref.to_poly_trait_predicate());
+
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => {
+ // Ambiguity can happen when monomorphizing during trans
+ // expands to some humongo type that never occurred
+ // statically -- this humongo type can then overflow,
+ // leading to an ambiguous result. So report this as an
+ // overflow bug, since I believe this is the only case
+ // where ambiguity can result.
+ debug!("Encountered ambiguity selecting `{:?}` during trans, \
+ presuming due to overflow",
+ trait_ref);
+ self.sess.span_fatal(span,
+ "reached the recursion limit during monomorphization \
+ (selection ambiguity)");
+ }
+ Err(e) => {
+ span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
+ e, trait_ref)
+ }
+ };
+
+ debug!("fulfill_obligation: selection={:?}", selection);
+
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters.
+ let mut fulfill_cx = FulfillmentContext::new();
+ let vtable = selection.map(|predicate| {
+ debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ });
+ let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
+
+ info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+ vtable
+ })
+ })
+ }
+
+ /// Monomorphizes a type from the AST by first applying the in-scope
+ /// substitutions and then normalizing any associated types.
+ pub fn trans_apply_param_substs<T>(self,
+ param_substs: &Substs<'tcx>,
+ value: &T)
+ -> T
+ where T: TransNormalize<'tcx>
+ {
+ debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
+ let substituted = value.subst(self, param_substs);
+ let substituted = self.erase_regions(&substituted);
+ AssociatedTypeNormalizer::new(self).fold(&substituted)
+ }
+}
+
+struct AssociatedTypeNormalizer<'a, 'gcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'gcx>,
+}
+
+impl<'a, 'gcx> AssociatedTypeNormalizer<'a, 'gcx> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'gcx>) -> Self {
+ AssociatedTypeNormalizer { tcx }
+ }
+
+ fn fold<T:TypeFoldable<'gcx>>(&mut self, value: &T) -> T {
+ if !value.has_projection_types() {
+ value.clone()
+ } else {
+ value.fold_with(self)
+ }
+ }
+}
+
+impl<'a, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'gcx> {
+ fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> {
+ if !ty.has_projection_types() {
+ ty
+ } else {
+ self.tcx.trans_trait_caches.project_cache.memoize(ty, || {
+ debug!("AssociatedTypeNormalizer: ty={:?}", ty);
+ self.tcx.normalize_associated_type(&ty)
+ })
+ }
+ }
+}
+
+/// Specializes caches used in trans -- in particular, they assume all
+/// types are fully monomorphized and that free regions can be erased.
+pub struct TransTraitCaches<'tcx> {
+ trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
+ project_cache: RefCell<DepTrackingMap<ProjectionCache<'tcx>>>,
+}
+
+impl<'tcx> TransTraitCaches<'tcx> {
+ pub fn new(graph: DepGraph) -> Self {
+ TransTraitCaches {
+ trait_cache: RefCell::new(DepTrackingMap::new(graph.clone())),
+ project_cache: RefCell::new(DepTrackingMap::new(graph)),
+ }
+ }
+}
+
+// Implement DepTrackingMapConfig for `trait_cache`
+pub struct TraitSelectionCache<'tcx> {
+ data: PhantomData<&'tcx ()>
+}
+
+impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
+ type Key = ty::PolyTraitRef<'tcx>;
+ type Value = Vtable<'tcx, ()>;
+ fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
+ key.to_poly_trait_predicate().dep_node()
+ }
+}
+
+// # Global Cache
+
+pub struct ProjectionCache<'gcx> {
+ data: PhantomData<&'gcx ()>
+}
+
+impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
+ type Key = Ty<'gcx>;
+ type Value = Ty<'gcx>;
+ fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
+ // Ideally, we'd just put `key` into the dep-node, but we
+ // can't put full types in there. So just collect up all the
+ // def-ids of structs/enums as well as any traits that we
+ // project out of. It doesn't matter so much what we do here,
+ // except that if we are too coarse, we'll create overly
+ // coarse edges between impls and the trans. For example, if
+ // we just used the def-id of things we are projecting out of,
+ // then the key for `<Foo as SomeTrait>::T` and `<Bar as
+ // SomeTrait>::T` would both share a dep-node
+ // (`TraitSelect(SomeTrait)`), and hence the impls for both
+ // `Foo` and `Bar` would be considered inputs. So a change to
+ // `Bar` would affect things that just normalized `Foo`.
+ // Anyway, this heuristic is not ideal, but better than
+ // nothing.
+ let def_ids: Vec<DefId> =
+ key.walk()
+ .filter_map(|t| match t.sty {
+ ty::TyAdt(adt_def, _) => Some(adt_def.did),
+ ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
+ _ => None,
+ })
+ .collect();
+
+ DepNode::ProjectionCache { def_ids: def_ids }
+ }
+}
+
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use hir::def_id::{DefId};
-use ty::{self, Ty, TyCtxt};
-use util::common::MemoizationMap;
-use util::nodemap::FxHashMap;
-
-use std::fmt;
-use std::ops;
-
-use syntax::ast;
-
-/// Type contents is how the type checker reasons about kinds.
-/// They track what kinds of things are found within a type. You can
-/// think of them as kind of an "anti-kind". They track the kinds of values
-/// and thinks that are contained in types. Having a larger contents for
-/// a type tends to rule that type *out* from various kinds. For example,
-/// a type that contains a reference is not sendable.
-///
-/// The reason we compute type contents and not kinds is that it is
-/// easier for me (nmatsakis) to think about what is contained within
-/// a type than to think about what is *not* contained within a type.
-#[derive(Clone, Copy)]
-pub struct TypeContents {
- pub bits: u64
-}
-
-macro_rules! def_type_content_sets {
- (mod $mname:ident { $($name:ident = $bits:expr),+ }) => {
- #[allow(non_snake_case)]
- mod $mname {
- use super::TypeContents;
- $(
- #[allow(non_upper_case_globals)]
- pub const $name: TypeContents = TypeContents { bits: $bits };
- )+
- }
- }
-}
-
-def_type_content_sets! {
- mod TC {
- None = 0b0000_0000__0000_0000__0000,
-
- // Things that are interior to the value (first nibble):
- InteriorUnsafe = 0b0000_0000__0000_0000__0010,
- InteriorParam = 0b0000_0000__0000_0000__0100,
- // InteriorAll = 0b00000000__00000000__1111,
-
- // Things that are owned by the value (second and third nibbles):
- OwnsDtor = 0b0000_0000__0000_0010__0000,
- // OwnsAll = 0b0000_0000__1111_1111__0000,
-
- // All bits
- All = 0b1111_1111__1111_1111__1111
- }
-}
-
-impl TypeContents {
- pub fn when(&self, cond: bool) -> TypeContents {
- if cond {*self} else {TC::None}
- }
-
- pub fn intersects(&self, tc: TypeContents) -> bool {
- (self.bits & tc.bits) != 0
- }
-
- pub fn interior_param(&self) -> bool {
- self.intersects(TC::InteriorParam)
- }
-
- pub fn interior_unsafe(&self) -> bool {
- self.intersects(TC::InteriorUnsafe)
- }
-
- pub fn needs_drop(&self, _: TyCtxt) -> bool {
- self.intersects(TC::OwnsDtor)
- }
-
- pub fn union<I, T, F>(v: I, mut f: F) -> TypeContents where
- I: IntoIterator<Item=T>,
- F: FnMut(T) -> TypeContents,
- {
- v.into_iter().fold(TC::None, |tc, ty| tc | f(ty))
- }
-}
-
-impl ops::BitOr for TypeContents {
- type Output = TypeContents;
-
- fn bitor(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits | other.bits}
- }
-}
-
-impl ops::BitAnd for TypeContents {
- type Output = TypeContents;
-
- fn bitand(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits & other.bits}
- }
-}
-
-impl ops::Sub for TypeContents {
- type Output = TypeContents;
-
- fn sub(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits & !other.bits}
- }
-}
-
-impl fmt::Debug for TypeContents {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "TypeContents({:b})", self.bits)
- }
-}
-
-impl<'a, 'tcx> ty::TyS<'tcx> {
- pub fn type_contents(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> TypeContents {
- return tcx.tc_cache.memoize(self, || tc_ty(tcx, self, &mut FxHashMap()));
-
- fn tc_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ty: Ty<'tcx>,
- cache: &mut FxHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
- {
- // Subtle: Note that we are *not* using tcx.tc_cache here but rather a
- // private cache for this walk. This is needed in the case of cyclic
- // types like:
- //
- // struct List { next: Box<Option<List>>, ... }
- //
- // When computing the type contents of such a type, we wind up deeply
- // recursing as we go. So when we encounter the recursive reference
- // to List, we temporarily use TC::None as its contents. Later we'll
- // patch up the cache with the correct value, once we've computed it
- // (this is basically a co-inductive process, if that helps). So in
- // the end we'll compute TC::OwnsOwned, in this case.
- //
- // The problem is, as we are doing the computation, we will also
- // compute an *intermediate* contents for, e.g., Option<List> of
- // TC::None. This is ok during the computation of List itself, but if
- // we stored this intermediate value into tcx.tc_cache, then later
- // requests for the contents of Option<List> would also yield TC::None
- // which is incorrect. This value was computed based on the crutch
- // value for the type contents of list. The correct value is
- // TC::OwnsOwned. This manifested as issue #4821.
- if let Some(tc) = cache.get(&ty) {
- return *tc;
- }
- // Must check both caches!
- if let Some(tc) = tcx.tc_cache.borrow().get(&ty) {
- return *tc;
- }
- cache.insert(ty, TC::None);
-
- let result = match ty.sty {
- // usize and isize are ffi-unsafe
- ty::TyUint(ast::UintTy::Us) | ty::TyInt(ast::IntTy::Is) => {
- TC::None
- }
-
- // Scalar and unique types are sendable, and durable
- ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
- ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar => {
- TC::None
- }
-
- ty::TyDynamic(..) => {
- TC::All - TC::InteriorParam
- }
-
- ty::TyRawPtr(_) => {
- TC::None
- }
-
- ty::TyRef(..) => {
- TC::None
- }
-
- ty::TyArray(ty, _) => {
- tc_ty(tcx, ty, cache)
- }
-
- ty::TySlice(ty) => {
- tc_ty(tcx, ty, cache)
- }
- ty::TyStr => TC::None,
-
- ty::TyClosure(def_id, ref substs) => {
- TypeContents::union(
- substs.upvar_tys(def_id, tcx),
- |ty| tc_ty(tcx, &ty, cache))
- }
-
- ty::TyTuple(ref tys, _) => {
- TypeContents::union(&tys[..],
- |ty| tc_ty(tcx, *ty, cache))
- }
-
- ty::TyAdt(def, substs) => {
- let mut res =
- TypeContents::union(&def.variants, |v| {
- TypeContents::union(&v.fields, |f| {
- tc_ty(tcx, f.ty(tcx, substs), cache)
- })
- });
-
- if def.is_union() {
- // unions don't have destructors regardless of the child types
- res = res - TC::OwnsDtor;
- }
-
- if def.has_dtor(tcx) {
- res = res | TC::OwnsDtor;
- }
-
- apply_lang_items(tcx, def.did, res)
- }
-
- ty::TyProjection(..) |
- ty::TyParam(_) |
- ty::TyAnon(..) => {
- TC::All
- }
-
- ty::TyInfer(_) |
- ty::TyError => {
- bug!("asked to compute contents of error type");
- }
- };
-
- cache.insert(ty, result);
- result
- }
-
- fn apply_lang_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- did: DefId, tc: TypeContents)
- -> TypeContents {
- if Some(did) == tcx.lang_items.unsafe_cell_type() {
- tc | TC::InteriorUnsafe
- } else {
- tc
- }
- }
- }
-}
pub f64: Ty<'tcx>,
pub never: Ty<'tcx>,
pub err: Ty<'tcx>,
+
+ pub re_empty: &'tcx Region,
+ pub re_static: &'tcx Region,
+ pub re_erased: &'tcx Region,
}
#[derive(RustcEncodable, RustcDecodable)]
impl<'tcx> CommonTypes<'tcx> {
fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
let mk = |sty| interners.intern_ty(sty, None);
+ let mk_region = |r| {
+ if let Some(r) = interners.region.borrow().get(&r) {
+ return r.0;
+ }
+ let r = interners.arena.alloc(r);
+ interners.region.borrow_mut().insert(Interned(r));
+ &*r
+ };
CommonTypes {
bool: mk(TyBool),
char: mk(TyChar),
u128: mk(TyUint(ast::UintTy::U128)),
f32: mk(TyFloat(ast::FloatTy::F32)),
f64: mk(TyFloat(ast::FloatTy::F64)),
+
+ re_empty: mk_region(Region::ReEmpty),
+ re_static: mk_region(Region::ReStatic),
+ re_erased: mk_region(Region::ReErased),
}
}
}
pub specializes_cache: RefCell<traits::SpecializesCache>,
+ pub trans_trait_caches: traits::trans::TransTraitCaches<'tcx>,
+
pub dep_graph: DepGraph,
/// Common types, pre-interned for your convenience.
// Internal cache for metadata decoding. No need to track deps on this.
pub rcache: RefCell<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
- // Cache for the type-contents routine. FIXME -- track deps?
- pub tc_cache: RefCell<FxHashMap<Ty<'tcx>, ty::contents::TypeContents>>,
-
// FIXME dep tracking -- should be harmless enough
pub normalized_cache: RefCell<FxHashMap<Ty<'tcx>, Ty<'tcx>>>,
providers[LOCAL_CRATE] = local_providers;
tls::enter_global(GlobalCtxt {
sess: s,
+ trans_trait_caches: traits::trans::TransTraitCaches::new(dep_graph.clone()),
specializes_cache: RefCell::new(traits::SpecializesCache::new()),
global_arenas: arenas,
global_interners: interners,
freevars: RefCell::new(resolutions.freevars),
maybe_unused_trait_imports: resolutions.maybe_unused_trait_imports,
rcache: RefCell::new(FxHashMap()),
- tc_cache: RefCell::new(FxHashMap()),
normalized_cache: RefCell::new(FxHashMap()),
inhabitedness_cache: RefCell::new(FxHashMap()),
lang_items: lang_items,
}
pub fn mk_static_str(self) -> Ty<'tcx> {
- self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
+ self.mk_imm_ref(self.types.re_static, self.mk_str())
}
pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- self.replace_late_bound_regions(value, |_| self.mk_region(ty::ReErased)).0
+ self.replace_late_bound_regions(value, |_| self.types.re_erased).0
}
/// Rewrite any late-bound regions so that they are anonymous. Region numbers are
// whenever a substitution occurs.
match *r {
ty::ReLateBound(..) => r,
- _ => self.tcx().mk_region(ty::ReErased)
+ _ => self.tcx().types.re_erased
}
}
}
}
}
+pub fn shift_region_ref<'a, 'gcx, 'tcx>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ region: &'tcx ty::Region,
+ amount: u32)
+ -> &'tcx ty::Region
+{
+ match region {
+ &ty::ReLateBound(debruijn, br) if amount > 0 => {
+ tcx.mk_region(ty::ReLateBound(debruijn.shifted(amount), br))
+ }
+ _ => {
+ region
+ }
+ }
+}
+
pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
amount: u32, value: &T) -> T
where T: TypeFoldable<'tcx>
value, amount);
value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
- tcx.mk_region(shift_region(*region, amount))
+ shift_region_ref(tcx, region, amount)
}))
}
use ty::{self, Ty, TypeFoldable, Substs};
use util::ppaux;
-use std::borrow::Cow;
use std::fmt;
-use syntax::ast;
-
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Instance<'tcx> {
}
#[inline]
- pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Cow<'tcx, [ast::Attribute]> {
+ pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> ty::Attributes<'tcx> {
tcx.get_attrs(self.def_id())
}
/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
+ /// Maximum alignment of fields and repr alignment.
pub align: Align,
+ /// Primitive alignment of fields without repr alignment.
+ pub primitive_align: Align,
+
/// If true, no alignment padding is used.
pub packed: bool,
fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
repr: &ReprOptions, kind: StructKind,
scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
- let packed = repr.packed();
+ if repr.packed() && repr.align > 0 {
+ bug!("Struct cannot be packed and aligned");
+ }
+
+ let align = if repr.packed() {
+ dl.i8_align
+ } else {
+ dl.aggregate_align
+ };
+
let mut ret = Struct {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
- packed: packed,
+ align: align,
+ primitive_align: align,
+ packed: repr.packed(),
sized: true,
offsets: vec![],
memory_index: vec![],
// Invariant: offset < dl.obj_size_bound() <= 1<<61
if !ret.packed {
let align = field.align(dl);
+ let primitive_align = field.primitive_align(dl);
ret.align = ret.align.max(align);
+ ret.primitive_align = ret.primitive_align.max(primitive_align);
offset = offset.abi_align(align);
}
.map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
}
+ if repr.align > 0 {
+ let repr_align = repr.align as u64;
+ ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
+ debug!("Struct::new repr_align: {:?}", repr_align);
+ }
debug!("Struct::new min_size: {:?}", offset);
ret.min_size = offset;
}
(_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
return Ok(None);
}
}
Ok(None)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// An untagged union.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Union {
pub align: Align,
+ pub primitive_align: Align,
pub min_size: Size,
impl<'a, 'gcx, 'tcx> Union {
fn new(dl: &TargetDataLayout, packed: bool) -> Union {
+ let align = if packed { dl.i8_align } else { dl.aggregate_align };
Union {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
+ align: align,
+ primitive_align: align,
min_size: Size::from_bytes(0),
packed: packed,
}
if !self.packed {
self.align = self.align.max(field.align(dl));
+ self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
}
self.min_size = cmp::max(self.min_size, field.size(dl));
}
pub fn stride(&self) -> Size {
self.min_size.abi_align(self.align)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// The first half of a fat pointer.
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
align: Align,
+ primitive_align: Align,
element_size: Size,
count: u64
},
discr: Integer,
variants: Vec<Struct>,
size: Size,
- align: Align
+ align: Align,
+ primitive_align: Align,
},
/// Two cases distinguished by a nullable pointer: the case with discriminant
}
}
-/// Helper function for normalizing associated types in an inference context.
-fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- ty: Ty<'gcx>)
- -> Ty<'gcx> {
- if !ty.has_projection_types() {
- return ty;
- }
-
- let mut selcx = traits::SelectionContext::new(infcx);
- let cause = traits::ObligationCause::dummy();
- let traits::Normalized { value: result, obligations } =
- traits::normalize(&mut selcx, cause, &ty);
-
- let mut fulfill_cx = traits::FulfillmentContext::new();
-
- for obligation in obligations {
- fulfill_cx.register_predicate_obligation(infcx, obligation);
- }
-
- infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
-}
-
impl<'a, 'gcx, 'tcx> Layout {
pub fn compute_uncached(ty: Ty<'gcx>,
infcx: &InferCtxt<'a, 'gcx, 'tcx>)
let ptr_layout = |pointee: Ty<'gcx>| {
let non_zero = !ty.is_unsafe_ptr();
- let pointee = normalize_associated_type(infcx, pointee);
+ let pointee = infcx.normalize_projections(pointee);
if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
Ok(Scalar { value: Pointer, non_zero: non_zero })
} else {
Array {
sized: true,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element_size,
count: count
}
Array {
sized: false,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element.size(dl),
count: 0
}
Array {
sized: false,
align: dl.i8_align,
+ primitive_align: dl.i8_align,
element_size: Size::from_bytes(1),
count: 0
}
assert!(discr_max >= 0);
let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
let mut align = dl.aggregate_align;
+ let mut primitive_align = dl.aggregate_align;
let mut size = Size::from_bytes(0);
// We're interested in the smallest alignment, so start large.
}
size = cmp::max(size, st.min_size);
align = align.max(st.align);
+ primitive_align = primitive_align.max(st.primitive_align);
Ok(st)
}).collect::<Result<Vec<_>, _>>()?;
discr: ity,
variants: variants,
size: size,
- align: align
+ align: align,
+ primitive_align: primitive_align
}
}
// Types with no meaningful known layout.
ty::TyProjection(_) | ty::TyAnon(..) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
return Err(LayoutError::Unknown(ty));
}
}
}
+ /// Returns alignment before repr alignment is applied
+ pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
+ match *self {
+ Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
+ Univariant { ref variant, .. } |
+ StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+ variant.primitive_align
+ },
+
+ _ => self.align(dl)
+ }
+ }
+
+ /// Returns repr alignment if it is greater than the primitive alignment.
+ pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
+ let align = self.align(dl);
+ let primitive_align = self.primitive_align(dl);
+ if align.abi() > primitive_align.abi() {
+ Some(align.abi() as u32)
+ } else {
+ None
+ }
+ }
+
pub fn field_offset<C: HasDataLayout>(&self,
cx: C,
i: usize,
}
ty::TyProjection(_) | ty::TyAnon(..) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
Err(err)
} else {
type TyLayout;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
}
impl<'a, 'gcx, 'tcx> LayoutTyper<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
type TyLayout = Result<TyLayout<'gcx>, LayoutError<'gcx>>;
fn layout_of(self, ty: Ty<'gcx>) -> Self::TyLayout {
- let ty = normalize_associated_type(self, ty);
+ let ty = self.normalize_projections(ty);
Ok(TyLayout {
ty: ty,
variant_index: None
})
}
+
+ fn normalize_projections(self, ty: Ty<'gcx>) -> Ty<'gcx> {
+ if !ty.has_projection_types() {
+ return ty;
+ }
+
+ let mut selcx = traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::dummy();
+ let traits::Normalized { value: result, obligations } =
+ traits::normalize(&mut selcx, cause, &ty);
+
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ }
+
+ self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
+ }
}
impl<'a, 'tcx> TyLayout<'tcx> {
}
pub fn field<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> C::TyLayout {
- cx.layout_of(self.field_type(cx, i))
+ cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
}
}
use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use hir;
use middle::const_val;
use middle::privacy::AccessLevels;
use mir;
pub adt_destructor: AdtDestructor(DefId) -> Option<ty::Destructor>,
pub adt_sized_constraint: SizedConstraint(DefId) -> Ty<'tcx>,
+ /// True if this is a foreign item (i.e., linked via `extern { ... }`).
+ pub is_foreign_item: IsForeignItem(DefId) -> bool,
+
/// Maps from def-id of a type or region parameter to its
/// (inferred) variance.
pub variances: ItemSignature(DefId) -> Rc<Vec<ty::Variance>>,
pub associated_item: AssociatedItems(DefId) -> ty::AssociatedItem,
pub impl_trait_ref: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>>,
+ pub impl_polarity: ItemSignature(DefId) -> hir::ImplPolarity,
/// Maps a DefId of a type to a list of its inherent impls.
/// Contains implementations of methods that are inherent to a type.
/// Performs the privacy check and computes "access levels".
pub privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Rc<AccessLevels>,
- pub reachable_set: reachability_dep_node(CrateNum) -> NodeSet,
+ pub reachable_set: reachability_dep_node(CrateNum) -> Rc<NodeSet>,
pub mir_shims: mir_shim(ty::InstanceDef<'tcx>) -> &'tcx RefCell<mir::Mir<'tcx>>
}
use util::nodemap::{NodeSet, DefIdMap, FxHashMap};
use serialize::{self, Encodable, Encoder};
-use std::borrow::Cow;
use std::cell::{Cell, RefCell, Ref};
use std::collections::BTreeMap;
+use std::cmp;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::rc::Rc;
pub use self::sty::Region::*;
pub use self::sty::TypeVariants::*;
-pub use self::contents::TypeContents;
pub use self::context::{TyCtxt, GlobalArenas, tls};
pub use self::context::{Lift, TypeckTables};
pub mod wf;
pub mod util;
-mod contents;
mod context;
mod flags;
mod instance;
#[derive(Clone)]
pub struct CrateAnalysis {
pub access_levels: Rc<AccessLevels>,
- pub reachable: NodeSet,
+ pub reachable: Rc<NodeSet>,
pub name: String,
pub glob_map: Option<hir::GlobMap>,
}
const IS_SIZED = 1 << 17,
const MOVENESS_CACHED = 1 << 18,
const MOVES_BY_DEFAULT = 1 << 19,
+ const FREEZENESS_CACHED = 1 << 20,
+ const IS_FREEZE = 1 << 21,
+ const NEEDS_DROP_CACHED = 1 << 22,
+ const NEEDS_DROP = 1 << 23,
}
}
/// A cache for `type_is_sized`
pub is_sized_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
+
+ /// A cache for `type_is_freeze`
+ pub is_freeze_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
}
impl<'a, 'tcx> ParameterEnvironment<'tcx> {
free_id_outlive: self.free_id_outlive,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
}
}
#[derive(Copy, Clone, Eq, PartialEq, RustcEncodable, RustcDecodable, Default)]
pub struct ReprOptions {
pub int: Option<attr::IntType>,
+ pub align: u16,
pub flags: ReprFlags,
}
impl_stable_hash_for!(struct ReprOptions {
+ align,
int,
flags
});
pub fn new(tcx: TyCtxt, did: DefId) -> ReprOptions {
let mut flags = ReprFlags::empty();
let mut size = None;
-
+ let mut max_align = 0;
for attr in tcx.get_attrs(did).iter() {
for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) {
flags.insert(match r {
size = Some(i);
ReprFlags::empty()
},
+ attr::ReprAlign(align) => {
+ max_align = cmp::max(align, max_align);
+ ReprFlags::empty()
+ },
});
}
}
if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) {
flags.insert(ReprFlags::IS_LINEAR);
}
- ReprOptions { int: size, flags: flags }
+ ReprOptions { int: size, align: max_align, flags: flags }
}
#[inline]
}
}
+#[derive(Debug, Clone)]
+pub enum Attributes<'gcx> {
+ Owned(Rc<[ast::Attribute]>),
+ Borrowed(&'gcx [ast::Attribute])
+}
+
+impl<'gcx> ::std::ops::Deref for Attributes<'gcx> {
+ type Target = [ast::Attribute];
+
+ fn deref(&self) -> &[ast::Attribute] {
+ match self {
+ &Attributes::Owned(ref data) => &data,
+ &Attributes::Borrowed(data) => data
+ }
+ }
+}
+
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn body_tables(self, body: hir::BodyId) -> &'gcx TypeckTables<'gcx> {
self.item_tables(self.hir.body_owner_def_id(body))
}
pub fn trait_impl_polarity(self, id: DefId) -> hir::ImplPolarity {
- if let Some(id) = self.hir.as_local_node_id(id) {
- match self.hir.expect_item(id).node {
- hir::ItemImpl(_, polarity, ..) => polarity,
- ref item => bug!("trait_impl_polarity: {:?} not an impl", item)
- }
- } else {
- self.sess.cstore.impl_polarity(id)
- }
+ queries::impl_polarity::get(self, DUMMY_SP, id)
}
pub fn trait_relevant_for_never(self, did: DefId) -> bool {
Some(self.item_mir(did))
}
- /// If `type_needs_drop` returns true, then `ty` is definitely
- /// non-copy and *might* have a destructor attached; if it returns
- /// false, then `ty` definitely has no destructor (i.e. no drop glue).
- ///
- /// (Note that this implies that if `ty` has a destructor attached,
- /// then `type_needs_drop` will definitely return `true` for `ty`.)
- pub fn type_needs_drop_given_env(self,
- ty: Ty<'gcx>,
- param_env: &ty::ParameterEnvironment<'gcx>) -> bool {
- // Issue #22536: We first query type_moves_by_default. It sees a
- // normalized version of the type, and therefore will definitely
- // know whether the type implements Copy (and thus needs no
- // cleanup/drop/zeroing) ...
- let tcx = self.global_tcx();
- let implements_copy = !ty.moves_by_default(tcx, param_env, DUMMY_SP);
-
- if implements_copy { return false; }
-
- // ... (issue #22536 continued) but as an optimization, still use
- // prior logic of asking if the `needs_drop` bit is set; we need
- // not zero non-Copy types if they have no destructor.
-
- // FIXME(#22815): Note that calling `ty::type_contents` is a
- // conservative heuristic; it may report that `needs_drop` is set
- // when actual type does not actually have a destructor associated
- // with it. But since `ty` absolutely did not have the `Copy`
- // bound attached (see above), it is sound to treat it as having a
- // destructor (e.g. zero its memory on move).
-
- let contents = ty.type_contents(tcx);
- debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
- contents.needs_drop(tcx)
- }
-
/// Get the attributes of a definition.
- pub fn get_attrs(self, did: DefId) -> Cow<'gcx, [ast::Attribute]> {
+ pub fn get_attrs(self, did: DefId) -> Attributes<'gcx> {
if let Some(id) = self.hir.as_local_node_id(did) {
- Cow::Borrowed(self.hir.attrs(id))
+ Attributes::Borrowed(self.hir.attrs(id))
} else {
- Cow::Owned(self.sess.cstore.item_attrs(did))
+ Attributes::Owned(self.sess.cstore.item_attrs(did))
}
}
/// Construct a parameter environment suitable for static contexts or other contexts where there
/// are no free type/lifetime parameters in scope.
pub fn empty_parameter_environment(self) -> ParameterEnvironment<'tcx> {
-
- // for an empty parameter environment, there ARE no free
- // regions, so it shouldn't matter what we use for the free id
- let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID);
ty::ParameterEnvironment {
free_substs: self.intern_substs(&[]),
caller_bounds: Vec::new(),
- implicit_region_bound: self.mk_region(ty::ReEmpty),
- free_id_outlive: free_id_outlive,
+ implicit_region_bound: self.types.re_empty,
+ // for an empty parameter environment, there ARE no free
+ // regions, so it shouldn't matter what we use for the free id
+ free_id_outlive: ROOT_CODE_EXTENT,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
}
}
free_id_outlive: free_id_outlive,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
};
let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps));
pub struct CrateInherentImpls {
pub inherent_impls: DefIdMap<Rc<Vec<DefId>>>,
}
-
}
fn shift_region_through_binders(&self, region: &'tcx ty::Region) -> &'tcx ty::Region {
+ if self.region_binders_passed == 0 || !region.has_escaping_regions() {
+ return region;
+ }
self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed))
}
}
use ty::layout::{Layout, LayoutError};
use ty::TypeVariants::*;
use util::common::ErrorReported;
-use util::nodemap::FxHashMap;
+use util::nodemap::{FxHashMap, FxHashSet};
use middle::lang_items;
use rustc_const_math::{ConstInt, ConstIsize, ConstUsize};
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx ty::Substs<'tcx> {
ty::Substs::for_item(self, item_def_id,
- |_, _| self.mk_region(ty::ReErased),
+ |_, _| self.types.re_erased,
|_, _| {
bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
})
result
}
+ /// Returns `true` if and only if there are no `UnsafeCell`s
+ /// nested within the type (ignoring `PhantomData` or pointers).
+ #[inline]
+ pub fn is_freeze(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool
+ {
+ if self.flags.get().intersects(TypeFlags::FREEZENESS_CACHED) {
+ return self.flags.get().intersects(TypeFlags::IS_FREEZE);
+ }
+
+ self.is_freeze_uncached(tcx, param_env, span)
+ }
+
+ fn is_freeze_uncached(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool {
+ assert!(!self.needs_infer());
+
+ // Fast-path for primitive types
+ let result = match self.sty {
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+ TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) |
+ TyStr | TyNever => Some(true),
+
+ TyArray(..) | TySlice(_) |
+ TyTuple(..) | TyClosure(..) | TyAdt(..) |
+ TyDynamic(..) | TyProjection(..) | TyParam(..) |
+ TyInfer(..) | TyAnon(..) | TyError => None
+ }.unwrap_or_else(|| {
+ self.impls_bound(tcx, param_env, tcx.require_lang_item(lang_items::FreezeTraitLangItem),
+ ¶m_env.is_freeze_cache, span) });
+
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::FREEZENESS_CACHED | TypeFlags::IS_FREEZE
+ } else {
+ TypeFlags::FREEZENESS_CACHED
+ });
+ }
+
+ result
+ }
+
+ /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
+ /// non-copy and *might* have a destructor attached; if it returns
+ /// `false`, then `ty` definitely has no destructor (i.e. no drop glue).
+ ///
+ /// (Note that this implies that if `ty` has a destructor attached,
+ /// then `needs_drop` will definitely return `true` for `ty`.)
+ #[inline]
+ pub fn needs_drop(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>) -> bool {
+ if self.flags.get().intersects(TypeFlags::NEEDS_DROP_CACHED) {
+ return self.flags.get().intersects(TypeFlags::NEEDS_DROP);
+ }
+
+ self.needs_drop_uncached(tcx, param_env, &mut FxHashSet())
+ }
+
+ fn needs_drop_inner(&'tcx self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>,
+ stack: &mut FxHashSet<Ty<'tcx>>)
+ -> bool {
+ if self.flags.get().intersects(TypeFlags::NEEDS_DROP_CACHED) {
+ return self.flags.get().intersects(TypeFlags::NEEDS_DROP);
+ }
+
+ // This should be reported as an error by `check_representable`.
+ //
+ // Consider the type as not needing drop in the meantime to avoid
+ // further errors.
+ if let Some(_) = stack.replace(self) {
+ return false;
+ }
+
+ let needs_drop = self.needs_drop_uncached(tcx, param_env, stack);
+
+ // "Pop" the cycle detection "stack".
+ stack.remove(self);
+
+ needs_drop
+ }
+
+ fn needs_drop_uncached(&'tcx self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>,
+ stack: &mut FxHashSet<Ty<'tcx>>)
+ -> bool {
+ assert!(!self.needs_infer());
+
+ let result = match self.sty {
+ // Fast-path for primitive types
+ ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
+ ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
+ ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar |
+ ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyStr => false,
+
+ // Issue #22536: We first query type_moves_by_default. It sees a
+ // normalized version of the type, and therefore will definitely
+ // know whether the type implements Copy (and thus needs no
+ // cleanup/drop/zeroing) ...
+ _ if !self.moves_by_default(tcx, param_env, DUMMY_SP) => false,
+
+ // ... (issue #22536 continued) but as an optimization, still use
+ // prior logic of asking for the structural "may drop".
+
+ // FIXME(#22815): Note that this is a conservative heuristic;
+ // it may report that the type "may drop" when actual type does
+ // not actually have a destructor associated with it. But since
+ // the type absolutely did not have the `Copy` bound attached
+ // (see above), it is sound to treat it as having a destructor.
+
+ // User destructors are the only way to have concrete drop types.
+ ty::TyAdt(def, _) if def.has_dtor(tcx) => true,
+
+ // Can refer to a type which may drop.
+ // FIXME(eddyb) check this against a ParameterEnvironment.
+ ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyParam(_) |
+ ty::TyAnon(..) | ty::TyInfer(_) | ty::TyError => true,
+
+ // Structural recursion.
+ ty::TyArray(ty, _) | ty::TySlice(ty) => {
+ ty.needs_drop_inner(tcx, param_env, stack)
+ }
+
+ ty::TyClosure(def_id, ref substs) => {
+ substs.upvar_tys(def_id, tcx)
+ .any(|ty| ty.needs_drop_inner(tcx, param_env, stack))
+ }
+
+ ty::TyTuple(ref tys, _) => {
+ tys.iter().any(|ty| ty.needs_drop_inner(tcx, param_env, stack))
+ }
+
+ // unions don't have destructors regardless of the child types
+ ty::TyAdt(def, _) if def.is_union() => false,
+
+ ty::TyAdt(def, substs) => {
+ def.variants.iter().any(|v| {
+ v.fields.iter().any(|f| {
+ f.ty(tcx, substs).needs_drop_inner(tcx, param_env, stack)
+ })
+ })
+ }
+ };
+
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::NEEDS_DROP_CACHED | TypeFlags::NEEDS_DROP
+ } else {
+ TypeFlags::NEEDS_DROP_CACHED
+ });
+ }
+
+ result
+ }
+
#[inline]
pub fn layout<'lcx>(&'tcx self, infcx: &InferCtxt<'a, 'tcx, 'lcx>)
-> Result<&'tcx Layout, LayoutError<'tcx>> {
("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
("i686-linux-android", i686_linux_android),
+ ("x86_64-linux-android", x86_64_linux_android),
("arm-linux-androideabi", arm_linux_androideabi),
("armv7-linux-androideabi", armv7_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use LinkerFlavor;
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+ let mut base = super::android_base::opts();
+ base.cpu = "x86-64".to_string();
+ // https://developer.android.com/ndk/guides/abis.html#86-64
+ base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
+ base.max_atomic_width = Some(64);
+ base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
+
+ Ok(Target {
+ llvm_target: "x86_64-linux-android".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "64".to_string(),
+ data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
+ arch: "x86_64".to_string(),
+ target_os: "android".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ linker_flavor: LinkerFlavor::Gcc,
+ options: base,
+ })
+}
}
Categorization::StaticItem |
Categorization::Deref(.., mc::UnsafePtr(..)) => {
- self.bccx.tcx.mk_region(ty::ReStatic)
+ self.bccx.tcx.types.re_static
}
Categorization::Deref(.., mc::BorrowedPtr(_, r)) |
Categorization::Deref(.., mc::Implicit(_, r)) => {
let ty = lvalue.ty(mir, tcx).to_ty(tcx);
debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, lvalue, ty);
- if tcx.type_needs_drop_given_env(ty, &ctxt.param_env) {
+ if ty.needs_drop(tcx, &ctxt.param_env) {
each_child(child);
} else {
debug!("on_all_drop_children_bits - skipping")
sess.code_stats.borrow().print_type_sizes();
}
+ if ::std::env::var("SKIP_LLVM").is_ok() { ::std::process::exit(0); }
+
let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs);
controller_entry_point!(after_llvm,
defs: resolver.definitions,
analysis: ty::CrateAnalysis {
access_levels: Rc::new(AccessLevels::default()),
- reachable: NodeSet(),
+ reachable: Rc::new(NodeSet()),
name: crate_name.to_string(),
glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None },
},
let index = stability::Index::new(&hir_map);
let mut local_providers = ty::maps::Providers::default();
+ borrowck::provide(&mut local_providers);
mir::provide(&mut local_providers);
+ reachable::provide(&mut local_providers);
rustc_privacy::provide(&mut local_providers);
- borrowck::provide(&mut local_providers);
typeck::provide(&mut local_providers);
ty::provide(&mut local_providers);
reachable::provide(&mut local_providers);
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(ty::ReStatic),
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.types.re_static,
self.tcx().types.isize)
}
pub fn t_rptr_empty(&self) -> Ty<'tcx> {
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(ty::ReEmpty),
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.types.re_empty,
self.tcx().types.isize)
}
draw_col_separator(buffer, line_offset, width_offset - 2);
+ // Special case when there's only one annotation involved, it is the start of a multiline
+ // span and there's no text at the beginning of the code line. Instead of doing the whole
+ // graph:
+ //
+ // 2 | fn foo() {
+ // | _^
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ //
+ // we simplify the output to:
+ //
+ // 2 | / fn foo() {
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ if line.annotations.len() == 1 {
+ if let Some(ref ann) = line.annotations.get(0) {
+ if let AnnotationType::MultilineStart(depth) = ann.annotation_type {
+ if source_string[0..ann.start_col].trim() == "" {
+ let style = if ann.is_primary {
+ Style::UnderlinePrimary
+ } else {
+ Style::UnderlineSecondary
+ };
+ buffer.putc(line_offset,
+ width_offset + depth - 1,
+ '/',
+ style);
+ return vec![(depth, style)];
+ }
+ }
+ }
+ }
+
// We want to display like this:
//
// vec.push(vec.pop().unwrap());
for (i, annotation) in annotations.iter().enumerate() {
for (j, next) in annotations.iter().enumerate() {
if overlaps(next, annotation, 0) // This label overlaps with another one and both
- && !annotation.is_line() // take space (they have text and are not
- && !next.is_line() // multiline lines).
- && annotation.has_label()
- && j > i
+ && annotation.has_label() // take space (they have text and are not
+ && j > i // multiline lines).
&& p == 0 // We're currently on the first line, move the label one line down
{
// This annotation needs a new line in the output.
} else {
0
};
- if overlaps(next, annotation, l) // Do not allow two labels to be in the same
+ if (overlaps(next, annotation, l) // Do not allow two labels to be in the same
// line if they overlap including padding, to
// avoid situations like:
//
// | |
// fn_spanx_span
//
- && !annotation.is_line() // Do not add a new line if this annotation
- && !next.is_line() // or the next are vertical line placeholders.
&& annotation.has_label() // Both labels must have some text, otherwise
- && next.has_label() // they are not overlapping.
+ && next.has_label()) // they are not overlapping.
+ // Do not add a new line if this annotation
+ // or the next are vertical line placeholders.
+ || (annotation.takes_space() // If either this or the next annotation is
+ && next.has_label()) // multiline start/end, move it to a new line
+ || (annotation.has_label() // so as not to overlap the horizontal lines.
+ && next.takes_space())
+ || (annotation.takes_space()
+ && next.takes_space())
{
+ // This annotation needs a new line in the output.
p += 1;
break;
}
line_len = p;
}
}
+
if line_len != 0 {
line_len += 1;
}
};
let pos = pos + 1;
- if pos > 1 && annotation.has_label() {
+ if pos > 1 && (annotation.has_label() || annotation.takes_space()) {
for p in line_offset + 1..line_offset + pos + 1 {
buffer.putc(p,
code_offset + annotation.start_col,
// After this we will have:
//
// 2 | fn foo() {
- // | __________ starting here...
+ // | __________
// | |
// | something about `foo`
// 3 |
// 4 | }
- // | _ ...ending here: test
+ // | _ test
for &(pos, annotation) in &annotations_position {
let style = if annotation.is_primary {
Style::LabelPrimary
// After this we will have:
//
// 2 | fn foo() {
- // | ____-_____^ starting here...
+ // | ____-_____^
// | |
// | something about `foo`
// 3 |
// 4 | }
- // | _^ ...ending here: test
+ // | _^ test
for &(_, annotation) in &annotations_position {
let (underline, style) = if annotation.is_primary {
('^', Style::UnderlinePrimary)
start_col: self.start_col,
end_col: self.start_col + 1,
is_primary: self.is_primary,
- label: Some("starting here...".to_owned()),
+ label: None,
annotation_type: AnnotationType::MultilineStart(self.depth)
}
}
start_col: self.end_col - 1,
end_col: self.end_col,
is_primary: self.is_primary,
- label: match self.label {
- Some(ref label) => Some(format!("...ending here: {}", label)),
- None => Some("...ending here".to_owned()),
- },
+ label: self.label.clone(),
annotation_type: AnnotationType::MultilineEnd(self.depth)
}
}
// Each of these corresponds to one part of the following diagram:
//
// x | foo(1 + bar(x,
- // | _________^ starting here... < MultilineStart
- // x | | y), < MultilineLine
- // | |______________^ ...ending here: label < MultilineEnd
+ // | _________^ < MultilineStart
+ // x | | y), < MultilineLine
+ // | |______________^ label < MultilineEnd
// x | z);
/// Annotation marking the first character of a fully shown multiline span
MultilineStart(usize),
false
}
}
+
+ pub fn takes_space(&self) -> bool {
+ // Multiline annotations always have to keep vertical space.
+ match self.annotation_type {
+ AnnotationType::MultilineStart(_) |
+ AnnotationType::MultilineEnd(_) => true,
+ _ => false,
+ }
+ }
}
#[derive(Debug)]
let param_env = &ty::ParameterEnvironment::for_item(ctx.tcx, item.id);
for field in vdata.fields() {
let field_ty = ctx.tcx.item_type(ctx.tcx.hir.local_def_id(field.id));
- if ctx.tcx.type_needs_drop_given_env(field_ty, param_env) {
+ if field_ty.needs_drop(ctx.tcx, param_env) {
ctx.span_lint(UNIONS_WITH_DROP_FIELDS,
field.span,
"union contains a field with possibly non-trivial drop code, \
cnum_map: RefCell::new(cnum_map),
cnum: cnum,
codemap_import_info: RefCell::new(vec![]),
+ attribute_cache: RefCell::new([Vec::new(), Vec::new()]),
dep_kind: Cell::new(dep_kind),
source: cstore::CrateSource {
dylib: dylib,
pub cnum_map: RefCell<CrateNumMap>,
pub cnum: CrateNum,
pub codemap_import_info: RefCell<Vec<ImportedFileMap>>,
+ pub attribute_cache: RefCell<[Vec<Option<Rc<[ast::Attribute]>>>; 2]>,
pub root: schema::CrateRoot,
}
pub fn is_staged_api(&self) -> bool {
- for attr in self.get_item_attrs(CRATE_DEF_INDEX) {
+ for attr in self.get_item_attrs(CRATE_DEF_INDEX).iter() {
if attr.path == "stable" || attr.path == "unstable" {
return true;
}
}
associated_item => { cdata.get_associated_item(def_id.index) }
impl_trait_ref => { cdata.get_impl_trait(def_id.index, tcx) }
+ impl_polarity => { cdata.get_impl_polarity(def_id.index) }
coerce_unsized_info => {
cdata.get_coerce_unsized_info(def_id.index).unwrap_or_else(|| {
bug!("coerce_unsized_info: `{:?}` is missing its info", def_id);
closure_kind => { cdata.closure_kind(def_id.index) }
closure_type => { cdata.closure_ty(def_id.index, tcx) }
inherent_impls => { Rc::new(cdata.get_inherent_implementations_for_type(def_id.index)) }
+ is_foreign_item => { cdata.is_foreign_item(def_id.index) }
}
impl CrateStore for cstore::CStore {
self.get_crate_data(def.krate).get_generics(def.index)
}
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]>
{
self.dep_graph.read(DepNode::MetaData(def_id));
self.get_crate_data(def_id.krate).get_item_attrs(def_id.index)
result
}
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity
- {
- self.dep_graph.read(DepNode::MetaData(def));
- self.get_crate_data(def.krate).get_impl_polarity(def.index)
- }
-
fn impl_parent(&self, impl_def: DefId) -> Option<DefId> {
self.dep_graph.read(DepNode::MetaData(impl_def));
self.get_crate_data(impl_def.krate).get_parent_impl(impl_def.index)
// Mark the attrs as used
let attrs = data.get_item_attrs(id.index);
- for attr in &attrs {
+ for attr in attrs.iter() {
attr::mark_used(attr);
}
ident: ast::Ident::with_empty_ctxt(name),
id: ast::DUMMY_NODE_ID,
span: local_span,
- attrs: attrs,
+ attrs: attrs.iter().cloned().collect(),
node: ast::ItemKind::MacroDef(body.into()),
vis: ast::Visibility::Inherited,
})
use std::collections::BTreeMap;
use std::io;
use std::mem;
+use std::rc::Rc;
use std::str;
use std::u32;
}
}
- pub fn get_item_attrs(&self, node_id: DefIndex) -> Vec<ast::Attribute> {
+ pub fn get_item_attrs(&self, node_id: DefIndex) -> Rc<[ast::Attribute]> {
+ let (node_as, node_index) =
+ (node_id.address_space().index(), node_id.as_array_index());
if self.is_proc_macro(node_id) {
- return Vec::new();
+ return Rc::new([]);
}
+
+ if let Some(&Some(ref val)) =
+ self.attribute_cache.borrow()[node_as].get(node_index) {
+ return val.clone();
+ }
+
// The attributes for a tuple struct are attached to the definition, not the ctor;
// we assume that someone passing in a tuple struct ctor is actually wanting to
// look at the definition
if def_key.disambiguated_data.data == DefPathData::StructCtor {
item = self.entry(def_key.parent.unwrap());
}
- self.get_attributes(&item)
+ let result = Rc::__from_array(self.get_attributes(&item).into_boxed_slice());
+ let vec_ = &mut self.attribute_cache.borrow_mut()[node_as];
+ if vec_.len() < node_index + 1 {
+ vec_.resize(node_index + 1, None);
+ }
+ vec_[node_index] = Some(result.clone());
+ result
}
pub fn get_struct_field_names(&self, id: DefIndex) -> Vec<ast::Name> {
assert!(ty.is_slice());
let array_ty = tcx.mk_array(tcx.types.u8, bytes.len());
- let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty);
+ let array_ref = tcx.mk_imm_ref(tcx.types.re_static, array_ty);
let array = self.literal_operand(test.span, array_ref, Literal::Value {
value: value.clone()
});
type with inference types/regions",
ty);
});
- self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment)
+ ty.needs_drop(self.tcx.global_tcx(), &self.infcx.parameter_environment)
}
pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
Adjustment::Deref => Operand::Consume(rcvr_l.deref()),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
- let re_erased = tcx.mk_region(ty::ReErased);
let ref_rcvr = local_decls.push(temp_decl(
Mutability::Not,
- tcx.mk_ref(re_erased, ty::TypeAndMut {
+ tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty: sig.inputs()[0],
mutbl: hir::Mutability::MutMutable
}),
source_info: source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_rcvr),
- Rvalue::Ref(re_erased, BorrowKind::Mut, rcvr_l)
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, rcvr_l)
)
});
Operand::Consume(Lvalue::Local(ref_rcvr))
//! care erasing regions all over the place.
use rustc::ty::subst::Substs;
-use rustc::ty::{Ty, TyCtxt, ReErased, ClosureSubsts};
+use rustc::ty::{Ty, TyCtxt, ClosureSubsts};
use rustc::mir::*;
use rustc::mir::visit::MutVisitor;
use rustc::mir::transform::{MirPass, MirSource, Pass};
fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) {
match *rvalue {
Rvalue::Ref(ref mut r, _, _) => {
- *r = self.tcx.mk_region(ReErased);
+ *r = self.tcx.types.re_erased;
}
Rvalue::Use(..) |
Rvalue::Repeat(..) |
// a regular goto.
let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs);
let ty = ty.to_ty(tcx);
- if tcx.type_needs_drop_given_env(ty, ¶m_env) {
+ if ty.needs_drop(tcx, ¶m_env) {
cost += CALL_PENALTY;
if let Some(unwind) = unwind {
work_list.push(unwind);
let dest = if dest_needs_borrow(&destination.0) {
debug!("Creating temp for return destination");
let dest = Rvalue::Ref(
- self.tcx.mk_region(ty::ReErased),
+ self.tcx.types.re_erased,
BorrowKind::Mut,
destination.0);
fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>,
callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Operand<'tcx> {
let arg = Rvalue::Ref(
- self.tcx.mk_region(ty::ReErased),
+ self.tcx.types.re_erased,
BorrowKind::Mut,
arg.deref());
fn restrict(&mut self, ty: Ty<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>) {
- if !ty.type_contents(tcx).interior_unsafe() {
+ if ty.is_freeze(tcx, param_env, DUMMY_SP) {
*self = *self - Qualif::MUTABLE_INTERIOR;
}
- if !tcx.type_needs_drop_given_env(ty, param_env) {
+ if !ty.needs_drop(tcx, param_env) {
*self = *self - Qualif::NEEDS_DROP;
}
}
self.collapse_goto_chain(successor, &mut changed);
}
+ changed |= self.simplify_unwind(&mut terminator);
+
let mut new_stmts = vec![];
let mut inner_changed = true;
while inner_changed {
true
}
+ // turn an unwind branch to a resume block into a None
+ fn simplify_unwind(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
+ let unwind = match terminator.kind {
+ TerminatorKind::Drop { ref mut unwind, .. } |
+ TerminatorKind::DropAndReplace { ref mut unwind, .. } |
+ TerminatorKind::Call { cleanup: ref mut unwind, .. } |
+ TerminatorKind::Assert { cleanup: ref mut unwind, .. } =>
+ unwind,
+ _ => return false
+ };
+
+ if let &mut Some(unwind_block) = unwind {
+ let is_resume_block = match self.basic_blocks[unwind_block] {
+ BasicBlockData {
+ ref statements,
+ terminator: Some(Terminator {
+ kind: TerminatorKind::Resume, ..
+ }), ..
+ } if statements.is_empty() => true,
+ _ => false
+ };
+ if is_resume_block {
+ debug!("simplifying unwind to {:?} from {:?}",
+ unwind_block, terminator.source_info);
+ *unwind = None;
+ }
+ return is_resume_block;
+ }
+
+ false
+ }
+
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind {
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
- self.tcx().type_needs_drop_given_env(
- self.lvalue_ty(lvalue), self.elaborator.param_env())
+ self.lvalue_ty(lvalue).needs_drop(self.tcx(), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let ty = self.lvalue_ty(self.lvalue);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
- let re_erased = tcx.mk_region(ty::ReErased);
- let ref_ty = tcx.mk_ref(re_erased, ty::TypeAndMut {
+ let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty: ty,
mutbl: hir::Mutability::MutMutable
});
source_info: self.source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_lvalue),
- Rvalue::Ref(re_erased, BorrowKind::Mut, self.lvalue.clone())
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone())
)
}],
terminator: Some(Terminator {
use rustc::hir::{self, PatKind, RangeEnd};
use syntax::ast;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use std::collections::hash_map::Entry;
// Adds the worst effect out of all the values of one type.
fn add_type(&mut self, ty: Ty<'gcx>) {
- if ty.type_contents(self.tcx).interior_unsafe() {
+ if !ty.is_freeze(self.tcx, &self.param_env, DUMMY_SP) {
self.promotable = false;
}
- if self.tcx.type_needs_drop_given_env(ty, &self.param_env) {
+ if ty.needs_drop(self.tcx, &self.param_env) {
self.promotable = false;
}
}
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
- let llscratch = bcx.alloca(ty, "abi_cast");
+ let llscratch = bcx.alloca(ty, "abi_cast", None);
base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value...
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
// both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
// on memory dependencies rather than pointer equality
- let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
+ let is_freeze = ccx.shared().type_is_freeze(mt.ty);
- if mt.mutbl != hir::MutMutable && !interior_unsafe {
+ if mt.mutbl != hir::MutMutable && is_freeze {
arg.attrs.set(ArgAttribute::NoAlias);
}
- if mt.mutbl == hir::MutImmutable && !interior_unsafe {
+ if mt.mutbl == hir::MutImmutable && is_freeze {
arg.attrs.set(ArgAttribute::ReadOnly);
}
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
- generic_type_of(cx, t, None, false, false)
+ generic_type_of(cx, t, None)
}
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>, name: &str) -> Type {
- generic_type_of(cx, t, Some(name), false, false)
+ generic_type_of(cx, t, Some(name))
}
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
_ => unreachable!()
};
let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
- llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant, false, false),
+ llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant),
packed)
},
_ => bug!("This function cannot handle {} with layout {:#?}", t, l)
fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
- name: Option<&str>,
- sizing: bool,
- dst: bool) -> Type {
+ name: Option<&str>) -> Type {
let l = cx.layout_of(t);
- debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}",
- t, name, sizing, dst);
+ debug!("adt::generic_type_of t: {:?} name: {:?}", t, name);
match *l {
layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
layout::RawNullablePointer { nndiscr, .. } => {
let fields = compute_fields(cx, t, nndiscr as usize, false);
match name {
None => {
- Type::struct_(cx, &struct_llfields(cx, &fields, nonnull, sizing, dst),
+ Type::struct_(cx, &struct_llfields(cx, &fields, nonnull),
nonnull.packed)
}
Some(name) => {
- assert_eq!(sizing, false);
Type::named_struct(cx, name)
}
}
let fields = compute_fields(cx, t, 0, true);
match name {
None => {
- let fields = struct_llfields(cx, &fields, &variant, sizing, dst);
+ let fields = struct_llfields(cx, &fields, &variant);
Type::struct_(cx, &fields, variant.packed)
}
Some(name) => {
// Hypothesis: named_struct's can never need a
// drop flag. (... needs validation.)
- assert_eq!(sizing, false);
Type::named_struct(cx, name)
}
}
}
}
}
- layout::General { discr, size, align, .. } => {
+ layout::General { discr, size, align, primitive_align, .. } => {
// We need a representation that has:
// * The alignment of the most-aligned field
// * The size of the largest variant (rounded up to that alignment)
// of the size.
let size = size.bytes();
let align = align.abi();
+ let primitive_align = primitive_align.abi();
assert!(align <= std::u32::MAX as u64);
let discr_ty = Type::from_integer(cx, discr);
let discr_size = discr.size().bytes();
let padded_discr_size = roundup(discr_size, align as u32);
let variant_part_size = size-padded_discr_size;
- let variant_fill = union_fill(cx, variant_part_size, align);
+ let variant_fill = union_fill(cx, variant_part_size, primitive_align);
- assert_eq!(machine::llalign_of_min(cx, variant_fill), align as u32);
+ assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32);
assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
let fields: Vec<Type> =
[discr_ty,
}
-fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
- variant: &layout::Struct,
- sizing: bool, _dst: bool) -> Vec<Type> {
- let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
- if sizing {
- bug!()
+// Double index to account for padding (FieldPath already uses `Struct::memory_index`)
+fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
+ discrfield.iter().map(|&i| (i as usize) << 1).collect::<Vec<_>>()
+}
+
+
+// Lookup `Struct::memory_index` and double it to account for padding
+pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
+ (variant.memory_index[index] as usize) << 1
+}
+
+
+pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
+ variant: &layout::Struct) -> Vec<Type> {
+ debug!("struct_llfields: variant: {:?}", variant);
+ let mut first_field = true;
+ let mut min_offset = 0;
+ let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
+ let field_iter = variant.field_index_by_increasing_offset().map(|i| {
+ (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
+ for (index, ty, target_offset) in field_iter {
+ if first_field {
+ debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
+ index, ty, min_offset, target_offset);
+ first_field = false;
+ } else {
+ assert!(target_offset >= min_offset);
+ let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
+ index, ty, padding_bytes, min_offset, target_offset);
+ }
+ let llty = type_of::in_memory_type_of(cx, ty);
+ result.push(llty);
+ let layout = cx.layout_of(ty);
+ let target_size = layout.size(&cx.tcx().data_layout).bytes();
+ min_offset = target_offset + target_size;
+ }
+ if variant.sized && !field_tys.is_empty() {
+ if variant.stride().bytes() < min_offset {
+ bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
+ min_offset);
+ }
+ let padding_bytes = variant.stride().bytes() - min_offset;
+ debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
+ padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ assert!(result.len() == (field_tys.len() * 2));
} else {
- fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
+ debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
+ min_offset, variant.min_size.bytes(), variant.stride().bytes());
}
+
+ result
}
pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
scrutinee: ValueRef,
alignment: Alignment,
) -> ValueRef {
- let llptrptr = bcx.gepi(scrutinee,
- &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>());
+ let path = struct_llfields_path(discrfield);
+ let llptrptr = bcx.gepi(scrutinee, &path);
let llptr = bcx.load(llptrptr, alignment.to_align());
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
- let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
+ let path = struct_llfields_path(discrfield);
let llptrptr = bcx.gepi(val, &path);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, None);
scx.tcx().hir.local_def_id(node_id)
})
.map(|def_id| {
- let name = symbol_for_def_id(scx, def_id, symbol_map);
+ let name = symbol_for_def_id(scx.tcx(), def_id, symbol_map);
let export_level = export_level(scx, def_id);
debug!("EXPORTED SYMBOL (local): {} ({:?})", name, export_level);
(name, export_level)
.exported_symbols(cnum)
.iter()
.map(|&def_id| {
- let name = symbol_name(Instance::mono(scx.tcx(), def_id), scx);
+ let name = symbol_name(Instance::mono(scx.tcx(), def_id), scx.tcx());
let export_level = if special_runtime_crate {
// We can probably do better here by just ensuring that
// it has hidden visibility rather than public
}
}
-fn symbol_for_def_id<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+fn symbol_for_def_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
symbol_map: &SymbolMap<'tcx>)
-> String {
// Just try to look things up in the symbol map. If nothing's there, we
// recompute.
- if let Some(node_id) = scx.tcx().hir.as_local_node_id(def_id) {
+ if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
if let Some(sym) = symbol_map.get(TransItem::Static(node_id)) {
return sym.to_owned();
}
}
- let instance = Instance::mono(scx.tcx(), def_id);
+ let instance = Instance::mono(tcx, def_id);
symbol_map.get(TransItem::Fn(instance))
.map(str::to_owned)
- .unwrap_or_else(|| symbol_name(instance, scx))
+ .unwrap_or_else(|| symbol_name(instance, tcx))
}
//! virtually impossible. Thus, symbol hash generation exclusively relies on
//! DefPaths which are much more robust in the face of changes to the code base.
-use common::SharedCrateContext;
use monomorphize::Instance;
use rustc::middle::weak_lang_items;
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
-use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::fold::TypeVisitor;
use rustc::ty::item_path::{self, ItemPathBuffer, RootMode};
use rustc::ty::subst::Substs;
use rustc::util::common::record_time;
use syntax::attr;
-use syntax::symbol::{Symbol, InternedString};
-fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+use std::fmt::Write;
+
+fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// the DefId of the item this name is for
def_id: Option<DefId>,
-> String {
debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, substs);
- let tcx = scx.tcx();
-
let mut hasher = ty::util::TypeIdHasher::<u64>::new(tcx);
record_time(&tcx.sess.perf_stats.symbol_hash_time, || {
// in case the same instances is emitted in two crates of the same
// project.
if substs.types().next().is_some() {
- hasher.hash(scx.tcx().crate_name.as_str());
- hasher.hash(scx.sess().local_crate_disambiguator().as_str());
+ hasher.hash(tcx.crate_name.as_str());
+ hasher.hash(tcx.sess.local_crate_disambiguator().as_str());
}
}
});
}
pub fn symbol_name<'a, 'tcx>(instance: Instance<'tcx>,
- scx: &SharedCrateContext<'a, 'tcx>) -> String {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String {
let def_id = instance.def_id();
let substs = instance.substs;
debug!("symbol_name(def_id={:?}, substs={:?})",
def_id, substs);
- let node_id = scx.tcx().hir.as_local_node_id(def_id);
+ let node_id = tcx.hir.as_local_node_id(def_id);
if let Some(id) = node_id {
- if scx.sess().plugin_registrar_fn.get() == Some(id) {
+ if tcx.sess.plugin_registrar_fn.get() == Some(id) {
let idx = def_id.index;
- let disambiguator = scx.sess().local_crate_disambiguator();
- return scx.sess().generate_plugin_registrar_symbol(disambiguator, idx);
+ let disambiguator = tcx.sess.local_crate_disambiguator();
+ return tcx.sess.generate_plugin_registrar_symbol(disambiguator, idx);
}
- if scx.sess().derive_registrar_fn.get() == Some(id) {
+ if tcx.sess.derive_registrar_fn.get() == Some(id) {
let idx = def_id.index;
- let disambiguator = scx.sess().local_crate_disambiguator();
- return scx.sess().generate_derive_registrar_symbol(disambiguator, idx);
+ let disambiguator = tcx.sess.local_crate_disambiguator();
+ return tcx.sess.generate_derive_registrar_symbol(disambiguator, idx);
}
}
// FIXME(eddyb) Precompute a custom symbol name based on attributes.
- let attrs = scx.tcx().get_attrs(def_id);
+ let attrs = tcx.get_attrs(def_id);
let is_foreign = if let Some(id) = node_id {
- match scx.tcx().hir.get(id) {
+ match tcx.hir.get(id) {
hir_map::NodeForeignItem(_) => true,
_ => false
}
} else {
- scx.sess().cstore.is_foreign_item(def_id)
+ tcx.sess.cstore.is_foreign_item(def_id)
};
if let Some(name) = weak_lang_items::link_name(&attrs) {
return name.to_string();
}
// Don't mangle foreign items.
- return scx.tcx().item_name(def_id).as_str().to_string();
+ return tcx.item_name(def_id).as_str().to_string();
}
- if let Some(name) = attr::find_export_name_attr(scx.sess().diagnostic(), &attrs) {
+ if let Some(name) = attr::find_export_name_attr(tcx.sess.diagnostic(), &attrs) {
// Use provided name
return name.to_string();
}
if attr::contains_name(&attrs, "no_mangle") {
// Don't mangle
- return scx.tcx().item_name(def_id).as_str().to_string();
+ return tcx.item_name(def_id).as_str().to_string();
}
// We want to compute the "type" of this item. Unfortunately, some
let mut ty_def_id = def_id;
let instance_ty;
loop {
- let key = scx.tcx().def_key(ty_def_id);
+ let key = tcx.def_key(ty_def_id);
match key.disambiguated_data.data {
DefPathData::TypeNs(_) |
DefPathData::ValueNs(_) => {
- instance_ty = scx.tcx().item_type(ty_def_id);
+ instance_ty = tcx.item_type(ty_def_id);
break;
}
_ => {
// Erase regions because they may not be deterministic when hashed
// and should not matter anyhow.
- let instance_ty = scx.tcx().erase_regions(&instance_ty);
-
- let hash = get_symbol_hash(scx, Some(def_id), instance_ty, Some(substs));
+ let instance_ty = tcx.erase_regions(&instance_ty);
- let mut buffer = SymbolPathBuffer {
- names: Vec::new()
- };
+ let hash = get_symbol_hash(tcx, Some(def_id), instance_ty, Some(substs));
+ let mut buffer = SymbolPathBuffer::new();
item_path::with_forced_absolute_paths(|| {
- scx.tcx().push_item_path(&mut buffer, def_id);
+ tcx.push_item_path(&mut buffer, def_id);
});
-
- mangle(buffer.names.into_iter(), &hash)
+ buffer.finish(&hash)
}
+// Follow C++ namespace-mangling style, see
+// http://en.wikipedia.org/wiki/Name_mangling for more info.
+//
+// It turns out that on macOS you can actually have arbitrary symbols in
+// function names (at least when given to LLVM), but this is not possible
+// when using unix's linker. Perhaps one day when we just use a linker from LLVM
+// we won't need to do this name mangling. The problem with name mangling is
+// that it seriously limits the available characters. For example we can't
+// have things like &T in symbol names when one would theoretically
+// want them for things like impls of traits on that type.
+//
+// To be able to work on all platforms and get *some* reasonable output, we
+// use C++ name-mangling.
struct SymbolPathBuffer {
- names: Vec<InternedString>,
+ result: String,
+ temp_buf: String
+}
+
+impl SymbolPathBuffer {
+ fn new() -> Self {
+ let mut result = SymbolPathBuffer {
+ result: String::with_capacity(64),
+ temp_buf: String::with_capacity(16)
+ };
+ result.result.push_str("_ZN"); // _Z == Begin name-sequence, N == nested
+ result
+ }
+
+ fn finish(mut self, hash: &str) -> String {
+ // end name-sequence
+ self.push(hash);
+ self.result.push('E');
+ self.result
+ }
}
impl ItemPathBuffer for SymbolPathBuffer {
}
fn push(&mut self, text: &str) {
- self.names.push(Symbol::intern(text).as_str());
+ self.temp_buf.clear();
+ let need_underscore = sanitize(&mut self.temp_buf, text);
+ let _ = write!(self.result, "{}", self.temp_buf.len() + (need_underscore as usize));
+ if need_underscore {
+ self.result.push('_');
+ }
+ self.result.push_str(&self.temp_buf);
}
}
-pub fn exported_name_from_type_and_prefix<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+pub fn exported_name_from_type_and_prefix<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: Ty<'tcx>,
prefix: &str)
-> String {
- let hash = get_symbol_hash(scx, None, t, None);
- let path = [Symbol::intern(prefix).as_str()];
- mangle(path.iter().cloned(), &hash)
+ let hash = get_symbol_hash(tcx, None, t, None);
+ let mut buffer = SymbolPathBuffer::new();
+ buffer.push(prefix);
+ buffer.finish(&hash)
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!
// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $
-pub fn sanitize(s: &str) -> String {
- let mut result = String::new();
+//
+// returns true if an underscore must be added at the start
+pub fn sanitize(result: &mut String, s: &str) -> bool {
for c in s.chars() {
match c {
// Escape these with $ sequences
}
// Underscore-qualify anything that didn't start as an ident.
- if !result.is_empty() &&
+ !result.is_empty() &&
result.as_bytes()[0] != '_' as u8 &&
- ! (result.as_bytes()[0] as char).is_xid_start() {
- return format!("_{}", result);
- }
-
- return result;
-}
-
-fn mangle<PI: Iterator<Item=InternedString>>(path: PI, hash: &str) -> String {
- // Follow C++ namespace-mangling style, see
- // http://en.wikipedia.org/wiki/Name_mangling for more info.
- //
- // It turns out that on macOS you can actually have arbitrary symbols in
- // function names (at least when given to LLVM), but this is not possible
- // when using unix's linker. Perhaps one day when we just use a linker from LLVM
- // we won't need to do this name mangling. The problem with name mangling is
- // that it seriously limits the available characters. For example we can't
- // have things like &T in symbol names when one would theoretically
- // want them for things like impls of traits on that type.
- //
- // To be able to work on all platforms and get *some* reasonable output, we
- // use C++ name-mangling.
-
- let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested
-
- fn push(n: &mut String, s: &str) {
- let sani = sanitize(s);
- n.push_str(&format!("{}{}", sani.len(), sani));
- }
-
- // First, connect each component with <len, name> pairs.
- for data in path {
- push(&mut n, &data);
- }
-
- push(&mut n, hash);
-
- n.push('E'); // End name-sequence.
- n
+ ! (result.as_bytes()[0] as char).is_xid_start()
}
use mir;
use monomorphize::{self, Instance};
use partitioning::{self, PartitioningStrategy, CodegenUnit};
+use symbol_cache::SymbolCache;
use symbol_map::SymbolMap;
use symbol_names_test;
use trans_item::{TransItem, DefPathBasedNames};
use libc::c_uint;
use std::ffi::{CStr, CString};
-use std::rc::Rc;
use std::str;
use std::i32;
use syntax_pos::Span;
/// in any other compilation unit. Give these symbols internal linkage.
fn internalize_symbols<'a, 'tcx>(sess: &Session,
scx: &SharedCrateContext<'a, 'tcx>,
+ translation_items: &FxHashSet<TransItem<'tcx>>,
llvm_modules: &[ModuleLlvm],
symbol_map: &SymbolMap<'tcx>,
exported_symbols: &ExportedSymbols) {
let mut locally_defined_symbols = FxHashSet();
let mut linkage_fixed_explicitly = FxHashSet();
- for trans_item in scx.translation_items().borrow().iter() {
+ for trans_item in translation_items {
let symbol_name = symbol_map.get_or_compute(scx, *trans_item);
if trans_item.explicit_linkage(tcx).is_some() {
linkage_fixed_explicitly.insert(symbol_name.clone());
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
-pub fn find_exported_symbols(tcx: TyCtxt, reachable: NodeSet) -> NodeSet {
- reachable.into_iter().filter(|&id| {
+pub fn find_exported_symbols(tcx: TyCtxt, reachable: &NodeSet) -> NodeSet {
+ reachable.iter().cloned().filter(|&id| {
// Next, we want to ignore some FFI functions that are not exposed from
// this crate. Reachable FFI functions can be lumped into two
// categories:
let krate = tcx.hir.krate();
let ty::CrateAnalysis { reachable, .. } = analysis;
- let exported_symbols = find_exported_symbols(tcx, reachable);
+ let exported_symbols = find_exported_symbols(tcx, &reachable);
let check_overflow = tcx.sess.overflow_checks();
// Run the translation item collector and partition the collected items into
// codegen units.
- let (codegen_units, symbol_map) = collect_and_partition_translation_items(&shared_ccx);
-
- let symbol_map = Rc::new(symbol_map);
+ let (translation_items, codegen_units, symbol_map) =
+ collect_and_partition_translation_items(&shared_ccx);
let mut all_stats = Stats::default();
let modules: Vec<ModuleTranslation> = codegen_units
let (stats, module) =
tcx.dep_graph.with_task(dep_node,
AssertDepGraphSafe(&shared_ccx),
- AssertDepGraphSafe((cgu, symbol_map.clone())),
+ AssertDepGraphSafe(cgu),
module_translation);
all_stats.extend(stats);
module
fn module_translation<'a, 'tcx>(
scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>,
- args: AssertDepGraphSafe<(CodegenUnit<'tcx>, Rc<SymbolMap<'tcx>>)>)
+ args: AssertDepGraphSafe<CodegenUnit<'tcx>>)
-> (Stats, ModuleTranslation)
{
// FIXME(#40304): We ought to be using the id as a key and some queries, I think.
let AssertDepGraphSafe(scx) = scx;
- let AssertDepGraphSafe((cgu, symbol_map)) = args;
+ let AssertDepGraphSafe(cgu) = args;
let cgu_name = String::from(cgu.name());
let cgu_id = cgu.work_product_id();
- let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &symbol_map);
+ let symbol_cache = SymbolCache::new(scx.tcx());
+ let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &symbol_cache);
// Check whether there is a previous work-product we can
// re-use. Not only must the file exist, and the inputs not
}
// Instantiate translation items without filling out definitions yet...
- let lcx = LocalCrateContext::new(scx, cgu, symbol_map.clone());
+ let lcx = LocalCrateContext::new(scx, cgu, &symbol_cache);
let module = {
let ccx = CrateContext::new(scx, &lcx);
let trans_items = ccx.codegen_unit()
- .items_in_deterministic_order(ccx.tcx(), &symbol_map);
+ .items_in_deterministic_order(ccx.tcx(), &symbol_cache);
for &(trans_item, linkage) in &trans_items {
trans_item.predefine(&ccx, linkage);
}
assert_module_sources::assert_module_sources(tcx, &modules);
- symbol_names_test::report_symbol_names(&shared_ccx);
+ symbol_names_test::report_symbol_names(tcx);
if shared_ccx.sess().trans_stats() {
println!("--- trans stats ---");
time(shared_ccx.sess().time_passes(), "internalize symbols", || {
internalize_symbols(sess,
&shared_ccx,
+ &translation_items,
&llvm_modules,
&symbol_map,
&exported_symbols);
}
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
- -> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
+ -> (FxHashSet<TransItem<'tcx>>,
+ Vec<CodegenUnit<'tcx>>,
+ SymbolMap<'tcx>) {
let time_passes = scx.sess().time_passes();
let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items {
assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() ||
scx.tcx().sess.opts.debugging_opts.incremental.is_some());
- {
- let mut ccx_map = scx.translation_items().borrow_mut();
-
- for trans_item in items.iter().cloned() {
- ccx_map.insert(trans_item);
- }
- }
+ let translation_items: FxHashSet<TransItem<'tcx>> = items.iter().cloned().collect();
if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
let mut item_to_cgus = FxHashMap();
}
}
- (codegen_units, symbol_map)
+ (translation_items, codegen_units, symbol_map)
}
}
}
- pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
- builder.dynamic_alloca(ty, name)
+ builder.dynamic_alloca(ty, name, align)
}
- pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
self.count_insn("alloca");
unsafe {
- if name.is_empty() {
+ let alloca = if name.is_empty() {
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname())
} else {
let name = CString::new(name).unwrap();
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
name.as_ptr())
+ };
+ if let Some(align) = align {
+ llvm::LLVMSetAlignment(alloca, align as c_uint);
}
+ alloca
}
}
//! and methods are represented as just a fn ptr and not a full
//! closure.
-use llvm::{self, ValueRef};
-use rustc::hir::def_id::DefId;
-use rustc::ty::subst::Substs;
use attributes;
use common::{self, CrateContext};
-use monomorphize;
use consts;
use declare;
-use monomorphize::Instance;
+use llvm::{self, ValueRef};
+use monomorphize::{self, Instance};
+use rustc::hir::def_id::DefId;
+use rustc::ty::{self, TypeFoldable};
+use rustc::ty::subst::Substs;
+use syntax_pos::DUMMY_SP;
use trans_item::TransItem;
use type_of;
-use rustc::ty::TypeFoldable;
/// Translates a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
return llfn;
}
- let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
- TransItem::Fn(instance));
+ let sym = ccx.symbol_cache().get(TransItem::Fn(instance));
debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym);
// This is subtle and surprising, but sometimes we have to bitcast
let attrs = instance.def.attrs(ccx.tcx());
attributes::from_fn_attrs(ccx, &attrs, llfn);
- let is_local_def = ccx.shared().translation_items().borrow()
- .contains(&TransItem::Fn(instance));
- if is_local_def {
- // FIXME(eddyb) Doubt all extern fn should allow unwinding.
+ // Perhaps questionable, but we assume that anything defined
+ // *in Rust code* may unwind. Foreign items like `extern "C" {
+ // fn foo(); }` are assumed not to unwind **unless** they have
+ // a `#[unwind]` attribute.
+ if !ty::queries::is_foreign_item::get(tcx, DUMMY_SP, instance.def_id()) {
attributes::unwind(llfn, true);
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
}
}
+
if ccx.use_dll_storage_attrs() &&
ccx.sess().cstore.is_dllimport_foreign_item(instance.def_id())
{
// have to instantiate all methods of the trait being cast to, so we
// can build the appropriate vtable.
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => {
- let target_ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &target_ty);
+ let target_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &target_ty);
let source_ty = operand.ty(self.mir, self.scx.tcx());
- let source_ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &source_ty);
+ let source_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &source_ty);
let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx,
source_ty,
target_ty);
}
mir::Rvalue::Cast(mir::CastKind::ReifyFnPointer, ref operand, _) => {
let fn_ty = operand.ty(self.mir, self.scx.tcx());
- let fn_ty = monomorphize::apply_param_substs(
- self.scx,
- self.param_substs,
- &fn_ty);
+ let fn_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &fn_ty);
visit_fn_use(self.scx, fn_ty, false, &mut self.output);
}
mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer, ref operand, _) => {
}
if let mir::Literal::Item { def_id, substs } = constant.literal {
- let substs = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &substs);
+ let substs = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &substs);
let instance = monomorphize::resolve(self.scx, def_id, substs);
collect_neighbours(self.scx, instance, self.output);
}
match *kind {
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.mir, tcx);
- let callee_ty = monomorphize::apply_param_substs(
- self.scx, self.param_substs, &callee_ty);
+ let callee_ty = tcx.trans_apply_param_substs(self.param_substs, &callee_ty);
visit_fn_use(self.scx, callee_ty, true, &mut self.output);
}
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
let ty = location.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
- let ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &ty);
+ let ty = tcx.trans_apply_param_substs(self.param_substs, &ty);
visit_drop_use(self.scx, ty, true, self.output);
}
mir::TerminatorKind::Goto { .. } |
-> Ty<'tcx>
{
let ty = shared.tcx().item_type(def_id);
- monomorphize::apply_param_substs(shared, substs, &ty)
+ shared.tcx().trans_apply_param_substs(substs, &ty)
}
/// Return the substituted type of an instance.
-> Ty<'tcx>
{
let ty = instance.def.def_ty(shared.tcx());
- monomorphize::apply_param_substs(shared, instance.substs, &ty)
+ shared.tcx().trans_apply_param_substs(instance.substs, &ty)
}
hir_map::NodeItem(&hir::Item {
ref attrs, span, node: hir::ItemStatic(..), ..
}) => {
- let sym = ccx.symbol_map()
- .get(TransItem::Static(id))
- .expect("Local statics should always be in the SymbolMap");
+ let sym = ccx.symbol_cache()
+ .get(TransItem::Static(id));
let defined_in_current_codegen_unit = ccx.codegen_unit()
.items()
.contains_key(&TransItem::Static(id));
assert!(!defined_in_current_codegen_unit);
- if declare::get_declared_value(ccx, sym).is_some() {
+ if declare::get_declared_value(ccx, &sym[..]).is_some() {
span_bug!(span, "trans: Conflicting symbol names for static?");
}
- let g = declare::define_global(ccx, sym, llty).unwrap();
+ let g = declare::define_global(ccx, &sym[..], llty).unwrap();
(g, attrs)
}
hir_map::NodeForeignItem(&hir::ForeignItem {
ref attrs, span, node: hir::ForeignItemStatic(..), ..
}) => {
- let sym = symbol_names::symbol_name(instance, ccx.shared());
+ let sym = symbol_names::symbol_name(instance, ccx.tcx());
let g = if let Some(name) =
attr::first_attr_value_str_by_name(&attrs, "linkage") {
// If this is a static with a linkage specified, then we need to handle
g
} else {
- let sym = symbol_names::symbol_name(instance, ccx.shared());
+ let sym = symbol_names::symbol_name(instance, ccx.tcx());
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
// FIXME(nagisa): investigate whether it can be changed into define_global
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if m != hir::MutMutable {
- let tcontents = ty.type_contents(ccx.tcx());
- if !tcontents.interior_unsafe() {
+ if ccx.shared().type_is_freeze(ty) {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
}
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef};
-use rustc::dep_graph::{DepGraph, DepGraphSafe, DepNode, DepTrackingMap, DepTrackingMapConfig};
+use rustc::dep_graph::{DepGraph, DepGraphSafe};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::traits;
use monomorphize::Instance;
use partitioning::CodegenUnit;
-use trans_item::TransItem;
use type_::Type;
use rustc_data_structures::base_n;
use rustc::ty::subst::Substs;
use session::config::NoDebugInfo;
use session::Session;
use session::config;
-use symbol_map::SymbolMap;
-use util::nodemap::{NodeSet, DefIdMap, FxHashMap, FxHashSet};
+use symbol_cache::SymbolCache;
+use util::nodemap::{NodeSet, DefIdMap, FxHashMap};
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
-use std::marker::PhantomData;
use std::ptr;
use std::iter;
-use std::rc::Rc;
use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
check_overflow: bool,
use_dll_storage_attrs: bool,
-
- translation_items: RefCell<FxHashSet<TransItem<'tcx>>>,
- trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
- project_cache: RefCell<DepTrackingMap<ProjectionCache<'tcx>>>,
}
/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
/// per compilation unit. Each one has its own LLVM `ContextRef` so that
/// several compilation units may be optimized in parallel. All other LLVM
/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
-pub struct LocalCrateContext<'tcx> {
+pub struct LocalCrateContext<'a, 'tcx: 'a> {
llmod: ModuleRef,
llcx: ContextRef,
stats: Stats,
/// Depth of the current type-of computation - used to bail out
type_of_depth: Cell<usize>,
- symbol_map: Rc<SymbolMap<'tcx>>,
-
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
-}
-
-// Implement DepTrackingMapConfig for `trait_cache`
-pub struct TraitSelectionCache<'tcx> {
- data: PhantomData<&'tcx ()>
-}
-
-impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
- type Key = ty::PolyTraitRef<'tcx>;
- type Value = traits::Vtable<'tcx, ()>;
- fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
- key.to_poly_trait_predicate().dep_node()
- }
-}
-
-// # Global Cache
-pub struct ProjectionCache<'gcx> {
- data: PhantomData<&'gcx ()>
-}
-
-impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
- type Key = Ty<'gcx>;
- type Value = Ty<'gcx>;
- fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
- // Ideally, we'd just put `key` into the dep-node, but we
- // can't put full types in there. So just collect up all the
- // def-ids of structs/enums as well as any traits that we
- // project out of. It doesn't matter so much what we do here,
- // except that if we are too coarse, we'll create overly
- // coarse edges between impls and the trans. For example, if
- // we just used the def-id of things we are projecting out of,
- // then the key for `<Foo as SomeTrait>::T` and `<Bar as
- // SomeTrait>::T` would both share a dep-node
- // (`TraitSelect(SomeTrait)`), and hence the impls for both
- // `Foo` and `Bar` would be considered inputs. So a change to
- // `Bar` would affect things that just normalized `Foo`.
- // Anyway, this heuristic is not ideal, but better than
- // nothing.
- let def_ids: Vec<DefId> =
- key.walk()
- .filter_map(|t| match t.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
- ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
- _ => None,
- })
- .collect();
-
- DepNode::ProjectionCache { def_ids: def_ids }
- }
+ symbol_cache: &'a SymbolCache<'a, 'tcx>,
}
/// A CrateContext value binds together one LocalCrateContext with the
/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans.
pub struct CrateContext<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccx: &'a LocalCrateContext<'tcx>,
+ local_ccx: &'a LocalCrateContext<'a, 'tcx>,
}
impl<'a, 'tcx> CrateContext<'a, 'tcx> {
pub fn new(shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccx: &'a LocalCrateContext<'tcx>)
+ local_ccx: &'a LocalCrateContext<'a, 'tcx>)
-> Self {
CrateContext { shared, local_ccx }
}
tcx: tcx,
check_overflow: check_overflow,
use_dll_storage_attrs: use_dll_storage_attrs,
- translation_items: RefCell::new(FxHashSet()),
- trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
- project_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
}
}
pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
- self.tcx.type_needs_drop_given_env(ty, &self.empty_param_env)
+ ty.needs_drop(self.tcx, &self.empty_param_env)
}
pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP)
}
- pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
- &self.exported_symbols
- }
-
- pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
- &self.trait_cache
+ pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx, &self.empty_param_env, DUMMY_SP)
}
- pub fn project_cache(&self) -> &RefCell<DepTrackingMap<ProjectionCache<'tcx>>> {
- &self.project_cache
+ pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
+ &self.exported_symbols
}
pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
pub fn use_dll_storage_attrs(&self) -> bool {
self.use_dll_storage_attrs
}
-
- pub fn translation_items(&self) -> &RefCell<FxHashSet<TransItem<'tcx>>> {
- &self.translation_items
- }
}
-impl<'tcx> LocalCrateContext<'tcx> {
- pub fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
- codegen_unit: CodegenUnit<'tcx>,
- symbol_map: Rc<SymbolMap<'tcx>>)
- -> LocalCrateContext<'tcx> {
+impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
+ pub fn new(shared: &SharedCrateContext<'a, 'tcx>,
+ codegen_unit: CodegenUnit<'tcx>,
+ symbol_cache: &'a SymbolCache<'a, 'tcx>)
+ -> LocalCrateContext<'a, 'tcx> {
unsafe {
// Append ".rs" to LLVM module identifier.
//
rust_try_fn: Cell::new(None),
intrinsics: RefCell::new(FxHashMap()),
type_of_depth: Cell::new(0),
- symbol_map: symbol_map,
local_gen_sym_counter: Cell::new(0),
+ symbol_cache: symbol_cache,
};
let (int_type, opaque_vec_type, str_slice_ty, mut local_ccx) = {
/// This is used in the `LocalCrateContext` constructor to allow calling
/// functions that expect a complete `CrateContext`, even before the local
/// portion is fully initialized and attached to the `SharedCrateContext`.
- fn dummy_ccx<'a>(shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccxs: &'a [LocalCrateContext<'tcx>])
- -> CrateContext<'a, 'tcx> {
+ fn dummy_ccx(shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: &'a [LocalCrateContext<'a, 'tcx>])
+ -> CrateContext<'a, 'tcx> {
assert!(local_ccxs.len() == 1);
CrateContext {
shared: shared,
self.shared
}
- fn local(&self) -> &'b LocalCrateContext<'tcx> {
+ fn local(&self) -> &'b LocalCrateContext<'b, 'tcx> {
self.local_ccx
}
self.shared.use_dll_storage_attrs()
}
- pub fn symbol_map(&self) -> &SymbolMap<'tcx> {
- &*self.local().symbol_map
- }
-
- pub fn translation_items(&self) -> &RefCell<FxHashSet<TransItem<'tcx>>> {
- &self.shared.translation_items
+ pub fn symbol_cache(&self) -> &'b SymbolCache<'b, 'tcx> {
+ self.local().symbol_cache
}
/// Given the def-id of some item that has no type parameters, make
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ if let Some(&layout) = self.tcx().layout_cache.borrow().get(&ty) {
+ return TyLayout { ty: ty, layout: layout, variant_index: None };
+ }
+
self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
infcx.layout_of(ty).unwrap_or_else(|e| {
match e {
})
})
}
+
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.tcx().normalize_associated_type(&ty)
+ }
}
impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.shared.layout_of(ty)
}
+
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.shared.normalize_projections(ty)
+ }
}
-pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);
+pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'a, 'tcx>);
impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> {
fn drop(&mut self) {
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
- let slot = bcx.alloca(i64p, "slot");
+ let slot = bcx.alloca(i64p, "slot", None);
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
mod mir;
mod monomorphize;
mod partitioning;
+mod symbol_cache;
mod symbol_map;
mod symbol_names_test;
mod trans_item;
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
+use adt;
use base::{self, Lifetime};
use callee;
use builder::Builder;
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
- let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret");
+ let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None);
self.store_operand(&bcx, llscratch, None, op);
llscratch
}
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
self.store_operand(bcx, llscratch, None, op);
(llscratch, Alignment::AbiAligned, true)
} else {
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
(llscratch, Alignment::AbiAligned, true)
}
bug!("Not a tuple.");
};
for (n, &ty) in arg_types.iter().enumerate() {
- let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize);
+ let mut elem = bcx.extract_value(
+ llval, adt::struct_llfields_index(v, n));
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
- let slot = bcx.alloca(llretty, "personalityslot");
+ let slot = bcx.alloca(llretty, "personalityslot", None);
self.llpersonalityslot = Some(slot);
slot
}
fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
- monomorphize::apply_param_substs(self.ccx.shared(),
- self.substs,
- value)
+ self.ccx.tcx().trans_apply_param_substs(self.substs, value)
}
fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
let tr_lvalue = self.const_lvalue(lvalue, span)?;
let ty = tr_lvalue.ty;
- let ref_ty = tcx.mk_ref(tcx.mk_region(ty::ReErased),
+ let ref_ty = tcx.mk_ref(tcx.types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
let base = match tr_lvalue.base {
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
- let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
+ let tmp = bcx.alloca(
+ type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty));
assert!(!ty.has_param_types());
Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
}
let alignment = self.alignment | Alignment::from_packed(st.packed);
+ let llfields = adt::struct_llfields(ccx, fields, st);
let ptr_val = if needs_cast {
- let fields = st.field_index_by_increasing_offset().map(|i| {
- type_of::in_memory_type_of(ccx, fields[i])
- }).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], st.packed);
+ let real_ty = Type::struct_(ccx, &llfields[..], st.packed);
bcx.pointercast(self.llval, real_ty.ptr_to())
} else {
self.llval
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty) {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// If the type of the last field is [T] or str, then we don't need to do
// any adjusments
match fty.sty {
ty::TySlice(..) | ty::TyStr => {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
_ => ()
}
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
- return (bcx.struct_gep(ptr_val, ix), alignment);
+ return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// We need to get the pointer manually now.
use builder::Builder;
use common::{self, CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
-use monomorphize::{self, Instance};
+use monomorphize::Instance;
use abi::FnType;
use type_of;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn monomorphize<T>(&self, value: &T) -> T
- where T: TransNormalize<'tcx> {
- monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value)
+ where T: TransNormalize<'tcx>
+ {
+ self.ccx.tcx().trans_apply_param_substs(self.param_substs, value)
}
pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
- let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr");
+ let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None);
bcx.store(llval, alloc, None);
alloc
} else {
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
+use adt;
use base;
use common::{self, CrateContext, C_null};
use builder::Builder;
if common::val_ty(elem) == Type::i1(bcx.ccx) {
elem = bcx.zext(elem, Type::i8(bcx.ccx));
}
+ let layout = bcx.ccx.layout_of(self.ty);
+ let i = if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ };
llpair = bcx.insert_value(llpair, elem, i);
}
self.val = OperandValue::Immediate(llpair);
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
- let f_align = match *bcx.ccx.layout_of(ty) {
- Layout::Univariant { ref variant, .. } =>
- Alignment::from_packed(variant.packed) | align,
- _ => align
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ Alignment::from_packed(variant.packed) | align)
+ },
+ _ => (0, 1, align)
};
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
- let a_ptr = bcx.struct_gep(llval, 0);
- let b_ptr = bcx.struct_gep(llval, 1);
+ let a_ptr = bcx.struct_gep(llval, ix0);
+ let b_ptr = bcx.struct_gep(llval, ix1);
OperandValue::Pair(
base::load_ty(bcx, a_ptr, f_align, a_ty),
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
OperandValue::Pair(a, b) => {
- let f_align = match *bcx.ccx.layout_of(operand.ty) {
- Layout::Univariant { ref variant, .. } if variant.packed => {
- Some(1)
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ if variant.packed { Some(1) } else { None })
}
- _ => align
+ _ => (0, 1, align)
};
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
- bcx.store(a, bcx.struct_gep(lldest, 0), f_align);
- bcx.store(b, bcx.struct_gep(lldest, 1), f_align);
+ bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
+ bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
}
}
}
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
- let translation = if let Layout::Univariant { ref variant, .. } = *layout {
- Some(&variant.memory_index)
- } else {
- None
+ let get_memory_index = |i| {
+ if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ }
};
let alignment = dest.alignment;
for (i, operand) in operands.iter().enumerate() {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
- let i = if let Some(ref t) = translation {
- t[i] as usize
- } else {
- i
- };
+ let i = get_memory_index(i);
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, alignment.to_align(), op);
}
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
- bcx.tcx().mk_region(ty::ReErased),
+ bcx.tcx().types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
use glue;
use rustc::hir::def_id::DefId;
-use rustc::infer::TransNormalize;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
-use rustc::traits::{self, SelectionContext, Reveal};
+use rustc::traits;
use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::ty::fold::{TypeFolder, TypeFoldable};
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::util::common::MemoizationMap;
-use syntax::ast;
-use syntax::codemap::{Span, DUMMY_SP};
+use syntax::codemap::DUMMY_SP;
pub use rustc::ty::Instance;
}
}
-/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
-/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
-/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
-fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
- span: Span,
- trait_ref: ty::PolyTraitRef<'tcx>)
- -> traits::Vtable<'tcx, ()>
-{
- let tcx = scx.tcx();
-
- // Remove any references to regions; this helps improve caching.
- let trait_ref = tcx.erase_regions(&trait_ref);
-
- scx.trait_cache().memoize(trait_ref, || {
- debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
- trait_ref, trait_ref.def_id());
-
- // Do the initial selection for the obligation. This yields the
- // shallow result we are looking for -- that is, what specific impl.
- tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
-
- let obligation_cause = traits::ObligationCause::misc(span,
- ast::DUMMY_NODE_ID);
- let obligation = traits::Obligation::new(obligation_cause,
- trait_ref.to_poly_trait_predicate());
-
- let selection = match selcx.select(&obligation) {
- Ok(Some(selection)) => selection,
- Ok(None) => {
- // Ambiguity can happen when monomorphizing during trans
- // expands to some humongo type that never occurred
- // statically -- this humongo type can then overflow,
- // leading to an ambiguous result. So report this as an
- // overflow bug, since I believe this is the only case
- // where ambiguity can result.
- debug!("Encountered ambiguity selecting `{:?}` during trans, \
- presuming due to overflow",
- trait_ref);
- tcx.sess.span_fatal(span,
- "reached the recursion limit during monomorphization \
- (selection ambiguity)");
- }
- Err(e) => {
- span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
- e, trait_ref)
- }
- };
-
- debug!("fulfill_obligation: selection={:?}", selection);
-
- // Currently, we use a fulfillment context to completely resolve
- // all nested obligations. This is because they can inform the
- // inference of the impl's type parameters.
- let mut fulfill_cx = traits::FulfillmentContext::new();
- let vtable = selection.map(|predicate| {
- debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
- });
- let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
-
- info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
- vtable
- })
- })
-}
-
fn resolve_associated_item<'a, 'tcx>(
scx: &SharedCrateContext<'a, 'tcx>,
trait_item: &ty::AssociatedItem,
def_id, trait_id, rcvr_substs);
let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
- let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref));
+ let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref));
// Now that we know which impl is being used, we can dispatch to
// the actual function:
substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty])
});
- match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
+ match scx.tcx().trans_fulfill_obligation(DUMMY_SP, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
scx.tcx().coerce_unsized_info(impl_def_id).custom_kind.unwrap()
}
}
}
-/// Monomorphizes a type from the AST by first applying the in-scope
-/// substitutions and then normalizing any associated types.
-pub fn apply_param_substs<'a, 'tcx, T>(scx: &SharedCrateContext<'a, 'tcx>,
- param_substs: &Substs<'tcx>,
- value: &T)
- -> T
- where T: TransNormalize<'tcx>
-{
- let tcx = scx.tcx();
- debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
- let substituted = value.subst(tcx, param_substs);
- let substituted = scx.tcx().erase_regions(&substituted);
- AssociatedTypeNormalizer::new(scx).fold(&substituted)
-}
-
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_substs: &Substs<'tcx>,
tcx.normalize_associated_type(&f.ty(tcx, param_substs))
}
-struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b> {
- shared: &'a SharedCrateContext<'b, 'gcx>,
-}
-
-impl<'a, 'b, 'gcx> AssociatedTypeNormalizer<'a, 'b, 'gcx> {
- fn new(shared: &'a SharedCrateContext<'b, 'gcx>) -> Self {
- AssociatedTypeNormalizer {
- shared: shared,
- }
- }
-
- fn fold<T:TypeFoldable<'gcx>>(&mut self, value: &T) -> T {
- if !value.has_projection_types() {
- value.clone()
- } else {
- value.fold_with(self)
- }
- }
-}
-
-impl<'a, 'b, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx> {
- fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> {
- self.shared.tcx()
- }
-
- fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> {
- if !ty.has_projection_types() {
- ty
- } else {
- self.shared.project_cache().memoize(ty, || {
- debug!("AssociatedTypeNormalizer: ty={:?}", ty);
- self.shared.tcx().normalize_associated_type(&ty)
- })
- }
- }
-}
use std::cmp::Ordering;
use std::hash::Hash;
use std::sync::Arc;
-use symbol_map::SymbolMap;
+use symbol_cache::SymbolCache;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, InstantiationMode};
DepNode::WorkProduct(self.work_product_id())
}
- pub fn compute_symbol_name_hash(&self,
- scx: &SharedCrateContext,
- symbol_map: &SymbolMap) -> u64 {
+ pub fn compute_symbol_name_hash<'a>(&self,
+ scx: &SharedCrateContext<'a, 'tcx>,
+ symbol_cache: &SymbolCache<'a, 'tcx>)
+ -> u64 {
let mut state = IchHasher::new();
let exported_symbols = scx.exported_symbols();
- let all_items = self.items_in_deterministic_order(scx.tcx(), symbol_map);
+ let all_items = self.items_in_deterministic_order(scx.tcx(), symbol_cache);
for (item, _) in all_items {
- let symbol_name = symbol_map.get(item).unwrap();
+ let symbol_name = symbol_cache.get(item);
symbol_name.len().hash(&mut state);
symbol_name.hash(&mut state);
let exported = match item {
state.finish().to_smaller_hash()
}
- pub fn items_in_deterministic_order(&self,
- tcx: TyCtxt,
- symbol_map: &SymbolMap)
- -> Vec<(TransItem<'tcx>, llvm::Linkage)> {
+ pub fn items_in_deterministic_order<'a>(&self,
+ tcx: TyCtxt,
+ symbol_cache: &SymbolCache<'a, 'tcx>)
+ -> Vec<(TransItem<'tcx>, llvm::Linkage)> {
let mut items: Vec<(TransItem<'tcx>, llvm::Linkage)> =
self.items.iter().map(|(item, linkage)| (*item, *linkage)).collect();
match (node_id1, node_id2) {
(None, None) => {
- let symbol_name1 = symbol_map.get(trans_item1).unwrap();
- let symbol_name2 = symbol_map.get(trans_item2).unwrap();
- symbol_name1.cmp(symbol_name2)
+ let symbol_name1 = symbol_cache.get(trans_item1);
+ let symbol_name2 = symbol_cache.get(trans_item2);
+ symbol_name1.cmp(&symbol_name2)
}
// In the following two cases we can avoid looking up the symbol
(None, Some(_)) => Ordering::Less,
return ordering;
}
- let symbol_name1 = symbol_map.get(trans_item1).unwrap();
- let symbol_name2 = symbol_map.get(trans_item2).unwrap();
- symbol_name1.cmp(symbol_name2)
+ let symbol_name1 = symbol_cache.get(trans_item1);
+ let symbol_name2 = symbol_cache.get(trans_item2);
+ symbol_name1.cmp(&symbol_name2)
}
}
});
let mut initial_partitioning = place_root_translation_items(scx,
trans_items);
- debug_dump(scx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
+ debug_dump(tcx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
// If the partitioning should produce a fixed count of codegen units, merge
// until that count is reached.
if let PartitioningStrategy::FixedUnitCount(count) = strategy {
merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name.as_str());
- debug_dump(scx, "POST MERGING:", initial_partitioning.codegen_units.iter());
+ debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
}
// In the next step, we use the inlining map to determine which additional
let post_inlining = place_inlined_translation_items(initial_partitioning,
inlining_map);
- debug_dump(scx, "POST INLINING:", post_inlining.0.iter());
+ debug_dump(tcx, "POST INLINING:", post_inlining.0.iter());
// Finally, sort by codegen unit name, so that we get deterministic results
let mut result = post_inlining.0;
Symbol::intern(&format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index)).as_str()
}
-fn debug_dump<'a, 'b, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
+fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
label: &str,
cgus: I)
where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
{
if cfg!(debug_assertions) {
debug!("{}", label);
+ let symbol_cache = SymbolCache::new(tcx);
for cgu in cgus {
- let symbol_map = SymbolMap::build(scx, cgu.items
- .iter()
- .map(|(&trans_item, _)| trans_item));
debug!("CodegenUnit {}:", cgu.name);
for (trans_item, linkage) in &cgu.items {
- let symbol_name = symbol_map.get_or_compute(scx, *trans_item);
+ let symbol_name = symbol_cache.get(*trans_item);
let symbol_hash_start = symbol_name.rfind('h');
let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..])
.unwrap_or("<no hash>");
debug!(" - {} [{:?}] [{}]",
- trans_item.to_string(scx.tcx()),
+ trans_item.to_string(tcx),
linkage,
symbol_hash);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::TyCtxt;
+use std::cell::RefCell;
+use syntax_pos::symbol::{InternedString, Symbol};
+use trans_item::TransItem;
+use util::nodemap::FxHashMap;
+
+// In the SymbolCache we collect the symbol names of translation items
+// and cache them for later reference. This is just a performance
+// optimization and the cache is populated lazily; symbol names of
+// translation items are deterministic and fully defined by the item.
+// Thus they can always be recomputed if needed.
+
+/// Lazy map from translation item to its mangled symbol name.
+/// Safe to drop and rebuild at any time (see module comment above).
+pub struct SymbolCache<'a, 'tcx: 'a> {
+    // Type context used to (re)compute a symbol name on a cache miss.
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    // RefCell so `get()` can populate the cache through `&self`; values
+    // are interned `Symbol`s, so handing them out is cheap.
+    index: RefCell<FxHashMap<TransItem<'tcx>, Symbol>>,
+}
+
+impl<'a, 'tcx> SymbolCache<'a, 'tcx> {
+    /// Creates an empty cache; entries are computed on first lookup.
+    pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+        SymbolCache {
+            tcx: tcx,
+            index: RefCell::new(FxHashMap())
+        }
+    }
+
+    /// Returns the symbol name for `trans_item`, computing and caching
+    /// it via `TransItem::compute_symbol_name` on the first request.
+    pub fn get(&self, trans_item: TransItem<'tcx>) -> InternedString {
+        let mut index = self.index.borrow_mut();
+        index.entry(trans_item)
+             .or_insert_with(|| Symbol::intern(&trans_item.compute_symbol_name(self.tcx)))
+             .as_str()
+    }
+}
where I: Iterator<Item=TransItem<'tcx>>
{
// Check for duplicate symbol names
+ let tcx = scx.tcx();
let mut symbols: Vec<_> = trans_items.map(|trans_item| {
- (trans_item, trans_item.compute_symbol_name(scx))
+ (trans_item, trans_item.compute_symbol_name(tcx))
}).collect();
(&mut symbols[..]).sort_by(|&(_, ref sym1), &(_, ref sym2)|{
if let Some(sym) = self.get(trans_item) {
Cow::from(sym)
} else {
- Cow::from(trans_item.compute_symbol_name(scx))
+ Cow::from(trans_item.compute_symbol_name(scx.tcx()))
}
}
}
use back::symbol_names;
use rustc::hir;
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
+use rustc::ty::TyCtxt;
use syntax::ast;
-use common::SharedCrateContext;
use monomorphize::Instance;
const SYMBOL_NAME: &'static str = "rustc_symbol_name";
const ITEM_PATH: &'static str = "rustc_item_path";
-pub fn report_symbol_names(scx: &SharedCrateContext) {
+pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
// if the `rustc_attrs` feature is not enabled, then the
// attributes we are interested in cannot be present anyway, so
// skip the walk.
- let tcx = scx.tcx();
if !tcx.sess.features.borrow().rustc_attrs {
return;
}
let _ignore = tcx.dep_graph.in_ignore();
- let mut visitor = SymbolNamesTest { scx: scx };
+ let mut visitor = SymbolNamesTest { tcx: tcx };
// FIXME(#37712) could use ItemLikeVisitor if trait items were item-like
tcx.hir.krate().visit_all_item_likes(&mut visitor.as_deep_visitor());
}
struct SymbolNamesTest<'a, 'tcx:'a> {
- scx: &'a SharedCrateContext<'a, 'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> {
fn process_attrs(&mut self,
node_id: ast::NodeId) {
- let tcx = self.scx.tcx();
+ let tcx = self.tcx;
let def_id = tcx.hir.local_def_id(node_id);
for attr in tcx.get_attrs(def_id).iter() {
if attr.check_name(SYMBOL_NAME) {
// for now, can only use on monomorphic names
let instance = Instance::mono(tcx, def_id);
- let name = symbol_names::symbol_name(instance, self.scx);
+ let name = symbol_names::symbol_name(instance, self.tcx);
tcx.sess.span_err(attr.span, &format!("symbol-name({})", name));
} else if attr.check_name(ITEM_PATH) {
let path = tcx.item_path_str(def_id);
use attributes;
use base;
use consts;
-use context::{CrateContext, SharedCrateContext};
+use context::CrateContext;
use common;
use declare;
use llvm;
self.to_raw_string(),
ccx.codegen_unit().name());
- let symbol_name = ccx.symbol_map()
- .get_or_compute(ccx.shared(), *self);
+ let symbol_name = ccx.symbol_cache().get(*self);
debug!("symbol {}", &symbol_name);
ccx.instances().borrow_mut().insert(instance, lldecl);
}
- pub fn compute_symbol_name(&self,
- scx: &SharedCrateContext<'a, 'tcx>) -> String {
+ pub fn compute_symbol_name(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String {
match *self {
- TransItem::Fn(instance) => symbol_names::symbol_name(instance, scx),
+ TransItem::Fn(instance) => symbol_names::symbol_name(instance, tcx),
TransItem::Static(node_id) => {
- let def_id = scx.tcx().hir.local_def_id(node_id);
- symbol_names::symbol_name(Instance::mono(scx.tcx(), def_id), scx)
+ let def_id = tcx.hir.local_def_id(node_id);
+ symbol_names::symbol_name(Instance::mono(tcx, def_id), tcx)
}
TransItem::GlobalAsm(node_id) => {
- let def_id = scx.tcx().hir.local_def_id(node_id);
+ let def_id = tcx.hir.local_def_id(node_id);
format!("global_asm_{:?}", def_id)
}
}
pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize {
self.layout_of(ty).size(self).bytes() as machine::llsize
}
+
+    /// Returns the "over-alignment" of `t` — an alignment beyond what the
+    /// data layout requires — if its layout specifies one, `None` otherwise.
+    /// (Presumably driven by explicit `#[repr(align)]`-style attributes;
+    /// confirm against `Layout::over_align`.)
+    pub fn over_align_of(&self, t: Ty<'tcx>)
+                         -> Option<machine::llalign> {
+        let layout = self.layout_of(t);
+        if let Some(align) = layout.over_align(&self.tcx().data_layout) {
+            Some(align as machine::llalign)
+        } else {
+            None
+        }
+    }
}
fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
let tcx = self.tcx();
let r = match tcx.named_region_map.defs.get(&lifetime.id) {
Some(&rl::Region::Static) => {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}
Some(&rl::Region::LateBound(debruijn, id)) => {
.emit();
return Substs::for_item(tcx, def_id, |_, _| {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}, |_, _| {
tcx.types.err
});
if let Some(lifetime) = lifetimes.get(i) {
self.ast_region_to_region(lifetime, Some(def))
} else {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}
}, |def, substs| {
let i = def.index as usize;
span_err!(tcx.sess, span, E0228,
"the lifetime bound for this object type cannot be deduced \
from context; please supply an explicit bound");
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
})
}
})
// If any of the derived region bounds are 'static, that is always
// the best choice.
if derived_region_bounds.iter().any(|&r| ty::ReStatic == *r) {
- return Some(tcx.mk_region(ty::ReStatic));
+ return Some(tcx.types.re_static);
}
// Determine whether there is exactly one unique region in the set
let expected_ty = self.structurally_resolved_type(pat.span, expected);
if let ty::TyRef(_, mt) = expected_ty.sty {
if let ty::TySlice(_) = mt.ty.sty {
- pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ pat_ty = tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_slice(tcx.types.u8))
}
}
use syntax::abi;
use syntax::feature_gate;
use syntax::ptr::P;
+use syntax_pos;
use std::collections::VecDeque;
use std::ops::Deref;
Ok(target)
}
+    /// Same as `try_coerce()`, but without side-effects: answers whether
+    /// `expr_ty` could be coerced to `target` without registering any
+    /// obligations or adjustments against the real inference state.
+    pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
+        let source = self.resolve_type_vars_with_obligations(expr_ty);
+        debug!("coercion::can({:?} -> {:?})", source, target);
+
+        let cause = self.cause(syntax_pos::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+        let coerce = Coerce::new(self, cause);
+        // NOTE(review): relies on `probe` rolling back any inference
+        // side-effects of the trial coercion — confirm against InferCtxt::probe.
+        self.probe(|_| coerce.coerce::<hir::Expr>(&[], source, target)).is_ok()
+    }
+
/// Given some expressions, their known unified type and another expression,
/// tries to unify the types, potentially inserting coercions on any of the
/// provided expressions and returns their LUB (aka "common supertype").
use check::FnCtxt;
-use rustc::ty::Ty;
-use rustc::infer::{InferOk};
+use rustc::infer::InferOk;
use rustc::traits::ObligationCause;
use syntax::ast;
use syntax_pos::{self, Span};
use rustc::hir;
use rustc::hir::def::Def;
-use rustc::ty::{self, AssociatedItem};
+use rustc::ty::{self, Ty, AssociatedItem};
use errors::DiagnosticBuilder;
use super::method::probe;
if let Err(e) = self.try_coerce(expr, checked_ty, self.diverges.get(), expected) {
let cause = self.misc(expr.span);
let expr_ty = self.resolve_type_vars_with_obligations(checked_ty);
- let mode = probe::Mode::MethodCall;
- let suggestions = self.probe_for_return_type(syntax_pos::DUMMY_SP,
- mode,
- expected,
- checked_ty,
- ast::DUMMY_NODE_ID);
let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e);
- if suggestions.len() > 0 {
- err.help(&format!("here are some functions which \
- might fulfill your needs:\n{}",
- self.get_best_match(&suggestions).join("\n")));
- };
+ if let Some(suggestion) = self.check_ref(expr,
+ checked_ty,
+ expected) {
+ err.help(&suggestion);
+ } else {
+ let mode = probe::Mode::MethodCall;
+ let suggestions = self.probe_for_return_type(syntax_pos::DUMMY_SP,
+ mode,
+ expected,
+ checked_ty,
+ ast::DUMMY_NODE_ID);
+ if suggestions.len() > 0 {
+ err.help(&format!("here are some functions which \
+ might fulfill your needs:\n{}",
+ self.get_best_match(&suggestions).join("\n")));
+ }
+ }
err.emit();
}
}
_ => false,
}
}
+
+    /// This function is used to determine potential "simple" improvements or users' errors and
+    /// provide them useful help. For example:
+    ///
+    /// ```
+    /// fn some_fn(s: &str) {}
+    ///
+    /// let x = "hey!".to_owned();
+    /// some_fn(x); // error
+    /// ```
+    ///
+    /// No need to find every potential function which could make a coercion to transform a
+    /// `String` into a `&str` since a `&` would do the trick!
+    ///
+    /// In addition to this check, it also checks between references mutability state. If the
+    /// expected is mutable but the provided isn't, maybe we could just say "Hey, try with
+    /// `&mut`!".
+    fn check_ref(&self,
+                 expr: &hir::Expr,
+                 checked_ty: Ty<'tcx>,
+                 expected: Ty<'tcx>)
+                 -> Option<String> {
+        match (&expected.sty, &checked_ty.sty) {
+            // Both sides already references: no simple `&`/`&mut` fix applies.
+            (&ty::TyRef(_, _), &ty::TyRef(_, _)) => None,
+            (&ty::TyRef(_, mutability), _) => {
+                // Check if it can work when put into a ref. For example:
+                //
+                // ```
+                // fn bar(x: &mut i32) {}
+                //
+                // let x = 0u32;
+                // bar(&x); // error, expected &mut
+                // ```
+                // Use the pre-interned `'static` region (`tcx.types.re_static`)
+                // for the probe type, consistent with the rest of the codebase.
+                let ref_ty = match mutability.mutbl {
+                    hir::Mutability::MutMutable => self.tcx.mk_mut_ref(
+                                                       self.tcx.types.re_static,
+                                                       checked_ty),
+                    hir::Mutability::MutImmutable => self.tcx.mk_imm_ref(
+                                                         self.tcx.types.re_static,
+                                                         checked_ty),
+                };
+                if self.can_coerce(ref_ty, expected) {
+                    // Suggest the user's own expression with `&`/`&mut` prepended.
+                    if let Ok(src) = self.tcx.sess.codemap().span_to_snippet(expr.span) {
+                        return Some(format!("try with `{}{}`",
+                                            match mutability.mutbl {
+                                                hir::Mutability::MutMutable => "&mut ",
+                                                hir::Mutability::MutImmutable => "&",
+                                            },
+                                            &src));
+                    }
+                }
+                None
+            }
+            _ => None,
+        }
+    }
}
let impl_self_orig = self_substs.region_for_def(def);
let r = if let ty::Region::ReEarlyBound(ref ebr) = *impl_self_orig {
if impl_bindings.region_param(ebr).pure_wrt_drop {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
} else {
r_orig
}
let def_id = tcx.hir.local_def_id(it.id);
let substs = Substs::for_item(tcx, def_id,
- |_, _| tcx.mk_region(ty::ReErased),
+ |_, _| tcx.types.re_erased,
|def, _| tcx.mk_param_from_def(def));
let fty = tcx.mk_fn_def(def_id, substs, ty::Binder(tcx.mk_fn_sig(
// In general, during probing we erase regions. See
// `impl_self_ty()` for an explanation.
- let region = tcx.mk_region(ty::ReErased);
+ let region = tcx.types.re_erased;
// Search through mutabilities in order to find one where pick works:
[hir::MutImmutable, hir::MutMutable]
} else {
// In general, during probe we erase regions. See
// `impl_self_ty()` for an explanation.
- self.tcx.mk_region(ty::ReErased)
+ self.tcx.types.re_erased
}
}, |def, cur_substs| {
let i = def.index as usize;
let substs = Substs::for_item(self.tcx,
impl_def_id,
- |_, _| self.tcx.mk_region(ty::ReErased),
+ |_, _| self.tcx.types.re_erased,
|_, _| self.next_ty_var(
TypeVariableOrigin::SubstitutionPlaceholder(
self.tcx.def_span(impl_def_id))));
if def.repr.simd() {
check_simd(tcx, span, def_id);
}
+
+ // if struct is packed and not aligned, check fields for alignment.
+ // Checks for combining packed and align attrs on single struct are done elsewhere.
+ if tcx.lookup_adt_def(def_id).repr.packed() && tcx.lookup_adt_def(def_id).repr.align == 0 {
+ check_packed(tcx, span, def_id);
+ }
}
fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
+// E0588: a `#[repr(packed)]` struct may not transitively contain a struct
+// with an explicit `#[repr(align)]`, since packing would defeat the
+// requested alignment.
+fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
+    if check_packed_inner(tcx, def_id, &mut Vec::new()) {
+        struct_span_err!(tcx.sess, sp, E0588,
+            "packed struct cannot transitively contain a `[repr(align)]` struct").emit();
+    }
+}
+
+// Returns true if the struct `def_id` transitively (through its fields)
+// contains a struct whose `repr.align` is set. `stack` holds the structs
+// currently being visited so that recursive types terminate.
+fn check_packed_inner<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                def_id: DefId,
+                                stack: &mut Vec<DefId>) -> bool {
+    let t = tcx.item_type(def_id);
+    // Already on the visit stack: recursive type, stop descending.
+    if stack.contains(&def_id) {
+        debug!("check_packed_inner: {:?} is recursive", t);
+        return false;
+    }
+    match t.sty {
+        ty::TyAdt(def, substs) if def.is_struct() => {
+            // A non-zero `repr.align` means an explicit alignment attribute
+            // was given (cf. the `repr.align == 0` check at the call site).
+            if tcx.lookup_adt_def(def.did).repr.align > 0 {
+                return true;
+            }
+            // push struct def_id before checking fields
+            stack.push(def_id);
+            for field in &def.struct_variant().fields {
+                let f = field.ty(tcx, substs);
+                match f.sty {
+                    ty::TyAdt(def, _) => {
+                        if check_packed_inner(tcx, def.did, stack) {
+                            return true;
+                        }
+                    }
+                    _ => ()
+                }
+            }
+            // only need to pop if not early out
+            stack.pop();
+        }
+        _ => ()
+    }
+    false
+}
+
#[allow(trivial_numeric_casts)]
pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
//
// FIXME(#27579) all uses of this should be migrated to register_wf_obligation eventually
let cause = traits::ObligationCause::new(span, self.body_id, code);
- self.register_region_obligation(ty, self.tcx.mk_region(ty::ReEmpty), cause);
+ self.register_region_obligation(ty, self.tcx.types.re_empty, cause);
}
/// Registers obligations that all types appearing in `substs` are well-formed.
match lit.node {
ast::LitKind::Str(..) => tcx.mk_static_str(),
ast::LitKind::ByteStr(ref v) => {
- tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_array(tcx.types.u8, v.len()))
}
ast::LitKind::Byte(_) => tcx.types.u8,
let outside_ty = gcx.fold_regions(&inside_ty, &mut false, |r, _| {
match *r {
// 'static is valid everywhere.
- ty::ReStatic |
- ty::ReEmpty => gcx.mk_region(*r),
+ ty::ReStatic => gcx.types.re_static,
+ ty::ReEmpty => gcx.types.re_empty,
// Free regions that come from early-bound regions are valid.
ty::ReFree(ty::FreeRegion {
span_err!(self.tcx().sess, span, E0564,
"only named lifetimes are allowed in `impl Trait`, \
but `{}` was found in the type `{}`", r, inside_ty);
- gcx.mk_region(ty::ReStatic)
+ gcx.types.re_static
}
ty::ReVar(_) |
match self.infcx.fully_resolve(&r) {
Ok(r) => r,
Err(_) => {
- self.tcx.mk_region(ty::ReStatic)
+ self.tcx.types.re_static
}
}
}
trait_def,
adt_def,
impl_trait_ref,
+ impl_polarity,
+ is_foreign_item,
..*providers
};
}
}
}
+// Provider for the `impl_polarity` query: reads the polarity of a local
+// impl item straight out of its HIR node.
+fn impl_polarity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                           def_id: DefId)
+                           -> hir::ImplPolarity {
+    // Only local def-ids reach this provider; `unwrap` enforces that.
+    let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
+    match tcx.hir.expect_item(node_id).node {
+        hir::ItemImpl(_, polarity, ..) => polarity,
+        // ICE message uses this function's actual name (was the stale
+        // "trait_impl_polarity").
+        ref item => bug!("impl_polarity: {:?} not an impl", item)
+    }
+}
+
// Is it marked with ?Sized
fn is_unsized<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>,
ast_bounds: &[hir::TyParamBound],
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs, fty)
}
+
+// Provider: whether `def_id` names an item inside an `extern` block.
+// Only valid for local def-ids; a non-local id is a caller bug.
+fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                             def_id: DefId)
+                             -> bool {
+    match tcx.hir.get_if_local(def_id) {
+        Some(hir_map::NodeForeignItem(..)) => true,
+        Some(_) => false,
+        // `None`: not a local def-id — invariant violation.
+        _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id)
+    }
+}
// but `{}` was found in the type `{}`
E0567, // auto traits can not have type parameters
E0568, // auto-traits can not have predicates,
+ E0588, // packed struct cannot transitively contain a `[repr(align)]` struct
E0592, // duplicate definitions with name `{}`
}
use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::default::Default;
+use std::ffi::CString;
use std::fmt::{self, Write};
use std::str;
use syntax::feature_gate::UnstableFeatures;
use html::render::derive_id;
use html::toc::TocBuilder;
use html::highlight;
+use html::escape::Escape;
use test;
use pulldown_cmark::{html, Event, Tag, Parser};
use pulldown_cmark::{Options, OPTION_ENABLE_FOOTNOTES, OPTION_ENABLE_TABLES};
+/// Which markdown backend rustdoc uses to render documentation: the
+/// legacy C `hoedown` library or the pure-Rust `pulldown-cmark` parser.
+#[derive(PartialEq, Debug, Clone, Copy)]
+pub enum RenderType {
+    Hoedown,
+    Pulldown,
+}
+
/// A unit struct which has the `fmt::Display` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
// The second parameter is whether we need a shorter version or not.
-pub struct Markdown<'a>(pub &'a str);
+pub struct Markdown<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders the markdown with a
/// table of contents.
-pub struct MarkdownWithToc<'a>(pub &'a str);
+pub struct MarkdownWithToc<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders the markdown escaping HTML tags.
-pub struct MarkdownHtml<'a>(pub &'a str);
+pub struct MarkdownHtml<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders only the first paragraph.
pub struct MarkdownSummaryLine<'a>(pub &'a str);
}
}
+/// Returns a new string with all consecutive whitespace collapsed into
+/// single spaces.
+///
+/// Any leading or trailing whitespace will be trimmed.
+fn collapse_whitespace(s: &str) -> String {
+    // `split_whitespace` already skips leading/trailing and repeated
+    // whitespace, so joining the pieces with one space does all the work.
+    s.split_whitespace().collect::<Vec<_>>().join(" ")
+}
+
/// Convert chars from a title for an id.
///
/// "Hello, world!" -> "hello-world"
const HOEDOWN_EXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
const HOEDOWN_EXT_SUPERSCRIPT: libc::c_uint = 1 << 8;
const HOEDOWN_EXT_FOOTNOTES: libc::c_uint = 1 << 2;
+const HOEDOWN_HTML_ESCAPE: libc::c_uint = 1 << 1;
const HOEDOWN_EXTENSIONS: libc::c_uint =
HOEDOWN_EXT_NO_INTRA_EMPHASIS | HOEDOWN_EXT_TABLES |
unit: libc::size_t,
}
+// State threaded through the hoedown callbacks via the renderer's
+// opaque pointer.
+struct MyOpaque {
+    // hoedown's default blockcode callback, saved so our `block` hook
+    // can delegate non-Rust code blocks back to it.
+    dfltblk: extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+                           *const hoedown_buffer, *const hoedown_renderer_data,
+                           libc::size_t),
+    // Collects section headers when a table of contents was requested.
+    toc_builder: Option<TocBuilder>,
+}
+
extern {
fn hoedown_html_renderer_new(render_flags: libc::c_uint,
nesting_level: libc::c_int)
fn hoedown_document_free(md: *mut hoedown_document);
fn hoedown_buffer_new(unit: libc::size_t) -> *mut hoedown_buffer;
+ fn hoedown_buffer_puts(b: *mut hoedown_buffer, c: *const libc::c_char);
fn hoedown_buffer_free(b: *mut hoedown_buffer);
}
}
}
+/// Renders the markdown in `s` to HTML on `w` using the hoedown C
+/// library. `print_toc` additionally emits a table of contents;
+/// `html_flags` are extra hoedown HTML flags (e.g. `HOEDOWN_HTML_ESCAPE`).
+pub fn render(w: &mut fmt::Formatter,
+              s: &str,
+              print_toc: bool,
+              html_flags: libc::c_uint) -> fmt::Result {
+    // blockcode callback: highlight Rust code blocks ourselves (with an
+    // optional playground "Run" button), delegating non-Rust blocks to
+    // hoedown's saved default renderer.
+    extern fn block(ob: *mut hoedown_buffer, orig_text: *const hoedown_buffer,
+                    lang: *const hoedown_buffer, data: *const hoedown_renderer_data,
+                    line: libc::size_t) {
+        unsafe {
+            if orig_text.is_null() { return }
+
+            let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
+            let my_opaque: &MyOpaque = &*((*opaque).opaque as *const MyOpaque);
+            let text = (*orig_text).as_bytes();
+            let origtext = str::from_utf8(text).unwrap();
+            let origtext = origtext.trim_left();
+            debug!("docblock: ==============\n{:?}\n=======", text);
+            let rendered = if lang.is_null() || origtext.is_empty() {
+                false
+            } else {
+                let rlang = (*lang).as_bytes();
+                let rlang = str::from_utf8(rlang).unwrap();
+                if !LangString::parse(rlang).rust {
+                    // Not a Rust code block: hand it to hoedown's default.
+                    (my_opaque.dfltblk)(ob, orig_text, lang,
+                                        opaque as *const hoedown_renderer_data,
+                                        line);
+                    true
+                } else {
+                    false
+                }
+            };
+
+            // Drop doctest-only lines (e.g. `# `-hidden lines) from display.
+            let lines = origtext.lines().filter(|l| {
+                stripped_filtered_line(*l).is_none()
+            });
+            let text = lines.collect::<Vec<&str>>().join("\n");
+            if rendered { return }
+            PLAYGROUND.with(|play| {
+                // insert newline to clearly separate it from the
+                // previous block so we can shorten the html output
+                let mut s = String::from("\n");
+                let playground_button = play.borrow().as_ref().and_then(|&(ref krate, ref url)| {
+                    if url.is_empty() {
+                        return None;
+                    }
+                    let test = origtext.lines().map(|l| {
+                        stripped_filtered_line(l).unwrap_or(l)
+                    }).collect::<Vec<&str>>().join("\n");
+                    let krate = krate.as_ref().map(|s| &**s);
+                    let test = test::maketest(&test, krate, false,
+                                              &Default::default());
+                    // `&` must stay entity-escaped: this string is embedded
+                    // in an HTML href attribute below.
+                    let channel = if test.contains("#![feature(") {
+                        "&amp;version=nightly"
+                    } else {
+                        ""
+                    };
+                    // These characters don't need to be escaped in a URI.
+                    // FIXME: use a library function for percent encoding.
+                    fn dont_escape(c: u8) -> bool {
+                        (b'a' <= c && c <= b'z') ||
+                        (b'A' <= c && c <= b'Z') ||
+                        (b'0' <= c && c <= b'9') ||
+                        c == b'-' || c == b'_' || c == b'.' ||
+                        c == b'~' || c == b'!' || c == b'\'' ||
+                        c == b'(' || c == b')' || c == b'*'
+                    }
+                    let mut test_escaped = String::new();
+                    for b in test.bytes() {
+                        if dont_escape(b) {
+                            test_escaped.push(char::from(b));
+                        } else {
+                            write!(test_escaped, "%{:02X}", b).unwrap();
+                        }
+                    }
+                    Some(format!(
+                        r#"<a class="test-arrow" target="_blank" href="{}?code={}{}">Run</a>"#,
+                        url, test_escaped, channel
+                    ))
+                });
+                s.push_str(&highlight::render_with_highlighting(
+                    &text,
+                    Some("rust-example-rendered"),
+                    None,
+                    playground_button.as_ref().map(String::as_str)));
+                let output = CString::new(s).unwrap();
+                hoedown_buffer_puts(ob, output.as_ptr());
+            })
+        }
+    }
+
+    // header callback: derive an anchor id from the header text, record it
+    // with the TOC builder (if any), and emit a linked section header.
+    extern fn header(ob: *mut hoedown_buffer, text: *const hoedown_buffer,
+                     level: libc::c_int, data: *const hoedown_renderer_data,
+                     _: libc::size_t) {
+        // hoedown does this, we may as well too
+        unsafe { hoedown_buffer_puts(ob, "\n\0".as_ptr() as *const _); }
+
+        // Extract the text provided
+        let s = if text.is_null() {
+            "".to_owned()
+        } else {
+            let s = unsafe { (*text).as_bytes() };
+            str::from_utf8(&s).unwrap().to_owned()
+        };
+
+        // Discard '<em>', '<code>' tags and some escaped characters,
+        // transform the contents of the header into a hyphenated string
+        // without non-alphanumeric characters other than '-' and '_'.
+        //
+        // This is a terrible hack working around how hoedown gives us rendered
+        // html for text rather than the raw text.
+        let mut id = s.clone();
+        // Restored HTML-entity literals ("&lt;", …) that an earlier pass had
+        // decoded, which also produced an invalid `"""` token.
+        let repl_sub = vec!["<em>", "</em>", "<code>", "</code>",
+                            "<strong>", "</strong>",
+                            "&lt;", "&gt;", "&amp;", "&#39;", "&quot;"];
+        for sub in repl_sub {
+            id = id.replace(sub, "");
+        }
+        let id = id.chars().filter_map(|c| {
+            if c.is_alphanumeric() || c == '-' || c == '_' {
+                if c.is_ascii() {
+                    Some(c.to_ascii_lowercase())
+                } else {
+                    Some(c)
+                }
+            } else if c.is_whitespace() && c.is_ascii() {
+                Some('-')
+            } else {
+                None
+            }
+        }).collect::<String>();
+
+        let opaque = unsafe { (*data).opaque as *mut hoedown_html_renderer_state };
+        let opaque = unsafe { &mut *((*opaque).opaque as *mut MyOpaque) };
+
+        // Ensure the id is unique across the page.
+        let id = derive_id(id);
+
+        let sec = opaque.toc_builder.as_mut().map_or("".to_owned(), |builder| {
+            format!("{} ", builder.push(level as u32, s.clone(), id.clone()))
+        });
+
+        // Render the HTML
+        let text = format!("<h{lvl} id='{id}' class='section-header'>\
+                           <a href='#{id}'>{sec}{}</a></h{lvl}>",
+                           s, lvl = level, id = id, sec = sec);
+
+        let text = CString::new(text).unwrap();
+        unsafe { hoedown_buffer_puts(ob, text.as_ptr()) }
+    }
+
+    // codespan callback: collapse whitespace and HTML-escape inline code.
+    extern fn codespan(
+        ob: *mut hoedown_buffer,
+        text: *const hoedown_buffer,
+        _: *const hoedown_renderer_data,
+        _: libc::size_t
+    ) -> libc::c_int {
+        let content = if text.is_null() {
+            "".to_owned()
+        } else {
+            let bytes = unsafe { (*text).as_bytes() };
+            let s = str::from_utf8(bytes).unwrap();
+            collapse_whitespace(s)
+        };
+
+        let content = format!("<code>{}</code>", Escape(&content));
+        let element = CString::new(content).unwrap();
+        unsafe { hoedown_buffer_puts(ob, element.as_ptr()); }
+        // Return anything except 0, which would mean "also print the code span verbatim".
+        1
+    }
+
+    unsafe {
+        let ob = hoedown_buffer_new(DEF_OUNIT);
+        let renderer = hoedown_html_renderer_new(html_flags, 0);
+        // Hook our callbacks in and stash `MyOpaque` where they can find it.
+        let mut opaque = MyOpaque {
+            dfltblk: (*renderer).blockcode.unwrap(),
+            toc_builder: if print_toc {Some(TocBuilder::new())} else {None}
+        };
+        (*((*renderer).opaque as *mut hoedown_html_renderer_state)).opaque
+            = &mut opaque as *mut _ as *mut libc::c_void;
+        (*renderer).blockcode = Some(block);
+        (*renderer).header = Some(header);
+        (*renderer).codespan = Some(codespan);
+
+        let document = hoedown_document_new(renderer, HOEDOWN_EXTENSIONS, 16);
+        hoedown_document_render(document, ob, s.as_ptr(),
+                                s.len() as libc::size_t);
+        hoedown_document_free(document);
+
+        hoedown_html_renderer_free(renderer);
+
+        // Emit the TOC first (if requested), then the rendered body.
+        let mut ret = opaque.toc_builder.map_or(Ok(()), |builder| {
+            write!(w, "<nav id=\"TOC\">{}</nav>", builder.into_toc())
+        });
+
+        if ret.is_ok() {
+            let buf = (*ob).as_bytes();
+            ret = w.write_str(str::from_utf8(buf).unwrap());
+        }
+        hoedown_buffer_free(ob);
+        ret
+    }
+}
+
pub fn old_find_testable_code(doc: &str, tests: &mut ::test::Collector, position: Span) {
extern fn block(_ob: *mut hoedown_buffer,
text: *const hoedown_buffer,
LangString::parse(s)
};
if !block_info.rust { return }
+ let text = (*text).as_bytes();
let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
let tests = &mut *((*opaque).opaque as *mut ::test::Collector);
- let line = tests.get_line() + line;
+ let text = str::from_utf8(text).unwrap();
+ let lines = text.lines().map(|l| {
+ stripped_filtered_line(l).unwrap_or(l)
+ });
+ let text = lines.collect::<Vec<&str>>().join("\n");
let filename = tests.get_filename();
- tests.add_old_test(line, filename);
+
+ if tests.render_type == RenderType::Hoedown {
+ let line = tests.get_line() + line;
+ tests.add_test(text.to_owned(),
+ block_info.should_panic, block_info.no_run,
+ block_info.ignore, block_info.test_harness,
+ block_info.compile_fail, block_info.error_codes,
+ line, filename);
+ } else {
+ tests.add_old_test(text, filename);
+ }
}
}
}
tests.set_position(position);
-
unsafe {
let ob = hoedown_buffer_new(DEF_OUNIT);
let renderer = hoedown_html_renderer_new(0, 0);
impl<'a> fmt::Display for Markdown<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let Markdown(md) = *self;
+ let Markdown(md, render_type) = *self;
+
// This is actually common enough to special-case
if md.is_empty() { return Ok(()) }
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, false, 0)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
-
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
impl<'a> fmt::Display for MarkdownWithToc<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let MarkdownWithToc(md) = *self;
+ let MarkdownWithToc(md, render_type) = *self;
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, true, 0)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- let mut toc = TocBuilder::new();
+ let mut toc = TocBuilder::new();
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, Some(&mut toc)))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, Some(&mut toc)))));
- write!(fmt, "<nav id=\"TOC\">{}</nav>", toc.into_toc())?;
+ write!(fmt, "<nav id=\"TOC\">{}</nav>", toc.into_toc())?;
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
impl<'a> fmt::Display for MarkdownHtml<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let MarkdownHtml(md) = *self;
+ let MarkdownHtml(md, render_type) = *self;
+
// This is actually common enough to special-case
if md.is_empty() { return Ok(()) }
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, false, HOEDOWN_HTML_ESCAPE)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
-
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- // Treat inline HTML as plain text.
- let p = p.map(|event| match event {
- Event::Html(text) | Event::InlineHtml(text) => Event::Text(text),
- _ => event
- });
+ // Treat inline HTML as plain text.
+ let p = p.map(|event| match event {
+ Event::Html(text) | Event::InlineHtml(text) => Event::Text(text),
+ _ => event
+ });
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
mod tests {
use super::{LangString, Markdown, MarkdownHtml};
use super::plain_summary_line;
+ use super::RenderType;
use html::render::reset_ids;
#[test]
#[test]
fn issue_17736() {
let markdown = "# title";
- format!("{}", Markdown(markdown));
+ format!("{}", Markdown(markdown, RenderType::Pulldown));
reset_ids(true);
}
#[test]
fn test_header() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", Markdown(input));
+ let output = format!("{}", Markdown(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
reset_ids(true);
}
#[test]
fn test_header_ids_multiple_blocks() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", Markdown(input));
+ let output = format!("{}", Markdown(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
}
#[test]
fn test_markdown_html_escape() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", MarkdownHtml(input));
+ let output = format!("{}", MarkdownHtml(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
}
use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace};
use html::format::fmt_impl_for_trait_page;
use html::item_type::ItemType;
-use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine};
+use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine, RenderType};
use html::{highlight, layout};
/// A pair of name and its optional document.
/// publicly reused items to redirect to the right location.
pub render_redirect_pages: bool,
pub shared: Arc<SharedContext>,
+ pub render_type: RenderType,
}
pub struct SharedContext {
dst: PathBuf,
passes: FxHashSet<String>,
css_file_extension: Option<PathBuf>,
- renderinfo: RenderInfo) -> Result<(), Error> {
+ renderinfo: RenderInfo,
+ render_type: RenderType) -> Result<(), Error> {
let src_root = match krate.src.parent() {
Some(p) => p.to_path_buf(),
None => PathBuf::new(),
dst: dst,
render_redirect_pages: false,
shared: Arc::new(scx),
+ render_type: render_type,
};
// Crawl the crate to build various caches used for the output
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
document_stability(w, cx, item)?;
- document_full(w, item)?;
+ document_full(w, item, cx.render_type)?;
Ok(())
}
-fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink) -> fmt::Result {
+fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
+ render_type: RenderType) -> fmt::Result {
if let Some(s) = item.doc_value() {
let markdown = if s.contains('\n') {
format!("{} [Read more]({})",
format!("{}", &plain_summary_line(Some(s)))
};
write!(w, "<div class='docblock'>{}</div>",
- Markdown(&markdown))?;
+ Markdown(&markdown, render_type))?;
}
Ok(())
}
}
}
-fn document_full(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result {
+fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
+ render_type: RenderType) -> fmt::Result {
if let Some(s) = get_doc_value(item) {
write!(w, "<div class='docblock'>{}</div>",
- Markdown(&format!("{}{}", md_render_assoc_item(item), s)))?;
+ Markdown(&format!("{}{}", md_render_assoc_item(item), s), render_type))?;
}
Ok(())
}
</tr>",
name = *myitem.name.as_ref().unwrap(),
stab_docs = stab_docs,
- docs = MarkdownSummaryLine(doc_value),
+ docs = if cx.render_type == RenderType::Hoedown {
+ format!("{}",
+ shorter(Some(&Markdown(doc_value,
+ RenderType::Hoedown).to_string())))
+ } else {
+ format!("{}", MarkdownSummaryLine(doc_value))
+ },
class = myitem.type_(),
stab = myitem.stability_class().unwrap_or("".to_string()),
unsafety_flag = unsafety_flag,
} else {
String::new()
};
- let text = format!("Deprecated{}{}", since, MarkdownHtml(&deprecated_reason));
+ let text = format!("Deprecated{}{}",
+ since,
+ MarkdownHtml(&deprecated_reason, cx.render_type));
stability.push(format!("<div class='stab deprecated'>{}</div>", text))
};
let text = format!("<summary><span class=microscope>🔬</span> \
This is a nightly-only experimental API. {}\
</summary>{}",
- unstable_extra, MarkdownHtml(&stab.unstable_reason));
+ unstable_extra,
+ MarkdownHtml(&stab.unstable_reason, cx.render_type));
stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
text));
}
String::new()
};
-        let text = format!("Deprecated{}{}", since, MarkdownHtml(&note));
+        let text = format!("Deprecated{}{}", since, MarkdownHtml(&note, cx.render_type));
stability.push(format!("<div class='stab deprecated'>{}</div>", text))
}
write!(w, "</span>")?;
write!(w, "</h3>\n")?;
if let Some(ref dox) = i.impl_item.doc_value() {
- write!(w, "<div class='docblock'>{}</div>", Markdown(dox))?;
+ write!(w, "<div class='docblock'>{}</div>", Markdown(dox, cx.render_type))?;
}
}
// because impls can't have a stability.
document_stability(w, cx, it)?;
if get_doc_value(item).is_some() {
- document_full(w, item)?;
+ document_full(w, item, cx.render_type)?;
} else {
// In case the item isn't documented,
// provide short documentation from the trait.
- document_short(w, it, link)?;
+ document_short(w, it, link, cx.render_type)?;
}
}
} else {
}
} else {
document_stability(w, cx, item)?;
- document_short(w, item, link)?;
+ document_short(w, item, link, cx.render_type)?;
}
}
Ok(())
#![feature(staged_api)]
#![feature(test)]
#![feature(unicode)]
+#![feature(vec_remove_item)]
extern crate arena;
extern crate getopts;
use clean::AttributesExt;
+use html::markdown::RenderType;
+
struct Output {
krate: clean::Crate,
renderinfo: html::render::RenderInfo,
"URL to send code snippets to, may be reset by --markdown-playground-url \
or `#![doc(html_playground_url=...)]`",
"URL")),
+ unstable(optflag("", "enable-commonmark", "to enable commonmark doc rendering/testing")),
]
}
let css_file_extension = matches.opt_str("e").map(|s| PathBuf::from(&s));
let cfgs = matches.opt_strs("cfg");
+ let render_type = if matches.opt_present("enable-commonmark") {
+ RenderType::Pulldown
+ } else {
+ RenderType::Hoedown
+ };
+
if let Some(ref p) = css_file_extension {
if !p.is_file() {
writeln!(
match (should_test, markdown_input) {
(true, true) => {
- return markdown::test(input, cfgs, libs, externs, test_args, maybe_sysroot)
+ return markdown::test(input, cfgs, libs, externs, test_args, maybe_sysroot, render_type)
}
(true, false) => {
- return test::run(input, cfgs, libs, externs, test_args, crate_name, maybe_sysroot)
+ return test::run(input, cfgs, libs, externs, test_args, crate_name, maybe_sysroot,
+ render_type)
}
(false, true) => return markdown::render(input,
output.unwrap_or(PathBuf::from("doc")),
&matches, &external_html,
- !matches.opt_present("markdown-no-toc")),
+ !matches.opt_present("markdown-no-toc"),
+ render_type),
(false, false) => {}
}
output.unwrap_or(PathBuf::from("doc")),
passes.into_iter().collect(),
css_file_extension,
- renderinfo)
+ renderinfo,
+ render_type)
.expect("failed to generate documentation");
0
}
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, old_find_testable_code};
+use html::markdown::RenderType;
use test::{TestOptions, Collector};
/// Separate any lines at the start of the file that begin with `# ` or `%`.
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: PathBuf, matches: &getopts::Matches,
- external_html: &ExternalHtml, include_toc: bool) -> isize {
+ external_html: &ExternalHtml, include_toc: bool,
+ render_type: RenderType) -> isize {
let input_p = Path::new(input);
output.push(input_p.file_stem().unwrap());
output.set_extension("html");
reset_ids(false);
let rendered = if include_toc {
- format!("{}", MarkdownWithToc(text))
+ format!("{}", MarkdownWithToc(text, render_type))
} else {
- format!("{}", Markdown(text))
+ format!("{}", Markdown(text, render_type))
};
let err = write!(
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
- mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>) -> isize {
+ mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>,
+ render_type: RenderType) -> isize {
let input_str = match load_string(input) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
opts.no_crate_inject = true;
let mut collector = Collector::new(input.to_string(), cfgs, libs, externs,
true, opts, maybe_sysroot, None,
- Some(input.to_owned()));
+ Some(input.to_owned()),
+ render_type);
old_find_testable_code(&input_str, &mut collector, DUMMY_SP);
find_testable_code(&input_str, &mut collector, DUMMY_SP);
test_args.insert(0, "rustdoctest".to_string());
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use std::collections::HashMap;
use std::env;
use std::ffi::OsString;
use std::io::prelude::*;
use errors::emitter::ColorConfig;
use clean::Attributes;
-use html::markdown;
+use html::markdown::{self, RenderType};
#[derive(Clone, Default)]
pub struct TestOptions {
externs: Externs,
mut test_args: Vec<String>,
crate_name: Option<String>,
- maybe_sysroot: Option<PathBuf>)
+ maybe_sysroot: Option<PathBuf>,
+ render_type: RenderType)
-> isize {
let input_path = PathBuf::from(input);
let input = config::Input::File(input_path.clone());
opts,
maybe_sysroot,
Some(codemap),
- None);
+ None,
+ render_type);
{
let dep_graph = DepGraph::new(false);
pub struct Collector {
pub tests: Vec<testing::TestDescAndFn>,
// to be removed when hoedown will be definitely gone
- pub old_tests: Vec<String>,
+ pub old_tests: HashMap<String, Vec<String>>,
names: Vec<String>,
cfgs: Vec<String>,
libs: SearchPaths,
position: Span,
codemap: Option<Rc<CodeMap>>,
filename: Option<String>,
+ // to be removed when hoedown will be removed as well
+ pub render_type: RenderType,
}
impl Collector {
pub fn new(cratename: String, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
use_headers: bool, opts: TestOptions, maybe_sysroot: Option<PathBuf>,
- codemap: Option<Rc<CodeMap>>, filename: Option<String>) -> Collector {
+ codemap: Option<Rc<CodeMap>>, filename: Option<String>,
+ render_type: RenderType) -> Collector {
Collector {
tests: Vec::new(),
- old_tests: Vec::new(),
+ old_tests: HashMap::new(),
names: Vec::new(),
cfgs: cfgs,
libs: libs,
position: DUMMY_SP,
codemap: codemap,
filename: filename,
+ render_type: render_type,
}
}
}
}
-    pub fn add_old_test(&mut self, line: usize, filename: String) {
-        let name = self.generate_name(line, &filename);
-        self.old_tests.push(name);
+    // to be removed once hoedown is gone
+    // Builds the stable prefix of a generated test name (everything up to the
+    // line number). Keying on this prefix instead of the full name lets a
+    // hoedown-extracted test match its pulldown-extracted counterpart even when
+    // the two parsers disagree about the exact line a code block starts on.
+    fn generate_name_beginning(&self, filename: &str) -> String {
+        if self.use_headers {
+            if let Some(ref header) = self.current_header {
+                format!("{} - {} (line", filename, header)
+            } else {
+                format!("{} - (line", filename)
+            }
+        } else {
+            format!("{} - {} (line", filename, self.names.join("::"))
+        }
+    }
+
+    // Records a code block found by the old (hoedown) extractor; `add_test`
+    // later consults `old_tests` so it can warn about blocks that only the new
+    // (pulldown) extractor would run. The snippet is trimmed so whitespace
+    // differences between the two extractors don't break matching.
+    pub fn add_old_test(&mut self, test: String, filename: String) {
+        let name_beg = self.generate_name_beginning(&filename);
+        let entry = self.old_tests.entry(name_beg)
+                        .or_insert(Vec::new());
+        entry.push(test.trim().to_owned());
    }
pub fn add_test(&mut self, test: String,
as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
line: usize, filename: String) {
let name = self.generate_name(line, &filename);
- if self.old_tests.iter().find(|&x| x == &name).is_none() {
- let _ = writeln!(&mut io::stderr(),
- "WARNING: {} Code block is not currently run as a test, but will in \
- future versions of rustdoc. Please ensure this code block is a \
- runnable test, or use the `ignore` directive.",
- name);
- return
+ // to be removed when hoedown is removed
+ if self.render_type == RenderType::Pulldown {
+ let name_beg = self.generate_name_beginning(&filename);
+ let mut found = false;
+ let test = test.trim().to_owned();
+ if let Some(entry) = self.old_tests.get_mut(&name_beg) {
+ found = entry.remove_item(&test).is_some();
+ }
+ if !found {
+ let _ = writeln!(&mut io::stderr(),
+ "WARNING: {} Code block is not currently run as a test, but will \
+ in future versions of rustdoc. Please ensure this code block is \
+ a runnable test, or use the `ignore` directive.",
+ name);
+ return
+ }
}
let cfgs = self.cfgs.clone();
let libs = self.libs.clone();
attrs.unindent_doc_comments();
if let Some(doc) = attrs.doc_value() {
self.collector.cnt = 0;
- markdown::old_find_testable_code(doc, self.collector,
+ if self.collector.render_type == RenderType::Pulldown {
+ markdown::old_find_testable_code(doc, self.collector,
+ attrs.span.unwrap_or(DUMMY_SP));
+ markdown::find_testable_code(doc, self.collector,
attrs.span.unwrap_or(DUMMY_SP));
- markdown::find_testable_code(doc, self.collector,
- attrs.span.unwrap_or(DUMMY_SP));
+ } else {
+ markdown::old_find_testable_code(doc, self.collector,
+ attrs.span.unwrap_or(DUMMY_SP));
+ }
}
nested(self);
/// An owning iterator over the entries of a `HashMap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`HashMap`]
+/// This `struct` is created by the [`into_iter`] method on [`HashMap`][`HashMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.HashMap.html#method.into_iter
/// An owning iterator over the items of a `HashSet`.
///
-/// This `struct` is created by the [`into_iter`] method on [`HashSet`]
+/// This `struct` is created by the [`into_iter`] method on [`HashSet`][`HashSet`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`HashSet`]: struct.HashSet.html
}
}
+// Raw OS type aliases and `stat` layout for the new x86_64 Android target.
+#[cfg(target_arch = "x86_64")]
+mod arch {
+    use os::raw::{c_uint, c_long, c_ulong};
+    use os::unix::raw::{uid_t, gid_t};
+
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type dev_t = u64;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type mode_t = u32;
+
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type blkcnt_t = u64;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type blksize_t = u64;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type ino_t = u64;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type nlink_t = u32;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type off_t = u64;
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub type time_t = i64;
+
+    // C-compatible mirror of the kernel's `struct stat`; field types use the
+    // raw C aliases so the layout matches libc exactly.
+    // NOTE(review): presumably matches bionic's <sys/stat.h> for x86_64 —
+    // confirm against the NDK headers.
+    #[repr(C)]
+    #[derive(Clone)]
+    #[stable(feature = "raw_ext", since = "1.1.0")]
+    pub struct stat {
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_dev: dev_t,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_ino: ino_t,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_nlink: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_mode: c_uint,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_uid: uid_t,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_gid: gid_t,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_rdev: dev_t,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_size: i64,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_blksize: c_long,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_blocks: c_long,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_atime: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_atime_nsec: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_mtime: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_mtime_nsec: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_ctime: c_ulong,
+        #[stable(feature = "raw_ext", since = "1.1.0")]
+        pub st_ctime_nsec: c_ulong,
+        // reserved padding in the kernel ABI; deliberately private
+        __unused: [c_long; 3],
+    }
+}
+
//! Cross-platform path manipulation.
//!
-//! This module provides two types, [`PathBuf`] and [`Path`] (akin to [`String`]
+//! This module provides two types, [`PathBuf`] and [`Path`][`Path`] (akin to [`String`]
//! and [`str`]), for working with paths abstractly. These types are thin wrappers
//! around [`OsString`] and [`OsStr`] respectively, meaning that they work directly
//! on strings according to the local platform's path syntax.
/// If [`self.file_name`] was [`None`], this is equivalent to pushing
/// `file_name`.
///
+ /// Otherwise it is equivalent to calling [`pop`] and then pushing
+ /// `file_name`. The new path will be a sibling of the original path.
+ /// (That is, it will have the same parent.)
+ ///
/// [`self.file_name`]: struct.PathBuf.html#method.file_name
/// [`None`]: ../../std/option/enum.Option.html#variant.None
+ /// [`pop`]: struct.PathBuf.html#method.pop
///
/// # Examples
///
})
}
- /// Returns the final component of the `Path`, if it is a normal file.
+ /// Returns the final component of the `Path`, if there is one.
+ ///
+ /// If the path is a normal file, this is the file name. If it's the path of a directory, this
+ /// is the directory name.
///
/// Returns [`None`] If the path terminates in `..`.
///
/// use std::path::Path;
/// use std::ffi::OsStr;
///
- /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt").file_name());
+ /// assert_eq!(Some(OsStr::new("bin")), Path::new("/usr/bin/").file_name());
+ /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("tmp/foo.txt").file_name());
/// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.").file_name());
/// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.//").file_name());
/// assert_eq!(None, Path::new("foo.txt/..").file_name());
+ /// assert_eq!(None, Path::new("/").file_name());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn file_name(&self) -> Option<&OsStr> {
///
/// let path = Path::new("/tmp/foo.txt");
/// assert_eq!(path.with_file_name("bar.txt"), PathBuf::from("/tmp/bar.txt"));
+ ///
+ /// let path = Path::new("/tmp");
+ /// assert_eq!(path.with_file_name("var"), PathBuf::from("/var"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_file_name<S: AsRef<OsStr>>(&self, file_name: S) -> PathBuf {
/// An error returned from the [`recv`] function on a [`Receiver`].
///
/// The [`recv`] operation can only fail if the sending half of a
-/// [`channel`] (or [`sync_channel`]) is disconnected, implying that no further
+/// [`channel`][`channel`] (or [`sync_channel`]) is disconnected, implying that no further
/// messages will ever be received.
///
/// [`recv`]: struct.Receiver.html#method.recv
self.meta_item().and_then(|meta_item| meta_item.value_str())
}
+    /// Returns a name and single literal value tuple of the MetaItem.
+    ///
+    /// Matches only the shape `name(lit)` — a meta-item list containing exactly
+    /// one element that is a literal (e.g. `align(8)` inside `#[repr(...)]`);
+    /// any other shape yields `None`.
+    pub fn name_value_literal(&self) -> Option<(Name, &Lit)> {
+        self.meta_item().and_then(
+            |meta_item| meta_item.meta_item_list().and_then(
+                |meta_item_list| {
+                    if meta_item_list.len() == 1 {
+                        let nested_item = &meta_item_list[0];
+                        if nested_item.is_literal() {
+                            // `unwrap` is safe: `is_literal()` was just checked.
+                            Some((meta_item.name(), nested_item.literal().unwrap()))
+                        } else {
+                            None
+                        }
+                    }
+                    else {
+                        None
+                    }}))
+    }
+
/// Returns a MetaItem if self is a MetaItem with Kind Word.
pub fn word(&self) -> Option<&MetaItem> {
self.meta_item().and_then(|meta_item| if meta_item.is_word() {
continue
}
+ let mut recognised = false;
if let Some(mi) = item.word() {
let word = &*mi.name().as_str();
let hint = match word {
_ => match int_type_of_word(word) {
Some(ity) => Some(ReprInt(ity)),
None => {
- // Not a word we recognize
- span_err!(diagnostic, item.span, E0552,
- "unrecognized representation hint");
None
}
}
};
if let Some(h) = hint {
+ recognised = true;
acc.push(h);
}
- } else {
- span_err!(diagnostic, item.span, E0553,
- "unrecognized enum representation hint");
+ } else if let Some((name, value)) = item.name_value_literal() {
+ if name == "align" {
+ recognised = true;
+ let mut align_error = None;
+ if let ast::LitKind::Int(align, ast::LitIntType::Unsuffixed) = value.node {
+ if align.is_power_of_two() {
+ // rustc::ty::layout::Align restricts align to <= 32768
+ if align <= 32768 {
+ acc.push(ReprAlign(align as u16));
+ } else {
+ align_error = Some("larger than 32768");
+ }
+ } else {
+ align_error = Some("not a power of two");
+ }
+ } else {
+ align_error = Some("not an unsuffixed integer");
+ }
+ if let Some(align_error) = align_error {
+ span_err!(diagnostic, item.span, E0589,
+ "invalid `repr(align)` attribute: {}", align_error);
+ }
+ }
+ }
+ if !recognised {
+ // Not a word we recognize
+ span_err!(diagnostic, item.span, E0552,
+ "unrecognized representation hint");
}
}
}
ReprExtern,
ReprPacked,
ReprSimd,
+ ReprAlign(u16),
}
#[derive(Eq, Hash, PartialEq, Debug, RustcEncodable, RustcDecodable, Copy, Clone)]
E0550, // multiple deprecated attributes
E0551, // incorrect meta item
E0552, // unrecognized representation hint
- E0553, // unrecognized enum representation hint
E0554, // #[feature] may not be used on the [] release channel
E0555, // malformed feature attribute, expected #![feature(...)]
E0556, // malformed feature, expected just one word
E0557, // feature has been removed
E0584, // file for module `..` found at both .. and ..
+ E0589, // invalid `repr(align)` attribute
}
module.directory.pop();
self.cx.current_expansion.module = Rc::new(module);
+ let orig_mod_span = krate.module.inner;
+
let krate_item = Expansion::Items(SmallVector::one(P(ast::Item {
attrs: krate.attrs,
span: krate.span,
vis: ast::Visibility::Public,
})));
- match self.expand(krate_item).make_items().pop().unwrap().unwrap() {
- ast::Item { attrs, node: ast::ItemKind::Mod(module), .. } => {
+ match self.expand(krate_item).make_items().pop().map(P::unwrap) {
+ Some(ast::Item { attrs, node: ast::ItemKind::Mod(module), .. }) => {
krate.attrs = attrs;
krate.module = module;
},
+ None => {
+ // Resolution failed so we return an empty expansion
+ krate.attrs = vec![];
+ krate.module = ast::Mod {
+ inner: orig_mod_span,
+ items: vec![],
+ };
+ },
_ => unreachable!(),
};
// Allows the `catch {...}` expression
(active, catch_expr, "1.17.0", Some(31436)),
+ // Allows `repr(align(u16))` struct attribute (RFC 1358)
+ (active, repr_align, "1.17.0", Some(33626)),
+
// See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
(active, rvalue_static_promotion, "1.15.1", Some(38865)),
and possibly buggy");
}
+ if item.check_name("align") {
+ gate_feature_post!(&self, repr_align, i.span,
+ "the struct `#[repr(align(u16))]` attribute \
+ is experimental");
+ }
}
}
}
enum PrevTokenKind {
DocComment,
Comma,
+ Plus,
Interpolated,
Eof,
Other,
self.prev_token_kind = match self.token {
token::DocComment(..) => PrevTokenKind::DocComment,
token::Comma => PrevTokenKind::Comma,
+ token::BinOp(token::Plus) => PrevTokenKind::Plus,
token::Interpolated(..) => PrevTokenKind::Interpolated,
token::Eof => PrevTokenKind::Eof,
_ => PrevTokenKind::Other,
break;
}
}
+ let trailing_plus = self.prev_token_kind == PrevTokenKind::Plus;
self.expect(&token::CloseDelim(token::Paren))?;
if ts.len() == 1 && !last_comma {
let ty = ts.into_iter().nth(0).unwrap().unwrap();
+ let maybe_bounds = allow_plus && self.token == token::BinOp(token::Plus);
match ty.node {
- // Accept `(Trait1) + Trait2 + 'a` for backward compatibility (#39318).
- TyKind::Path(None, ref path)
- if allow_plus && self.token == token::BinOp(token::Plus) => {
- self.bump(); // `+`
- let pt = PolyTraitRef::new(Vec::new(), path.clone(), lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(pt, TraitBoundModifier::None)];
- bounds.append(&mut self.parse_ty_param_bounds()?);
- TyKind::TraitObject(bounds)
+ // `(TY_BOUND_NOPAREN) + BOUND + ...`.
+ TyKind::Path(None, ref path) if maybe_bounds => {
+ self.parse_remaining_bounds(Vec::new(), path.clone(), lo, true)?
}
+ TyKind::TraitObject(ref bounds)
+ if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
+ let path = match bounds[0] {
+ TraitTyParamBound(ref pt, ..) => pt.trait_ref.path.clone(),
+ _ => self.bug("unexpected lifetime bound"),
+ };
+ self.parse_remaining_bounds(Vec::new(), path, lo, true)?
+ }
+ // `(TYPE)`
_ => TyKind::Paren(P(ty))
}
} else {
// Just a type path or bound list (trait object type) starting with a trait.
// `Type`
// `Trait1 + Trait2 + 'a`
- if allow_plus && self.eat(&token::BinOp(token::Plus)) {
- let poly_trait = PolyTraitRef::new(Vec::new(), path, lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(poly_trait, TraitBoundModifier::None)];
- bounds.append(&mut self.parse_ty_param_bounds()?);
- TyKind::TraitObject(bounds)
+ if allow_plus && self.check(&token::BinOp(token::Plus)) {
+ self.parse_remaining_bounds(Vec::new(), path, lo, true)?
} else {
TyKind::Path(None, path)
}
self.parse_ty_bare_fn(lifetime_defs)?
} else {
let path = self.parse_path(PathStyle::Type)?;
- let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(poly_trait, TraitBoundModifier::None)];
- if allow_plus && self.eat(&token::BinOp(token::Plus)) {
- bounds.append(&mut self.parse_ty_param_bounds()?)
- }
- TyKind::TraitObject(bounds)
+ let parse_plus = allow_plus && self.check(&token::BinOp(token::Plus));
+ self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
}
} else if self.eat_keyword(keywords::Impl) {
// FIXME: figure out priority of `+` in `impl Trait1 + Trait2` (#34511).
Ok(P(ty))
}
+    // Shared tail of trait-object-type parsing: given the already-parsed
+    // `for<...>` binders (`lifetime_defs`) and the first trait `path`, builds
+    // the initial bound and, when `parse_plus` is set, consumes the `+` token
+    // and appends the remaining bounds. Always yields `TyKind::TraitObject`.
+    fn parse_remaining_bounds(&mut self, lifetime_defs: Vec<LifetimeDef>, path: ast::Path,
+                              lo: Span, parse_plus: bool) -> PResult<'a, TyKind> {
+        let poly_trait_ref = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
+        let mut bounds = vec![TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)];
+        if parse_plus {
+            self.bump(); // `+`
+            bounds.append(&mut self.parse_ty_param_bounds()?);
+        }
+        Ok(TyKind::TraitObject(bounds))
+    }
+
fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
// Do not add `+` to expected tokens.
if !allow_plus || self.token != token::BinOp(token::Plus) {
// Parse bounds of a type parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
// BOUND = TY_BOUND | LT_BOUND
// LT_BOUND = LIFETIME (e.g. `'a`)
- // TY_BOUND = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
+ // TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
+ // TY_BOUND_NOPAREN = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
fn parse_ty_param_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, TyParamBounds> {
let mut bounds = Vec::new();
loop {
- let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
- if self.check_lifetime() {
- if let Some(question_span) = question {
- self.span_err(question_span,
- "`?` may only modify trait bounds, not lifetime bounds");
- }
- bounds.push(RegionTyParamBound(self.expect_lifetime()));
- } else if self.check_keyword(keywords::For) || self.check_path() {
- let lo = self.span;
- let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
- let path = self.parse_path(PathStyle::Type)?;
- let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
- let modifier = if question.is_some() {
- TraitBoundModifier::Maybe
+ let is_bound_start = self.check_path() || self.check_lifetime() ||
+ self.check(&token::Question) ||
+ self.check_keyword(keywords::For) ||
+ self.check(&token::OpenDelim(token::Paren));
+ if is_bound_start {
+ let has_parens = self.eat(&token::OpenDelim(token::Paren));
+ let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
+ if self.token.is_lifetime() {
+ if let Some(question_span) = question {
+ self.span_err(question_span,
+ "`?` may only modify trait bounds, not lifetime bounds");
+ }
+ bounds.push(RegionTyParamBound(self.expect_lifetime()));
} else {
- TraitBoundModifier::None
- };
- bounds.push(TraitTyParamBound(poly_trait, modifier));
+ let lo = self.span;
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+ let path = self.parse_path(PathStyle::Type)?;
+ let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
+ let modifier = if question.is_some() {
+ TraitBoundModifier::Maybe
+ } else {
+ TraitBoundModifier::None
+ };
+ bounds.push(TraitTyParamBound(poly_trait, modifier));
+ }
+ if has_parens {
+ self.expect(&token::CloseDelim(token::Paren))?;
+ if let Some(&RegionTyParamBound(..)) = bounds.last() {
+ self.span_err(self.prev_span,
+ "parenthesized lifetime bounds are not supported");
+ }
+ }
} else {
break
}
--> test.rs:2:10
|
2 | fn foo() {
- | __________^ starting here...
+ | __________^
3 | | }
- | |_^ ...ending here: test
+ | |_^ test
"#);
}
--> test.rs:2:10
|
2 | fn foo() {
- | __________^ starting here...
+ | __________^
3 | |
4 | |
5 | | }
- | |___^ ...ending here: test
+ | |___^ test
"#);
}
--> test.rs:3:3
|
3 | X0 Y0
- | ____^__- starting here...
+ | ____^__-
| | ___|
- | || starting here...
+ | ||
4 | || X1 Y1
5 | || X2 Y2
- | ||____^__- ...ending here: `Y` is a good letter too
+ | ||____^__- `Y` is a good letter too
| |____|
- | ...ending here: `X` is a good letter
+ | `X` is a good letter
"#);
}
--> test.rs:3:3
|
3 | X0 Y0
- | ____^__- starting here...
+ | ____^__-
| | ___|
- | || starting here...
+ | ||
4 | || Y1 X1
- | ||____-__^ ...ending here: `X` is a good letter
+ | ||____-__^ `X` is a good letter
| |_____|
- | ...ending here: `Y` is a good letter too
+ | `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |_________- starting here...
+ | |_________-
5 | || X2 Y2 Z2
- | ||____^ ...ending here: `X` is a good letter
+ | ||____^ `X` is a good letter
6 | | X3 Y3 Z3
- | |_____- ...ending here: `Y` is a good letter too
+ | |_____- `Y` is a good letter too
"#);
}
--> test.rs:3:3
|
3 | X0 Y0 Z0
- | _____^__-__- starting here...
+ | _____^__-__-
| | ____|__|
- | || ___| starting here...
- | ||| starting here...
+ | || ___|
+ | |||
4 | ||| X1 Y1 Z1
5 | ||| X2 Y2 Z2
- | |||____^__-__- ...ending here: `Z` label
+ | |||____^__-__- `Z` label
| ||____|__|
- | |____| ...ending here: `Y` is a good letter too
- | ...ending here: `X` is a good letter
+ | |____| `Y` is a good letter too
+ | `X` is a good letter
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^_- starting here...
+ | |____^_-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | X2 Y2 Z2
- | |____-______- ...ending here: `Y` is a good letter too
+ | |____-______- `Y` is a good letter too
| ____|
- | | starting here...
+ | |
6 | | X3 Y3 Z3
- | |________- ...ending here: `Z`
+ | |________- `Z`
"#);
}
vec![
SpanLabel {
start: Position {
- string: "Y0",
+ string: "X0",
count: 1,
},
end: Position {
],
r#"
error: foo
- --> test.rs:3:6
+ --> test.rs:3:3
|
-3 | X0 Y0 Z0
- | ______^ starting here...
+3 | / X0 Y0 Z0
4 | | X1 Y1 Z1
- | |____^ ...ending here: `X` is a good letter
+ | |____^ `X` is a good letter
5 | X2 Y2 Z2
- | ______- starting here...
+ | ______-
6 | | X3 Y3 Z3
- | |__________- ...ending here: `Y` is a good letter too
+ | |__________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^____- starting here...
+ | |____^____-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | X2 Y2 Z2
6 | | X3 Y3 Z3
- | |___________- ...ending here: `Y` is a good letter too
+ | |___________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^____- starting here...
+ | |____^____-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | 1
6 | | 2
7 | | 3
... |
15 | | X2 Y2 Z2
16 | | X3 Y3 Z3
- | |___________- ...ending here: `Y` is a good letter too
+ | |___________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | 1
5 | | 2
6 | | 3
7 | | X1 Y1 Z1
- | |_________- starting here...
+ | |_________-
8 | || 4
9 | || 5
10 | || 6
11 | || X2 Y2 Z2
- | ||__________- ...ending here: `Z` is a good letter too
+ | ||__________- `Z` is a good letter too
... |
15 | | 10
16 | | X3 Y3 Z3
- | |_______^ ...ending here: `Y` is a good letter
+ | |_______^ `Y` is a good letter
"#);
}
for a in type_attrs {
for r in &attr::find_repr_attrs(diagnostic, a) {
repr_type_name = match *r {
- attr::ReprPacked | attr::ReprSimd => continue,
+ attr::ReprPacked | attr::ReprSimd | attr::ReprAlign(_) => continue,
attr::ReprExtern => "i32",
attr::ReprInt(attr::SignedInt(ast::IntTy::Is)) => "isize",
// object (usually called `crtX.o), which then invokes initialization callbacks
// of other runtime components (registered via yet another special image section).
-#![feature(no_core, lang_items)]
+#![feature(no_core, lang_items, optin_builtin_traits)]
#![crate_type="rlib"]
#![no_core]
#![allow(non_camel_case_types)]
trait Sized {}
#[lang = "sync"]
trait Sync {}
+impl Sync for .. {}
#[lang = "copy"]
trait Copy {}
-impl<T> Sync for T {}
+#[cfg_attr(not(stage0), lang = "freeze")]
+trait Freeze {}
+impl Freeze for .. {}
#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
pub mod eh_frames {
// CHECK-NOT: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
-// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
+// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK-NOT: {{(call|invoke).*}}drop{{.*}}SomeUniqueName
// The next line checks for the } that ends the function definition
// CHECK-LABEL: {{^[}]}}
// CHECK: bitcast{{.*}}personalityslot
// CHECK-NEXT: call void @llvm.lifetime.start
might_unwind();
+ let _t = S;
might_unwind();
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// force-host
+// no-prefer-dynamic
+
+#![crate_type = "proc-macro"]
+#![feature(proc_macro)]
+
+extern crate proc_macro;
+use proc_macro::TokenStream;
+
+#[proc_macro_attribute]
+pub fn emit_unchanged(_args: TokenStream, input: TokenStream) -> TokenStream {
+ input
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-41211.rs
+
+// FIXME: https://github.com/rust-lang/rust/issues/41430
+// This is a temporary regression test for the ICE reported in #41211
+
+#![feature(proc_macro)]
+#![emit_unchanged]
+//~^ ERROR: cannot find attribute macro `emit_unchanged` in this scope
+extern crate issue_41211;
+use issue_41211::emit_unchanged;
+
+fn main() {}
// except according to those terms.
#![allow(dead_code)]
+#![feature(attr_literals)]
#![feature(repr_simd)]
#[repr(C)] //~ ERROR: attribute should be applied to struct, enum or union
#[repr(C)]
enum EExtern { A, B }
+#[repr(align(8))] //~ ERROR: attribute should be applied to struct
+enum EAlign { A, B }
+
#[repr(packed)] //~ ERROR: attribute should be applied to struct
enum EPacked { A, B }
let _: &[i32] = [0];
//~^ ERROR mismatched types
//~| expected type `&[i32]`
- //~| found type `[{integer}; 1]`
//~| expected &[i32], found array of 1 elements
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
#![allow(dead_code)]
+#![feature(attr_literals)]
+#![feature(repr_align)]
#[repr(C)]
enum A { A }
#[repr(C, packed)]
struct E(i32);
-#[rustc_error]
-fn main() {} //~ ERROR compilation successful
+#[repr(packed, align(8))] //~ ERROR conflicting packed and align representation hints
+struct F(i32);
+
+fn main() {}
pub fn main() {
let x: Box<Trait> = Box::new(Foo);
- let _y: &Trait = x; //~ ERROR mismatched types
+ let _y: &Trait = x; //~ ERROR E0308
//~| expected type `&Trait`
//~| found type `std::boxed::Box<Trait>`
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+
+#[repr(align(64))]
+struct Foo(u64, u64); //~ error: the struct `#[repr(align(u16))]` attribute is experimental
+
+fn main() {}
fn main() {
let mut c = for_stdin();
let mut v = Vec::new();
- c.read_to(v); //~ ERROR mismatched types
+ c.read_to(v); //~ ERROR E0308
}
fn main() {
check((3, 5));
//~^ ERROR mismatched types
+//~| HELP try with `&(3, 5)`
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(dead_code)]
+#![feature(attr_literals)]
+#![feature(repr_align)]
+
+#[repr(align(16.0))] //~ ERROR: invalid `repr(align)` attribute: not an unsuffixed integer
+struct A(i32);
+
+#[repr(align(15))] //~ ERROR: invalid `repr(align)` attribute: not a power of two
+struct B(i32);
+
+#[repr(align(65536))] //~ ERROR: invalid `repr(align)` attribute: larger than 32768
+struct C(i32);
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+struct A(i32);
+
+struct B(A);
+
+#[repr(packed)]
+struct C(A); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+#[repr(packed)]
+struct D(B); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn f<T: ?>() {} //~ ERROR expected identifier, found `>`
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only -Z continue-parse-after-error
+
+fn main() {
+ let _: Box<((Copy)) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `((Copy))`
+ let _: Box<(Copy + Copy) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `( Copy + Copy)`
+ let _: Box<(Copy +) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `( Copy)`
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only -Z continue-parse-after-error
+
+fn f<T: Copy + ('a)>() {} //~ ERROR parenthesized lifetime bounds are not supported
+
+fn main() {
+ let _: Box<Copy + ('a)>; //~ ERROR parenthesized lifetime bounds are not supported
+ let _: Box<('a) + Copy>; //~ ERROR expected type, found `'a`
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn f<T: (Copy) + (?Sized) + (for<'a> Trait<'a>)>() {}
+
+fn main() {
+ let _: Box<(Copy) + (?Sized) + (for<'a> Trait<'a>)>;
+ let _: Box<(?Sized) + (for<'a> Trait<'a>) + (Copy)>;
+ let _: Box<(for<'a> Trait<'a>) + (Copy) + (?Sized)>;
+}
+
+FAIL //~ ERROR
#![crate_type = "lib"]
// we can compile to a variety of platforms, because we don't need
// cross-compiled standard libraries.
-#![feature(no_core)]
+#![feature(no_core, optin_builtin_traits)]
#![no_core]
#![feature(repr_simd, simd_ffi, link_llvm_intrinsics, lang_items)]
pub mod marker {
pub use Copy;
}
+
+#[lang = "freeze"]
+trait Freeze {}
+impl Freeze for .. {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(lang_items, no_core)]
+#![feature(lang_items, no_core, optin_builtin_traits)]
#![no_core]
#[lang="copy"]
#[lang="sized"]
trait Sized { }
+#[lang = "freeze"]
+trait Freeze {}
+impl Freeze for .. {}
+
#[lang="start"]
fn start(_main: *const u8, _argc: isize, _argv: *const *const u8) -> isize { 0 }
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+
+use std::mem;
+
+// Raising alignment
+#[repr(align(16))]
+struct Align16(i32);
+
+// Lowering has no effect
+#[repr(align(1))]
+struct Align1(i32);
+
+// Multiple attributes take the max
+#[repr(align(4))]
+#[repr(align(16))]
+#[repr(align(8))]
+struct AlignMany(i32);
+
+// Raising alignment may not alter size.
+#[repr(align(8))]
+#[allow(dead_code)]
+struct Align8Many {
+ a: i32,
+ b: i32,
+ c: i32,
+ d: u8,
+}
+
+enum Enum {
+ #[allow(dead_code)]
+ A(i32),
+ B(Align16)
+}
+
+// Nested alignment - use `#[repr(C)]` to suppress field reordering for sizeof test
+#[repr(C)]
+struct Nested {
+ a: i32,
+ b: i32,
+ c: Align16,
+ d: i8,
+}
+
+#[repr(packed)]
+struct Packed(i32);
+
+#[repr(align(16))]
+struct AlignContainsPacked {
+ a: Packed,
+ b: Packed,
+}
+
+impl Align16 {
+ // return aligned type
+ pub fn new(i: i32) -> Align16 {
+ Align16(i)
+ }
+ // pass aligned type
+ pub fn consume(a: Align16) -> i32 {
+ a.0
+ }
+}
+
+const CONST_ALIGN16: Align16 = Align16(7);
+static STATIC_ALIGN16: Align16 = Align16(8);
+
+// Check the actual address is aligned
+fn is_aligned_to<T>(p: &T, align: usize) -> bool {
+ let addr = p as *const T as usize;
+ (addr & (align - 1)) == 0
+}
+
+pub fn main() {
+ // check alignment and size by type and value
+ assert_eq!(mem::align_of::<Align16>(), 16);
+ assert_eq!(mem::size_of::<Align16>(), 16);
+
+ let a = Align16(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+
+ assert!(is_aligned_to(&a, 16));
+
+ // lowering should have no effect
+ assert_eq!(mem::align_of::<Align1>(), 4);
+ assert_eq!(mem::size_of::<Align1>(), 4);
+ let a = Align1(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 4);
+ assert_eq!(mem::size_of_val(&a), 4);
+ assert!(is_aligned_to(&a, 4));
+
+ // when multiple attributes are specified the max should be used
+ assert_eq!(mem::align_of::<AlignMany>(), 16);
+ assert_eq!(mem::size_of::<AlignMany>(), 16);
+ let a = AlignMany(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // raising alignment should not reduce size
+ assert_eq!(mem::align_of::<Align8Many>(), 8);
+ assert_eq!(mem::size_of::<Align8Many>(), 16);
+ let a = Align8Many { a: 1, b: 2, c: 3, d: 4 };
+ assert_eq!(a.a, 1);
+ assert_eq!(mem::align_of_val(&a), 8);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 8));
+
+ // return type
+ let a = Align16::new(1);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert_eq!(a.0, 1);
+ assert!(is_aligned_to(&a, 16));
+ assert_eq!(Align16::consume(a), 1);
+
+ // check const alignment, size and value
+ assert_eq!(mem::align_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(CONST_ALIGN16.0, 7);
+ assert!(is_aligned_to(&CONST_ALIGN16, 16));
+
+ // check global static alignment, size and value
+ assert_eq!(mem::align_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(STATIC_ALIGN16.0, 8);
+ assert!(is_aligned_to(&STATIC_ALIGN16, 16));
+
+ // Note that the size of Nested may change if struct field re-ordering is enabled
+ assert_eq!(mem::align_of::<Nested>(), 16);
+ assert_eq!(mem::size_of::<Nested>(), 48);
+ let a = Nested{ a: 1, b: 2, c: Align16(3), d: 4};
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.b), 4);
+ assert_eq!(mem::align_of_val(&a.c), 16);
+ assert_eq!(mem::size_of_val(&a), 48);
+ assert!(is_aligned_to(&a, 16));
+ // check the correct fields are indexed
+ assert_eq!(a.a, 1);
+ assert_eq!(a.b, 2);
+ assert_eq!(a.c.0, 3);
+ assert_eq!(a.d, 4);
+
+ // enum should be aligned to max alignment
+ assert_eq!(mem::align_of::<Enum>(), 16);
+ assert_eq!(mem::align_of_val(&Enum::B(Align16(0))), 16);
+ let e = Enum::B(Align16(15));
+ match e {
+ Enum::B(ref a) => {
+ assert_eq!(a.0, 15);
+ assert_eq!(mem::align_of_val(a), 16);
+ assert_eq!(mem::size_of_val(a), 16);
+ },
+ _ => ()
+ }
+ assert!(is_aligned_to(&e, 16));
+
+ // arrays of aligned elements should also be aligned
+ assert_eq!(mem::align_of::<[Align16;2]>(), 16);
+ assert_eq!(mem::size_of::<[Align16;2]>(), 32);
+
+ let a = [Align16(0), Align16(1)];
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // check heap value is aligned
+ assert_eq!(mem::align_of_val(Box::new(Align16(0)).as_ref()), 16);
+
+ // check heap array is aligned
+ let a = vec!(Align16(0), Align16(1));
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+
+ assert_eq!(mem::align_of::<AlignContainsPacked>(), 16);
+ assert_eq!(mem::size_of::<AlignContainsPacked>(), 16);
+ let a = AlignContainsPacked { a: Packed(1), b: Packed(2) };
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.a), 1);
+ assert_eq!(mem::align_of_val(&a.b), 1);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_name = "foo"]
-
-// ignore-tidy-end-whitespace
-
-// @has foo/fn.f.html
-// @has - '<p>hard break:<br />'
-// @has - 'after hard break</p>'
-/// hard break:
-/// after hard break
-pub fn f() {}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_name = "foo"]
-
-// ignore-tidy-linelength
-
-// @has foo/fn.f.html
-// @has - '<p>markdown test</p>'
-// @has - '<p>this is a <a href="https://example.com" title="this is a title">link</a>.</p>'
-// @has - '<hr />'
-// @has - '<p>a footnote<sup id="supref1"><a href="#ref1">1</a></sup>.</p>'
-// @has - '<p>another footnote<sup id="supref2"><a href="#ref2">2</a></sup>.</p>'
-// @has - '<p><img src="https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png" alt="Rust" /></p>'
-// @has - '<div class="footnotes"><hr><ol><li id="ref1">'
-// @has - '<p>Thing <a href="#supref1" rev="footnote">↩</a></p></li><li id="ref2">'
-// @has - '<p>Another Thing <a href="#supref2" rev="footnote">↩</a></p></li></ol></div>'
-/// markdown test
-///
-/// this is a [link].
-///
-/// [link]: https://example.com "this is a title"
-///
-/// -----------
-///
-/// a footnote[^footnote].
-///
-/// another footnote[^footnotebis].
-///
-/// [^footnote]: Thing
-///
-///
-/// [^footnotebis]: Another Thing
-///
-///
-/// ![Rust](https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png)
-pub fn f() {}
15 | fn renew<'b: 'a>(self) -> &'b mut [T];
| -------------------------------------- definition of `renew` from trait
...
-19 | fn renew<'b: 'a>(self) -> &'b mut [T] where 'a: 'b {
- | _____^ starting here...
+19 | / fn renew<'b: 'a>(self) -> &'b mut [T] where 'a: 'b {
20 | | //~^ ERROR E0276
21 | | &mut self[..]
22 | | }
- | |_____^ ...ending here: impl has extra requirement `'a: 'b`
+ | |_____^ impl has extra requirement `'a: 'b`
error: aborting due to previous error
19 | fn zip<B, U: Iterator<U>>(self, other: U) -> ZipIterator<Self, U>;
| ------------------------------------------------------------------ definition of `zip` from trait
...
-23 | fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
- | _____^ starting here...
+23 | / fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
24 | | //~^ ERROR E0276
25 | | ZipIterator{a: self, b: other}
26 | | }
- | |_____^ ...ending here: impl has extra requirement `U: Iterator<B>`
+ | |_____^ impl has extra requirement `U: Iterator<B>`
error: aborting due to previous error
--> $DIR/issue-40006.rs:11:9
|
11 | impl X {
- | _________^ starting here...
+ | _________^
12 | | Y
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: missing `fn`, `type`, or `const` for trait-item declaration
--> $DIR/issue-40006.rs:17:10
|
17 | trait X {
- | __________^ starting here...
+ | __________^
18 | | X() {}
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: expected `[`, found `#`
--> $DIR/issue-40006.rs:19:17
--> $DIR/issue-40006.rs:19:21
|
19 | fn xxx() { ### }
- | _____________________^ starting here...
+ | _____________________^
20 | | L = M;
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: missing `fn`, `type`, or `const` for trait-item declaration
--> $DIR/issue-40006.rs:20:11
|
20 | L = M;
- | ___________^ starting here...
+ | ___________^
21 | | Z = { 2 + 3 };
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: expected one of `const`, `extern`, `fn`, `type`, `unsafe`, or `}`, found `;`
--> $DIR/issue-40006.rs:21:18
error[E0569]: requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
--> $DIR/dropck-eyepatch-implies-unsafe-impl.rs:32:1
|
-32 | impl<#[may_dangle] A, B: fmt::Debug> Drop for Pt<A, B> {
- | _^ starting here...
+32 | / impl<#[may_dangle] A, B: fmt::Debug> Drop for Pt<A, B> {
33 | | //~^ ERROR requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
34 | |
35 | | // (unsafe to access self.1 due to #[may_dangle] on A)
36 | | fn drop(&mut self) { println!("drop {} {:?}", self.0, self.2); }
37 | | }
- | |_^ ...ending here
+ | |_^
error[E0569]: requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
--> $DIR/dropck-eyepatch-implies-unsafe-impl.rs:38:1
|
-38 | impl<#[may_dangle] 'a, 'b, B: fmt::Debug> Drop for Pr<'a, 'b, B> {
- | _^ starting here...
+38 | / impl<#[may_dangle] 'a, 'b, B: fmt::Debug> Drop for Pr<'a, 'b, B> {
39 | | //~^ ERROR requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
40 | |
41 | | // (unsafe to access self.1 due to #[may_dangle] on 'a)
42 | | fn drop(&mut self) { println!("drop {} {:?}", self.0, self.2); }
43 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to 2 previous errors
error: reached the type-length limit while instantiating `<T as Foo><(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(), &()), &(&()...`
--> $DIR/issue-37311.rs:23:5
|
-23 | fn recurse(&self) {
- | _____^ starting here...
+23 | / fn recurse(&self) {
24 | | (self, self).recurse();
25 | | }
- | |_____^ ...ending here
+ | |_____^
|
= note: consider adding a `#![type_length_limit="2097152"]` attribute to your crate
--> $DIR/ex1-return-one-existing-name-if-else.rs:11:44
|
11 | fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
12 | | if x > y { x } else { y }
13 | | }
- | |_^ ...ending here
+ | |_^
note: ...but the borrowed content is only valid for the anonymous lifetime #1 defined on the body at 11:43
--> $DIR/ex1-return-one-existing-name-if-else.rs:11:44
|
11 | fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
12 | | if x > y { x } else { y }
13 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2a-push-one-existing-name.rs:15:52
|
15 | fn foo<'a>(x: &mut Vec<Ref<'a, i32>>, y: Ref<i32>) {
- | ____________________________________________________^ starting here...
+ | ____________________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
note: ...does not necessarily outlive the lifetime 'a as defined on the body at 15:51
--> $DIR/ex2a-push-one-existing-name.rs:15:52
|
15 | fn foo<'a>(x: &mut Vec<Ref<'a, i32>>, y: Ref<i32>) {
- | ____________________________________________________^ starting here...
+ | ____________________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2b-push-no-existing-names.rs:15:44
|
15 | fn foo(x: &mut Vec<Ref<i32>>, y: Ref<i32>) {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
note: ...does not necessarily outlive the anonymous lifetime #2 defined on the body at 15:43
--> $DIR/ex2b-push-no-existing-names.rs:15:44
|
15 | fn foo(x: &mut Vec<Ref<i32>>, y: Ref<i32>) {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2c-push-inference-variable.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let z = Ref { data: y.data };
17 | | x.push(z);
18 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2c-push-inference-variable.rs:16:25
|
--> $DIR/ex2c-push-inference-variable.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let z = Ref { data: y.data };
17 | | x.push(z);
18 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected Ref<'b, _>, found Ref<'_, _>)
--> $DIR/ex2c-push-inference-variable.rs:17:12
|
--> $DIR/ex2d-push-inference-variable-2.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | a.push(b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2d-push-inference-variable-2.rs:17:25
|
--> $DIR/ex2d-push-inference-variable-2.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | a.push(b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected &mut std::vec::Vec<Ref<'_, i32>>, found &mut std::vec::Vec<Ref<'b, i32>>)
--> $DIR/ex2d-push-inference-variable-2.rs:16:33
|
--> $DIR/ex2e-push-inference-variable-3.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | Vec::push(a, b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2e-push-inference-variable-3.rs:17:25
|
--> $DIR/ex2e-push-inference-variable-3.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | Vec::push(a, b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected &mut std::vec::Vec<Ref<'_, i32>>, found &mut std::vec::Vec<Ref<'b, i32>>)
--> $DIR/ex2e-push-inference-variable-3.rs:16:33
|
error[E0308]: mismatched types
--> $DIR/abridged.rs:42:5
|
-42 | X {
- | _____^ starting here...
+42 | / X {
43 | | x: X {
44 | | x: "".to_string(),
45 | | y: 2,
46 | | },
47 | | y: 3,
48 | | }
- | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ | |_____^ expected struct `std::string::String`, found integral variable
|
= note: expected type `X<X<_, std::string::String>, std::string::String>`
found type `X<X<_, {integer}>, {integer}>`
error[E0308]: mismatched types
--> $DIR/abridged.rs:52:5
|
-52 | X {
- | _____^ starting here...
+52 | / X {
53 | | x: X {
54 | | x: "".to_string(),
55 | | y: 2,
56 | | },
57 | | y: "".to_string(),
58 | | }
- | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ | |_____^ expected struct `std::string::String`, found integral variable
|
= note: expected type `X<X<_, std::string::String>, _>`
found type `X<X<_, {integer}>, _>`
--> $DIR/main.rs:12:18
|
12 | let x: u32 = (
- | __________________^ starting here...
+ | __________________^
13 | | );
- | |_____^ ...ending here: expected u32, found ()
+ | |_____^ expected u32, found ()
|
= note: expected type `u32`
found type `()`
error[E0046]: not all trait items implemented, missing: `CONSTANT`, `Type`, `method`
--> $DIR/m2.rs:20:1
|
-20 | impl m1::X for X {
- | _^ starting here...
+20 | / impl m1::X for X {
21 | | }
- | |_^ ...ending here: missing `CONSTANT`, `Type`, `method` in implementation
+ | |_^ missing `CONSTANT`, `Type`, `method` in implementation
|
= note: `CONSTANT` from trait: `const CONSTANT: u32;`
= note: `Type` from trait: `type Type;`
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z print-type-sizes
+
+// This file illustrates how padding is handled: alignment
+// requirements can lead to the introduction of padding, either before
+// fields or at the end of the structure as a whole.
+//
+// It avoids using u64/i64 because on some targets that is only 4-byte
+// aligned (while on most it is 8-byte aligned) and so the resulting
+// padding and overall computed sizes can be quite different.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+#[derive(Default)]
+struct A(i32);
+
+enum E {
+ A(i32),
+ B(A)
+}
+
+#[derive(Default)]
+struct S {
+ a: i32,
+ b: i32,
+ c: A,
+ d: i8,
+}
+
+fn main() {
+ let _s: S = Default::default();
+}
--- /dev/null
+print-type-size type: `E`: 32 bytes, alignment: 16 bytes
+print-type-size discriminant: 4 bytes
+print-type-size variant `A`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size variant `B`: 28 bytes
+print-type-size padding: 12 bytes
+print-type-size field `.0`: 16 bytes, alignment: 16 bytes
+print-type-size type: `S`: 32 bytes, alignment: 16 bytes
+print-type-size field `.c`: 16 bytes
+print-type-size field `.a`: 4 bytes
+print-type-size field `.b`: 4 bytes
+print-type-size field `.d`: 1 bytes
+print-type-size end padding: 7 bytes
+print-type-size type: `A`: 16 bytes, alignment: 16 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size end padding: 12 bytes
//~| NOTE types differ in mutability
//~| NOTE expected type `&mut std::string::String`
//~| NOTE found type `&std::string::String`
- //~| HELP try with `&mut y`
test2(&y);
//~^ ERROR E0308
//~| NOTE types differ in mutability
|
= note: expected type `&str`
found type `std::string::String`
- = help: here are some functions which might fulfill your needs:
- - .as_str()
- - .trim()
- - .trim_left()
- - .trim_right()
+ = help: try with `&String::new()`
error[E0308]: mismatched types
--> $DIR/coerce-suggestions.rs:30:10
found type `&std::string::String`
error[E0308]: mismatched types
- --> $DIR/coerce-suggestions.rs:36:11
+ --> $DIR/coerce-suggestions.rs:35:11
|
-36 | test2(&y);
+35 | test2(&y);
| ^^ types differ in mutability
|
= note: expected type `&mut i32`
found type `&std::string::String`
error[E0308]: mismatched types
- --> $DIR/coerce-suggestions.rs:42:9
+ --> $DIR/coerce-suggestions.rs:41:9
|
-42 | f = box f;
+41 | f = box f;
| ^^^^^ cyclic type of infinite size
|
= note: expected type `_`
16 | fn bar(&self);
| -------------- `bar` from trait
...
-22 | impl Foo for FooConstForMethod {
- | _^ starting here...
+22 | / impl Foo for FooConstForMethod {
23 | | //~^ ERROR E0046
24 | | //~| NOTE missing `bar` in implementation
25 | | const bar: u64 = 1;
... |
28 | | const MY_CONST: u32 = 1;
29 | | }
- | |_^ ...ending here: missing `bar` in implementation
+ | |_^ missing `bar` in implementation
error[E0324]: item `MY_CONST` is an associated method, which doesn't match its trait `Foo`
--> $DIR/impl-wrong-item-for-trait.rs:37:5
17 | const MY_CONST: u32;
| -------------------- `MY_CONST` from trait
...
-33 | impl Foo for FooMethodForConst {
- | _^ starting here...
+33 | / impl Foo for FooMethodForConst {
34 | | //~^ ERROR E0046
35 | | //~| NOTE missing `MY_CONST` in implementation
36 | | fn bar(&self) {}
... |
39 | | //~| NOTE does not match trait
40 | | }
- | |_^ ...ending here: missing `MY_CONST` in implementation
+ | |_^ missing `MY_CONST` in implementation
error[E0325]: item `bar` is an associated type, which doesn't match its trait `Foo`
--> $DIR/impl-wrong-item-for-trait.rs:47:5
16 | fn bar(&self);
| -------------- `bar` from trait
...
-44 | impl Foo for FooTypeForMethod {
- | _^ starting here...
+44 | / impl Foo for FooTypeForMethod {
45 | | //~^ ERROR E0046
46 | | //~| NOTE missing `bar` in implementation
47 | | type bar = u64;
... |
50 | | const MY_CONST: u32 = 1;
51 | | }
- | |_^ ...ending here: missing `bar` in implementation
+ | |_^ missing `bar` in implementation
error[E0046]: not all trait items implemented, missing: `fmt`
--> $DIR/impl-wrong-item-for-trait.rs:53:1
|
-53 | impl Debug for FooTypeForMethod {
- | _^ starting here...
+53 | / impl Debug for FooTypeForMethod {
54 | | }
- | |_^ ...ending here: missing `fmt` in implementation
+ | |_^ missing `fmt` in implementation
|
= note: `fmt` from trait: `fn(&Self, &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error>`
error[E0046]: not all trait items implemented, missing: `Item`
--> $DIR/issue-23729.rs:20:9
|
-20 | impl Iterator for Recurrence {
- | _________^ starting here...
+20 | / impl Iterator for Recurrence {
21 | | //~^ ERROR E0046
22 | | //~| NOTE missing `Item` in implementation
23 | | //~| NOTE `Item` from trait: `type Item;`
... |
36 | | }
37 | | }
- | |_________^ ...ending here: missing `Item` in implementation
+ | |_________^ missing `Item` in implementation
|
= note: `Item` from trait: `type Item;`
error[E0046]: not all trait items implemented, missing: `Output`
--> $DIR/issue-23827.rs:36:1
|
-36 | impl<C: Component> FnOnce<(C,)> for Prototype {
- | _^ starting here...
+36 | / impl<C: Component> FnOnce<(C,)> for Prototype {
37 | | //~^ ERROR E0046
38 | | //~| NOTE missing `Output` in implementation
39 | | //~| NOTE `Output` from trait: `type Output;`
... |
42 | | }
43 | | }
- | |_^ ...ending here: missing `Output` in implementation
+ | |_^ missing `Output` in implementation
|
= note: `Output` from trait: `type Output;`
error[E0046]: not all trait items implemented, missing: `Target`
--> $DIR/issue-24356.rs:30:9
|
-30 | impl Deref for Thing {
- | _________^ starting here...
+30 | / impl Deref for Thing {
31 | | //~^ ERROR E0046
32 | | //~| NOTE missing `Target` in implementation
33 | | //~| NOTE `Target` from trait: `type Target;`
34 | | fn deref(&self) -> i8 { self.0 }
35 | | }
- | |_________^ ...ending here: missing `Target` in implementation
+ | |_________^ missing `Target` in implementation
|
= note: `Target` from trait: `type Target;`
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::net::TcpListener;
+use std::net::TcpStream;
+use std::io::{self, Read, Write};
+
+fn handle_client(stream: TcpStream) -> io::Result<()> {
+ stream.write_fmt(format!("message received"))
+}
+
+fn main() {
+ if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
+ for incoming in listener.incoming() {
+ if let Ok(stream) = incoming {
+ handle_client(stream);
+ }
+ }
+ }
+}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/issue-33884.rs:16:22
+ |
+16 | stream.write_fmt(format!("message received"))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected struct `std::fmt::Arguments`, found struct `std::string::String`
+ |
+ = note: expected type `std::fmt::Arguments<'_>`
+ found type `std::string::String`
+ = note: this error originates in a macro outside of the current crate
+
+error: aborting due to previous error
+
note: candidate #1 is defined in an impl for the type `Myisize`
--> $DIR/issue-7575.rs:51:5
|
-51 | fn fff(i: isize) -> isize { //~ NOTE candidate
- | _____^ starting here...
+51 | / fn fff(i: isize) -> isize { //~ NOTE candidate
52 | | i
53 | | }
- | |_____^ ...ending here
+ | |_____^
error: no method named `is_str` found for type `T` in the current scope
--> $DIR/issue-7575.rs:85:7
note: candidate #1 is defined in the trait `ManyImplTrait`
--> $DIR/issue-7575.rs:57:5
|
-57 | fn is_str() -> bool { //~ NOTE candidate
- | _____^ starting here...
+57 | / fn is_str() -> bool { //~ NOTE candidate
58 | | false
59 | | }
- | |_____^ ...ending here
+ | |_____^
= help: to disambiguate the method call, write `ManyImplTrait::is_str(t)` instead
= help: items from traits can only be used if the trait is implemented and in scope; the following trait defines an item `is_str`, perhaps you need to implement it:
= help: candidate #1: `ManyImplTrait`
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:33:9
|
-33 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _________^ starting here...
+33 | / unsafe { //~ ERROR: unnecessary `unsafe` block
34 | | unsf()
35 | | }
- | |_________^ ...ending here: unnecessary `unsafe` block
+ | |_________^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` block
--> $DIR/lint-unused-unsafe.rs:32:5
|
-32 | unsafe { // don't put the warning here
- | _____^ starting here...
+32 | / unsafe { // don't put the warning here
33 | | unsafe { //~ ERROR: unnecessary `unsafe` block
34 | | unsf()
35 | | }
36 | | }
- | |_____^ ...ending here
+ | |_____^
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:39:5
|
-39 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _____^ starting here...
+39 | / unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
- | |_____^ ...ending here: unnecessary `unsafe` block
+ | |_____^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` fn
--> $DIR/lint-unused-unsafe.rs:38:1
|
-38 | unsafe fn bad7() {
- | _^ starting here...
+38 | / unsafe fn bad7() {
39 | | unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
44 | | }
- | |_^ ...ending here
+ | |_^
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:40:9
|
-40 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _________^ starting here...
+40 | / unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
- | |_________^ ...ending here: unnecessary `unsafe` block
+ | |_________^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` fn
--> $DIR/lint-unused-unsafe.rs:38:1
|
-38 | unsafe fn bad7() {
- | _^ starting here...
+38 | / unsafe fn bad7() {
39 | | unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
44 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to 8 previous errors
error[E0072]: recursive type `ListNode` has infinite size
--> $DIR/multiline-span-E0072.rs:12:1
|
-12 | struct
- | _^ starting here...
+12 | / struct
13 | | ListNode
14 | | {
15 | | head: u8,
16 | | tail: Option<ListNode>,
17 | | }
- | |_^ ...ending here: recursive type has infinite size
+ | |_^ recursive type has infinite size
|
= help: insert indirection (e.g., a `Box`, `Rc`, or `&`) at some point to make `ListNode` representable
--> $DIR/multiline-span-simple.rs:23:9
|
23 | foo(1 as u32 +
- | _________^ starting here...
+ | _________^
24 | |
25 | | bar(x,
26 | |
27 | | y),
- | |______________^ ...ending here: the trait `std::ops::Add<()>` is not implemented for `u32`
+ | |______________^ the trait `std::ops::Add<()>` is not implemented for `u32`
|
= note: no implementation for `u32 + ()`
error[E0282]: type annotations needed
--> $DIR/issue-40294.rs:15:1
|
-15 | fn foo<'a,'b,T>(x: &'a T, y: &'b T)
- | _^ starting here...
+15 | / fn foo<'a,'b,T>(x: &'a T, y: &'b T)
16 | | where &'a T : Foo,
17 | | &'b T : Foo
18 | | {
19 | | x.foo();
20 | | y.foo();
21 | | }
- | |_^ ...ending here: cannot infer type for `&'a T`
+ | |_^ cannot infer type for `&'a T`
error: aborting due to previous error
"s390x-unknown-linux-gnu",
"sparc64-unknown-linux-gnu",
"wasm32-unknown-emscripten",
+ "x86_64-linux-android",
"x86_64-apple-darwin",
"x86_64-apple-ios",
"x86_64-pc-windows-gnu",
use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
-use rustdoc::html::markdown::{Markdown, PLAYGROUND};
+use rustdoc::html::markdown::{Markdown, PLAYGROUND, RenderType};
use rustc_serialize::json;
enum OutputFormat {
// Description rendered as markdown.
match info.description {
- Some(ref desc) => write!(output, "{}", Markdown(desc))?,
+ Some(ref desc) => write!(output, "{}", Markdown(desc, RenderType::Hoedown))?,
None => write!(output, "<p>No description.</p>\n")?,
}