let dst_src = dst.join("rust");
t!(fs::create_dir_all(&dst_src));
+ let src_files = [
+ "src/Cargo.lock",
+ ];
// This is the reduced set of paths which will become the rust-src component
// (essentially libstd and all of its path dependencies)
let std_src_dirs = [
];
copy_src_dirs(build, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src);
+ for file in src_files.iter() {
+ copy(&build.src.join(file), &dst_src.join(file));
+ }
// Create source tarball in rust-installer format
let mut cmd = rust_installer(builder);
let build = builder.build;
let target = self.target;
- builder.ensure(compile::Rustc {
- compiler: builder.compiler(0, build.build),
- target,
- });
-
println!("Documenting error index ({})", target);
let out = build.doc_out(target);
t!(fs::create_dir_all(&out));
None => {
// No subcommand -- show the general usage and subcommand help
println!("{}\n", subcommand_help);
- process::exit(0);
+ process::exit(1);
}
};
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
-extern crate serde;
extern crate serde_json;
extern crate cmake;
extern crate filetime;
fn force_use_stage1(&self, compiler: Compiler, target: Interned<String>) -> bool {
!self.config.full_bootstrap &&
compiler.stage >= 2 &&
- self.hosts.iter().any(|h| *h == target)
+ (self.hosts.iter().any(|h| *h == target) || target == self.build)
}
/// Returns the directory that OpenSSL artifacts are compiled into if
Linkchecker, "src/tools/linkchecker", "linkchecker", Mode::Libstd;
CargoTest, "src/tools/cargotest", "cargotest", Mode::Libstd;
Compiletest, "src/tools/compiletest", "compiletest", Mode::Libtest;
- BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Librustc;
+ BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Libstd;
RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::Libstd;
RustInstaller, "src/tools/rust-installer", "rust-installer", Mode::Libstd;
);
cmd.arg("--with-jemalloc-prefix=je_");
}
- if cfg!(feature = "debug-jemalloc") {
- cmd.arg("--enable-debug");
- }
+ // FIXME: building with jemalloc assertions is currently broken.
+ // See <https://github.com/rust-lang/rust/issues/44152>.
+ //if cfg!(feature = "debug") {
+ // cmd.arg("--enable-debug");
+ //}
cmd.arg(format!("--host={}", build_helper::gnu_target(&target)));
cmd.arg(format!("--build={}", build_helper::gnu_target(&host)));
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
- *buf_ptr.offset(curr) = (n as u8) + 48;
+ *buf_ptr.offset(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
}
}
+impl<'a, T: Clone> Option<&'a mut T> {
+ /// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_ref_mut_cloned)]
+ /// let mut x = 12;
+ /// let opt_x = Some(&mut x);
+ /// assert_eq!(opt_x, Some(&mut 12));
+ /// let cloned = opt_x.cloned();
+ /// assert_eq!(cloned, Some(12));
+ /// ```
+ #[unstable(feature = "option_ref_mut_cloned", issue = "43738")]
+ pub fn cloned(self) -> Option<T> {
+ self.map(|t| t.clone())
+ }
+}
+
impl<T: Default> Option<T> {
/// Returns the contained value or a default
///
//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
//! on what type that type variable is ultimately assigned, the match may or may not succeed.
//!
+//! To handle closures, freshened types also have to contain the signature and kind of any
+//! closure in the local inference context, as otherwise the cache key might be invalidated.
+//! The way this is done is somewhat hacky - the closure signature is appended to the substs,
+//! as well as the closure kind "encoded" as a type. Also, special handling is needed when
+//! the closure signature contains a reference to the original closure.
+//!
//! Note that you should be careful not to allow the output of freshening to leak to the user in
//! error messages or in any other form. Freshening is only really useful as an internal detail.
//!
-//! __An important detail concerning regions.__ The freshener also replaces *all* regions with
+//! Because of the manipulation required to handle closures, doing arbitrary operations on
+//! freshened types is not recommended. However, in addition to doing equality/hash
+//! comparisons (for caching), it is possible to do a `ty::_match` operation between
+//! 2 freshened types - this works even with the closure encoding.
+//!
+//! __An important detail concerning regions.__ The freshener also replaces *all* free regions with
//! 'erased. The reason behind this is that, in general, we do not take region relationships into
//! account when making type-overloaded decisions. This is important because of the design of the
//! region inferencer, which is not based on unification but rather on accumulating and then
use ty::{self, Ty, TyCtxt, TypeFoldable};
use ty::fold::TypeFolder;
+use ty::subst::Substs;
use util::nodemap::FxHashMap;
+use hir::def_id::DefId;
+
use std::collections::hash_map::Entry;
use super::InferCtxt;
infcx: &'a InferCtxt<'a, 'gcx, 'tcx>,
freshen_count: u32,
freshen_map: FxHashMap<ty::InferTy, Ty<'tcx>>,
+ closure_set: Vec<DefId>,
}
impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> {
infcx,
freshen_count: 0,
freshen_map: FxHashMap(),
+ closure_set: vec![],
}
}
}
}
}
+
+ /// Returns a fresh inference-variable type built by `freshener`,
+ /// handing it the next sequential index and bumping `freshen_count`
+ /// so each call yields a distinct variable.
+ fn next_fresh<F>(&mut self,
+ freshener: F)
+ -> Ty<'tcx>
+ where F: FnOnce(u32) -> ty::InferTy,
+ {
+ let index = self.freshen_count;
+ self.freshen_count += 1;
+ self.infcx.tcx.mk_infer(freshener(index))
+ }
+
+ fn freshen_closure_like<M, C>(&mut self,
+ def_id: DefId,
+ substs: ty::ClosureSubsts<'tcx>,
+ t: Ty<'tcx>,
+ markers: M,
+ combine: C)
+ -> Ty<'tcx>
+ where M: FnOnce(&mut Self) -> (Ty<'tcx>, Ty<'tcx>),
+ C: FnOnce(&'tcx Substs<'tcx>) -> Ty<'tcx>
+ {
+ let tcx = self.infcx.tcx;
+
+ let closure_in_progress = self.infcx.in_progress_tables.map_or(false, |tables| {
+ tcx.hir.as_local_node_id(def_id).map_or(false, |closure_id| {
+ tables.borrow().local_id_root ==
+ Some(DefId::local(tcx.hir.node_to_hir_id(closure_id).owner))
+ })
+ });
+
+ if !closure_in_progress {
+ // If this closure belongs to another infcx, its kind etc. were
+ // fully inferred and its signature/kind are exactly what's listed
+ // in its infcx. So we don't need to add the markers for them.
+ return t.super_fold_with(self);
+ }
+
+ // We are encoding a closure in progress. Because we want our freshening
+ // key to contain all inference information needed to make sense of our
+ // value, we need to encode the closure signature and kind. The way
+ // we do that is to add them as 2 variables to the closure substs,
+ // basically because it's there (and nobody cares about adding extra stuff
+ // to substs).
+ //
+ // This means the "freshened" closure substs ends up looking like
+ // fresh_substs = [PARENT_SUBSTS* ; UPVARS* ; SIG_MARKER ; KIND_MARKER]
+ let (marker_1, marker_2) = if self.closure_set.contains(&def_id) {
+ // We found the closure def-id within its own signature. Just
+ // leave a new freshened type - any matching operations would
+ // have found and compared the exterior closure already to
+ // get here.
+ //
+ // In that case, we already know what the signature would
+ // be - the parent closure on the stack already contains a
+ // "copy" of the signature, so there is no reason to encode
+ // it again for injectivity. Just use a fresh type variable
+ // to make everything comparable.
+ //
+ // For example (closure kinds omitted for clarity)
+ // t=[closure FOO sig=[closure BAR sig=[closure FOO ..]]]
+ // Would get encoded to
+ // t=[closure FOO sig=[closure BAR sig=[closure FOO sig=$0]]]
+ //
+ // and we can decode by having
+ // $0=[closure BAR {sig doesn't exist in decode}]
+ // and get
+ // t=[closure FOO]
+ // sig[FOO] = [closure BAR]
+ // sig[BAR] = [closure FOO]
+ (self.next_fresh(ty::FreshTy), self.next_fresh(ty::FreshTy))
+ } else {
+ self.closure_set.push(def_id);
+ let markers = markers(self);
+ self.closure_set.pop();
+ markers
+ };
+
+ combine(tcx.mk_substs(
+ substs.substs.iter().map(|k| k.fold_with(self)).chain(
+ [marker_1, marker_2].iter().cloned().map(From::from)
+ )))
+ }
}
impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> {
}
fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
- if !t.needs_infer() && !t.has_erasable_regions() {
+ if !t.needs_infer() && !t.has_erasable_regions() &&
+ !(t.has_closure_types() && self.infcx.in_progress_tables.is_some()) {
return t;
}
t
}
+ ty::TyClosure(def_id, substs) => {
+ self.freshen_closure_like(
+ def_id, substs, t,
+ |this| {
+ // HACK: use a "random" integer type to mark the kind. Because
+ // different closure kinds shouldn't get unified during
+ // selection, the "subtyping" relationship (where any kind is
+ // better than no kind) shouldn't matter here, just that the
+ // types are different.
+ let closure_kind = this.infcx.closure_kind(def_id);
+ let closure_kind_marker = match closure_kind {
+ None => tcx.types.i8,
+ Some(ty::ClosureKind::Fn) => tcx.types.i16,
+ Some(ty::ClosureKind::FnMut) => tcx.types.i32,
+ Some(ty::ClosureKind::FnOnce) => tcx.types.i64,
+ };
+
+ let closure_sig = this.infcx.fn_sig(def_id);
+ (tcx.mk_fn_ptr(closure_sig.fold_with(this)),
+ closure_kind_marker)
+ },
+ |substs| tcx.mk_closure(def_id, substs)
+ )
+ }
+
+ ty::TyGenerator(def_id, substs, interior) => {
+ self.freshen_closure_like(
+ def_id, substs, t,
+ |this| {
+ let gen_sig = this.infcx.generator_sig(def_id).unwrap();
+ // FIXME: want to revise this strategy when generator
+ // signatures can actually contain LBRs.
+ let sig = this.tcx().no_late_bound_regions(&gen_sig)
+ .unwrap_or_else(|| {
+ bug!("late-bound regions in signature of {:?}",
+ def_id)
+ });
+ (sig.yield_ty, sig.return_ty).fold_with(this)
+ },
+ |substs| {
+ tcx.mk_generator(def_id, ty::ClosureSubsts { substs }, interior)
+ }
+ )
+ }
+
ty::TyBool |
ty::TyChar |
ty::TyInt(..) |
ty::TyFnDef(..) |
ty::TyFnPtr(_) |
ty::TyDynamic(..) |
- ty::TyClosure(..) |
- ty::TyGenerator(..) |
ty::TyNever |
ty::TyTuple(..) |
ty::TyProjection(..) |
dep_node: DepNodeIndex,
result: EvaluationResult)
{
- // Avoid caching results that depend on more than just the trait-ref:
- // The stack can create recursion, and closure signatures
- // being yet uninferred can create "spurious" EvaluatedToAmbig
- // and EvaluatedToOk.
- if result.is_stack_dependent() ||
- ((result == EvaluatedToAmbig || result == EvaluatedToOk)
- && trait_ref.has_closure_types())
- {
+ // Avoid caching results that depend on more than just the trait-ref
+ // - the stack can create recursion.
+ if result.is_stack_dependent() {
return;
}
this.candidate_from_obligation_no_cache(stack)
});
- if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) {
- debug!("CACHE MISS: SELECT({:?})={:?}",
- cache_fresh_trait_pred, candidate);
- self.insert_candidate_cache(stack.obligation.param_env,
- cache_fresh_trait_pred,
- dep_node,
- candidate.clone());
- }
-
+ debug!("CACHE MISS: SELECT({:?})={:?}",
+ cache_fresh_trait_pred, candidate);
+ self.insert_candidate_cache(stack.obligation.param_env,
+ cache_fresh_trait_pred,
+ dep_node,
+ candidate.clone());
candidate
}
.insert(trait_ref, WithDepNode::new(dep_node, candidate));
}
- fn should_update_candidate_cache(&mut self,
- cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
- candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>)
- -> bool
- {
- // In general, it's a good idea to cache results, even
- // ambiguous ones, to save us some trouble later. But we have
- // to be careful not to cache results that could be
- // invalidated later by advances in inference. Normally, this
- // is not an issue, because any inference variables whose
- // types are not yet bound are "freshened" in the cache key,
- // which means that if we later get the same request once that
- // type variable IS bound, we'll have a different cache key.
- // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is
- // not yet known, we may cache the result as `None`. But if
- // later `_#0t` is bound to `Bar`, then when we freshen we'll
- // have `Vec<Bar> : Foo` as the cache key.
- //
- // HOWEVER, it CAN happen that we get an ambiguity result in
- // one particular case around closures where the cache key
- // would not change. That is when the precise types of the
- // upvars that a closure references have not yet been figured
- // out (i.e., because it is not yet known if they are captured
- // by ref, and if by ref, what kind of ref). In these cases,
- // when matching a builtin bound, we will yield back an
- // ambiguous result. But the *cache key* is just the closure type,
- // it doesn't capture the state of the upvar computation.
- //
- // To avoid this trap, just don't cache ambiguous results if
- // the self-type contains no inference byproducts (that really
- // shouldn't happen in other circumstances anyway, given
- // coherence).
-
- match *candidate {
- Ok(Some(_)) | Err(_) => true,
- Ok(None) => cache_fresh_trait_pred.has_infer_types()
- }
- }
-
fn assemble_candidates<'o>(&mut self,
stack: &TraitObligationStack<'o, 'tcx>)
-> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>>
self.continue_after_error.set(continue_after_error);
}
+ // NOTE: DO NOT call this function from rustc, as it relies on `err_count` being non-zero
+ // if an error happened to avoid ICEs. This function should only be called from tools.
+ /// Resets the tracked error count back to zero.
+ pub fn reset_err_count(&self) {
+ self.err_count.set(0);
+ }
+
pub fn struct_dummy<'a>(&'a self) -> DiagnosticBuilder<'a> {
DiagnosticBuilder::new(self, Level::Cancelled, "")
}
}
if end_newline {
- //add a space so stripping <br> tags and breaking spaces still renders properly
+ // add a space so stripping <br> tags and breaking spaces still renders properly
if f.alternate() {
clause.push(' ');
} else {
}
}
+ /// Gets the next token out of the lexer, emitting fatal errors if lexing fails.
+ fn try_next_token(&mut self) -> io::Result<TokenAndSpan> {
+ match self.lexer.try_next_token() {
+ Ok(tas) => Ok(tas),
+ Err(_) => {
+ // Lexing failed: flush any pending fatal errors, warn that
+ // highlighting is being abandoned, and return an opaque I/O
+ // error so the caller backs out of rendering this block.
+ self.lexer.emit_fatal_errors();
+ self.lexer.sess.span_diagnostic
+ .struct_warn("Backing out of syntax highlighting")
+ .note("You probably did not intend to render this as a rust code-block")
+ .emit();
+ Err(io::Error::new(io::ErrorKind::Other, ""))
+ }
+ }
+ }
+
/// Exhausts the `lexer` writing the output into `out`.
///
/// The general structure for this method is to iterate over each token,
out: &mut W)
-> io::Result<()> {
loop {
- let next = match self.lexer.try_next_token() {
- Ok(tas) => tas,
- Err(_) => {
- self.lexer.emit_fatal_errors();
- self.lexer.sess.span_diagnostic
- .struct_warn("Backing out of syntax highlighting")
- .note("You probably did not intend to render this as a rust code-block")
- .emit();
- return Err(io::Error::new(io::ErrorKind::Other, ""));
- }
- };
-
+ let next = self.try_next_token()?;
if next.tok == token::Eof {
break;
}
}
}
- // This is the start of an attribute. We're going to want to
+ // This might be the start of an attribute. We're going to want to
// continue highlighting it as an attribute until the ending ']' is
// seen, so skip out early. Down below we terminate the attribute
// span when we see the ']'.
token::Pound => {
- self.in_attribute = true;
- out.enter_span(Class::Attribute)?;
+ // We can't be sure that our # begins an attribute (it could
+ // just be appearing in a macro) until we read either `#![` or
+ // `#[` from the input stream.
+ //
+ // We don't want to start highlighting as an attribute until
+ // we're confident there is going to be a ] coming up, as
+ // otherwise # tokens in macros highlight the rest of the input
+ // as an attribute.
+
+ // Case 1: #![inner_attribute]
+ if self.lexer.peek().tok == token::Not {
+ self.try_next_token()?; // NOTE: consumes `!` token!
+ if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ self.in_attribute = true;
+ out.enter_span(Class::Attribute)?;
+ }
+ out.string("#", Class::None, None)?;
+ out.string("!", Class::None, None)?;
+ return Ok(());
+ }
+
+ // Case 2: #[outer_attribute]
+ if self.lexer.peek().tok == token::OpenDelim(token::Bracket) {
+ self.in_attribute = true;
+ out.enter_span(Class::Attribute)?;
+ }
out.string("#", Class::None, None)?;
return Ok(());
}
} else {
write!(fmt, "Module ")?;
},
- clean::FunctionItem(..) | clean::ForeignFunctionItem(..) =>
- write!(fmt, "Function ")?,
+ clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => write!(fmt, "Function ")?,
clean::TraitItem(..) => write!(fmt, "Trait ")?,
clean::StructItem(..) => write!(fmt, "Struct ")?,
clean::UnionItem(..) => write!(fmt, "Union ")?,
clean::TypedefItem(..) => write!(fmt, "Type Definition ")?,
clean::MacroItem(..) => write!(fmt, "Macro ")?,
clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
- clean::StaticItem(..) | clean::ForeignStaticItem(..) =>
- write!(fmt, "Static ")?,
+ clean::StaticItem(..) | clean::ForeignStaticItem(..) => write!(fmt, "Static ")?,
clean::ConstantItem(..) => write!(fmt, "Constant ")?,
_ => {
// We don't generate pages for any other type.
display: inline-block;
}
+.in-band > code {
+ display: inline-block;
+}
+
#main { position: relative; }
#main > .since {
top: inherit;
}
.in-band:hover > .anchor {
- display: initial;
+ display: inline-block;
+ position: absolute;
}
.anchor {
display: none;
///
/// # Examples
///
+/// Creating a [`SocketAddr`] iterator that yields one item:
+///
+/// ```
+/// use std::net::{ToSocketAddrs, SocketAddr};
+///
+/// let addr = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let mut addrs_iter = addr.to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Creating a [`SocketAddr`] iterator from a hostname:
+///
/// ```no_run
-/// use std::net::{SocketAddrV4, TcpStream, UdpSocket, TcpListener, Ipv4Addr};
-///
-/// fn main() {
-/// let ip = Ipv4Addr::new(127, 0, 0, 1);
-/// let port = 12345;
-///
-/// // The following lines are equivalent modulo possible "localhost" name
-/// // resolution differences
-/// let tcp_s = TcpStream::connect(SocketAddrV4::new(ip, port));
-/// let tcp_s = TcpStream::connect((ip, port));
-/// let tcp_s = TcpStream::connect(("127.0.0.1", port));
-/// let tcp_s = TcpStream::connect(("localhost", port));
-/// let tcp_s = TcpStream::connect("127.0.0.1:12345");
-/// let tcp_s = TcpStream::connect("localhost:12345");
-///
-/// // TcpListener::bind(), UdpSocket::bind() and UdpSocket::send_to()
-/// // behave similarly
-/// let tcp_l = TcpListener::bind("localhost:12345");
-///
-/// let mut udp_s = UdpSocket::bind(("127.0.0.1", port)).unwrap();
-/// udp_s.send_to(&[7], (ip, 23451)).unwrap();
-/// }
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// // assuming 'localhost' resolves to 127.0.0.1
+/// let mut addrs_iter = "localhost:443".to_socket_addrs().unwrap();
+/// assert_eq!(addrs_iter.next(), Some(SocketAddr::from(([127, 0, 0, 1], 443))));
+/// assert!(addrs_iter.next().is_none());
+///
+/// // assuming 'foo' does not resolve
+/// assert!("foo:443".to_socket_addrs().is_err());
/// ```
+///
+/// Creating a [`SocketAddr`] iterator that yields multiple items:
+///
+/// ```
+/// use std::net::{SocketAddr, ToSocketAddrs};
+///
+/// let addr1 = SocketAddr::from(([0, 0, 0, 0], 80));
+/// let addr2 = SocketAddr::from(([127, 0, 0, 1], 443));
+/// let addrs = vec![addr1, addr2];
+///
+/// let mut addrs_iter = (&addrs[..]).to_socket_addrs().unwrap();
+///
+/// assert_eq!(Some(addr1), addrs_iter.next());
+/// assert_eq!(Some(addr2), addrs_iter.next());
+/// assert!(addrs_iter.next().is_none());
+/// ```
+///
+/// Attempting to create a [`SocketAddr`] iterator from an improperly formatted
+/// socket address `&str` (missing the port):
+///
+/// ```
+/// use std::io;
+/// use std::net::ToSocketAddrs;
+///
+/// let err = "127.0.0.1".to_socket_addrs().unwrap_err();
+/// assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
+/// ```
+///
+/// [`TcpStream::connect`] is an example of a function that utilizes
+/// `ToSocketAddrs` as a trait bound on its parameter in order to accept
+/// different types:
+///
+/// ```no_run
+/// use std::net::{TcpStream, Ipv4Addr};
+///
+/// let stream = TcpStream::connect(("127.0.0.1", 443));
+/// // or
+/// let stream = TcpStream::connect("127.0.0.1:443");
+/// // or
+/// let stream = TcpStream::connect((Ipv4Addr::new(127, 0, 0, 1), 443));
+/// ```
+///
+/// [`TcpStream::connect`]: ../../std/net/struct.TcpStream.html#method.connect
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToSocketAddrs {
/// Returned iterator over socket addresses which this type may correspond
})
}
+/// Returns the first attribute in `attrs` whose name matches `name`
+/// (via `check_name`), or `None` if no such attribute exists.
+pub fn find_by_name<'a>(attrs: &'a [Attribute], name: &str) -> Option<&'a Attribute> {
+ attrs.iter().find(|attr| attr.check_name(name))
+}
+
pub fn first_attr_value_str_by_name(attrs: &[Attribute], name: &str) -> Option<Symbol> {
attrs.iter()
.find(|at| at.check_name(name))
fn visit_item(&mut self, i: &'a ast::Item) {
match i.node {
ast::ItemKind::ExternCrate(_) => {
- if attr::contains_name(&i.attrs[..], "macro_reexport") {
- gate_feature_post!(&self, macro_reexport, i.span,
+ if let Some(attr) = attr::find_by_name(&i.attrs[..], "macro_reexport") {
+ gate_feature_post!(&self, macro_reexport, attr.span,
"macros reexports are experimental \
and possibly buggy");
}
function may change over time, for now \
a top-level `fn main()` is required");
}
- if attr::contains_name(&i.attrs[..], "must_use") {
- gate_feature_post!(&self, fn_must_use, i.span,
+ if let Some(attr) = attr::find_by_name(&i.attrs[..], "must_use") {
+ gate_feature_post!(&self, fn_must_use, attr.span,
"`#[must_use]` on functions is experimental",
GateStrength::Soft);
}
}
ast::ItemKind::Struct(..) => {
- if attr::contains_name(&i.attrs[..], "simd") {
- gate_feature_post!(&self, simd, i.span,
+ if let Some(attr) = attr::find_by_name(&i.attrs[..], "simd") {
+ gate_feature_post!(&self, simd, attr.span,
"SIMD types are experimental and possibly buggy");
- self.context.parse_sess.span_diagnostic.span_warn(i.span,
+ self.context.parse_sess.span_diagnostic.span_warn(attr.span,
"the `#[simd]` attribute \
is deprecated, use \
`#[repr(simd)]` instead");
}
- for attr in &i.attrs {
- if attr.path == "repr" {
- for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
- if item.check_name("simd") {
- gate_feature_post!(&self, repr_simd, i.span,
- "SIMD types are experimental \
- and possibly buggy");
-
- }
- if item.check_name("align") {
- gate_feature_post!(&self, repr_align, i.span,
- "the struct `#[repr(align(u16))]` attribute \
- is experimental");
- }
+ if let Some(attr) = attr::find_by_name(&i.attrs[..], "repr") {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
+ if item.check_name("simd") {
+ gate_feature_post!(&self, repr_simd, attr.span,
+ "SIMD types are experimental and possibly buggy");
+ }
+ if item.check_name("align") {
+ gate_feature_post!(&self, repr_align, attr.span,
+ "the struct `#[repr(align(u16))]` attribute \
+ is experimental");
}
}
}
for impl_item in impl_items {
if let ast::ImplItemKind::Method(..) = impl_item.node {
- if attr::contains_name(&impl_item.attrs[..], "must_use") {
- gate_feature_post!(&self, fn_must_use, impl_item.span,
+ if let Some(attr) = attr::find_by_name(&impl_item.attrs[..], "must_use") {
+ gate_feature_post!(&self, fn_must_use, attr.span,
"`#[must_use]` on methods is experimental",
GateStrength::Soft);
}
#![crate_type = "dylib"]
#[macro_reexport(reexported)]
+//~^ ERROR macros reexports are experimental and possibly buggy
#[macro_use] #[no_link]
extern crate macro_reexport_1;
-//~^ ERROR macros reexports are experimental and possibly buggy
struct MyStruct;
impl MyStruct {
- #[must_use]
- fn need_to_use_method() -> bool { true } //~ WARN `#[must_use]` on methods is experimental
+ #[must_use] //~ WARN `#[must_use]` on methods is experimental
+ fn need_to_use_method() -> bool { true }
}
-#[must_use]
-fn need_to_use_it() -> bool { true } //~ WARN `#[must_use]` on functions is experimental
+#[must_use] //~ WARN `#[must_use]` on functions is experimental
+fn need_to_use_it() -> bool { true }
// Feature gates are tidy-required to have a specially named (or
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#[repr(simd)]
-struct Foo(u64, u64); //~ error: SIMD types are experimental
+#[repr(simd)] //~ error: SIMD types are experimental
+struct Foo(u64, u64);
fn main() {}
// except according to those terms.
#![feature(attr_literals)]
-#[repr(align(64))]
-struct Foo(u64, u64); //~ error: the struct `#[repr(align(u16))]` attribute is experimental
+#[repr(align(64))] //~ error: the struct `#[repr(align(u16))]` attribute is experimental
+struct Foo(u64, u64);
fn main() {}
// pretty-expanded FIXME #23616
-#[repr(simd)]
+#[repr(simd)] //~ ERROR SIMD types are experimental
struct RGBA {
r: f32,
g: f32,
b: f32,
a: f32
}
-//~^^^^^^ ERROR SIMD types are experimental and possibly buggy (see issue #27731)
pub fn main() {}
#[repr = "3900"] fn f() { }
//~^ WARN unused attribute
- #[repr = "3900"] struct S;
- //~^ WARN unused attribute
+ struct S;
#[repr = "3900"] type T = S;
//~^ WARN unused attribute
// @!has - 'space'
// @!has - 'comment'
// @has - '# <span class="ident">single'
-// @has - '#<span class="attribute"># <span class="ident">double</span>'
-// @has - '#<span class="attribute">#<span class="attribute"># <span class="ident">triple</span>'
+// @has - '## <span class="ident">double</span>'
+// @has - '### <span class="ident">triple</span>'
+// @has - '<span class="attribute">#[<span class="ident">outer</span>]</span>'
+// @has - '<span class="attribute">#![<span class="ident">inner</span>]</span>'
/// ```no_run
/// # # space
/// ## single
/// ### double
/// #### triple
+/// ##[outer]
+/// ##![inner]
/// ```
pub struct Foo;
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(attr_literals)]
+
+#[repr(align(16))]
+struct Gem {
+ mohs_hardness: u8,
+ poofed: bool,
+ weapon: Weapon,
+}
+
+#[repr(simd)]
+struct Weapon {
+ name: String,
+ damage: u32
+}
+
+impl Gem {
+ #[must_use] fn summon_weapon(&self) -> Weapon { self.weapon }
+}
+
+#[must_use]
+fn bubble(gem: Gem) -> Result<Gem, ()> {
+ if gem.poofed {
+ Ok(gem)
+ } else {
+ Err(())
+ }
+}
+
+fn main() {}
--- /dev/null
+error: the struct `#[repr(align(u16))]` attribute is experimental (see issue #33626)
+ --> $DIR/gated-features-attr-spans.rs:13:1
+ |
+13 | #[repr(align(16))]
+ | ^^^^^^^^^^^^^^^^^^
+ |
+ = help: add #![feature(repr_align)] to the crate attributes to enable
+
+error: SIMD types are experimental and possibly buggy (see issue #27731)
+ --> $DIR/gated-features-attr-spans.rs:20:1
+ |
+20 | #[repr(simd)]
+ | ^^^^^^^^^^^^^
+ |
+ = help: add #![feature(repr_simd)] to the crate attributes to enable
+
+warning: `#[must_use]` on methods is experimental (see issue #43302)
+ --> $DIR/gated-features-attr-spans.rs:27:5
+ |
+27 | #[must_use] fn summon_weapon(&self) -> Weapon { self.weapon }
+ | ^^^^^^^^^^^
+ |
+ = help: add #![feature(fn_must_use)] to the crate attributes to enable
+
+warning: `#[must_use]` on functions is experimental (see issue #43302)
+ --> $DIR/gated-features-attr-spans.rs:30:1
+ |
+30 | #[must_use]
+ | ^^^^^^^^^^^
+ |
+ = help: add #![feature(fn_must_use)] to the crate attributes to enable
+
+error: aborting due to 2 previous errors
+
self.package("rust-std", &mut manifest.pkg, TARGETS);
self.package("rust-docs", &mut manifest.pkg, TARGETS);
self.package("rust-src", &mut manifest.pkg, &["*"]);
- self.package("rls", &mut manifest.pkg, HOSTS);
+ let rls_package_name = if self.rust_release == "nightly" {
+ "rls"
+ } else {
+ "rls-preview"
+ };
+ self.package(rls_package_name, &mut manifest.pkg, HOSTS);
self.package("rust-analysis", &mut manifest.pkg, TARGETS);
let mut pkg = Package {
}
extensions.push(Component {
- pkg: "rls".to_string(),
+ pkg: rls_package_name.to_string(),
target: host.to_string(),
});
extensions.push(Component {
format!("rust-src-{}.tar.gz", self.rust_release)
} else if component == "cargo" {
format!("cargo-{}-{}.tar.gz", self.cargo_release, target)
- } else if component == "rls" {
+ } else if component == "rls" || component == "rls-preview" {
format!("rls-{}-{}.tar.gz", self.rls_release, target)
} else {
format!("{}-{}-{}.tar.gz", component, self.rust_release, target)
fn cached_version(&self, component: &str) -> &str {
if component == "cargo" {
&self.cargo_version
- } else if component == "rls" {
+ } else if component == "rls" || component == "rls-preview" {
&self.rls_version
} else {
&self.rust_version
#[derive(Clone)]
pub struct Config {
- // The library paths required for running the compiler
+ /// The library paths required for running the compiler
pub compile_lib_path: PathBuf,
- // The library paths required for running compiled programs
+ /// The library paths required for running compiled programs
pub run_lib_path: PathBuf,
- // The rustc executable
+ /// The rustc executable
pub rustc_path: PathBuf,
- // The rustdoc executable
+ /// The rustdoc executable
pub rustdoc_path: Option<PathBuf>,
- // The python executable to use for LLDB
+ /// The python executable to use for LLDB
pub lldb_python: String,
- // The python executable to use for htmldocck
+ /// The python executable to use for htmldocck
pub docck_python: String,
- // The llvm FileCheck binary path
+ /// The llvm FileCheck binary path
pub llvm_filecheck: Option<PathBuf>,
- // The valgrind path
+ /// The valgrind path
pub valgrind_path: Option<String>,
- // Whether to fail if we can't run run-pass-valgrind tests under valgrind
- // (or, alternatively, to silently run them like regular run-pass tests).
+ /// Whether to fail if we can't run run-pass-valgrind tests under valgrind
+ /// (or, alternatively, to silently run them like regular run-pass tests).
pub force_valgrind: bool,
- // The directory containing the tests to run
+ /// The directory containing the tests to run
pub src_base: PathBuf,
- // The directory where programs should be built
+ /// The directory where programs should be built
pub build_base: PathBuf,
- // The name of the stage being built (stage1, etc)
+ /// The name of the stage being built (stage1, etc)
pub stage_id: String,
- // The test mode, compile-fail, run-fail, run-pass
+ /// The test mode, compile-fail, run-fail, run-pass
pub mode: Mode,
- // Run ignored tests
+ /// Run ignored tests
pub run_ignored: bool,
- // Only run tests that match this filter
+ /// Only run tests that match this filter
pub filter: Option<String>,
- // Exactly match the filter, rather than a substring
+ /// Exactly match the filter, rather than a substring
pub filter_exact: bool,
- // Write out a parseable log of tests that were run
+ /// Write out a parseable log of tests that were run
pub logfile: Option<PathBuf>,
- // A command line to prefix program execution with,
- // for running under valgrind
+ /// A command line to prefix program execution with,
+ /// for running under valgrind
pub runtool: Option<String>,
- // Flags to pass to the compiler when building for the host
+ /// Flags to pass to the compiler when building for the host
pub host_rustcflags: Option<String>,
- // Flags to pass to the compiler when building for the target
+ /// Flags to pass to the compiler when building for the target
pub target_rustcflags: Option<String>,
- // Target system to be tested
+ /// Target system to be tested
pub target: String,
- // Host triple for the compiler being invoked
+ /// Host triple for the compiler being invoked
pub host: String,
- // Path to / name of the GDB executable
+ /// Path to / name of the GDB executable
pub gdb: Option<String>,
- // Version of GDB, encoded as ((major * 1000) + minor) * 1000 + patch
+ /// Version of GDB, encoded as ((major * 1000) + minor) * 1000 + patch
pub gdb_version: Option<u32>,
- // Whether GDB has native rust support
+ /// Whether GDB has native rust support
pub gdb_native_rust: bool,
- // Version of LLDB
+ /// Version of LLDB
pub lldb_version: Option<String>,
- // Version of LLVM
+ /// Version of LLVM
pub llvm_version: Option<String>,
- // Is LLVM a system LLVM
+ /// Is LLVM a system LLVM
pub system_llvm: bool,
- // Path to the android tools
+ /// Path to the android tools
pub android_cross_path: PathBuf,
- // Extra parameter to run adb on arm-linux-androideabi
+ /// Extra parameter to run adb on arm-linux-androideabi
pub adb_path: String,
- // Extra parameter to run test suite on arm-linux-androideabi
+ /// Extra parameter to run test suite on arm-linux-androideabi
pub adb_test_dir: String,
- // status whether android device available or not
+ /// status whether android device available or not
pub adb_device_status: bool,
- // the path containing LLDB's Python module
+ /// the path containing LLDB's Python module
pub lldb_python_dir: Option<String>,
- // Explain what's going on
+ /// Explain what's going on
pub verbose: bool,
- // Print one character per test instead of one line
+ /// Print one character per test instead of one line
pub quiet: bool,
- // Whether to use colors in test.
+ /// Whether to use colors in test.
pub color: ColorConfig,
- // where to find the remote test client process, if we're using it
+ /// where to find the remote test client process, if we're using it
pub remote_test_client: Option<PathBuf>,
// Configuration for various run-make tests frobbing things like C compilers